file_name (stringlengths 3-137) | prefix (stringlengths 0-918k) | suffix (stringlengths 0-962k) | middle (stringlengths 0-812k)
---|---|---|---|
TwitterMiner_UserAllTweets.py | #!/usr/bin/env python
# encoding: utf-8
import os
import sqlite3 as lite
import sys
import json
import time | from tweepy import OAuthHandler
from TwitterMiner_settings import *
import hashlib
#from Twitter_validate import validate_image
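# dump_hash() returns the 40-character hex SHA-1 digest of a bytes object,
# e.g. dump_hash(b"") == "da39a3ee5e6b4b0d3255bfef95601890afd80709"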
def dump_hash(twitter_dump):
data_hash = None # Ensure the value starts with nothing
dump = hashlib.sha1()
dump.update(twitter_dump)
data_hash = dump.hexdigest()
return data_hash
def file_hash(point_to_file):
hash_sha1 = hashlib.sha1()
with open(point_to_file, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_sha1.update(chunk)
print(hash_sha1.hexdigest())
return hash_sha1.hexdigest()
def extract_image_blob(posted_image_dest):
with open("test.jpg", "wb") as image_file:
c.execute("SELECT tweeted_image FROM T_Tweets WHERE Tweet_id = " + str(tweet_id))
ablob = c.fetchone()
image_file.write(ablob[0])
def create_db(table_name):
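# WAL mode lets readers query the case database while tweets are still being inserted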
c.execute("PRAGMA journal_mode = WAL")
c.execute("CREATE TABLE IF NOT EXISTS " + table_name + "(tweet_id INTEGER NOT NULL PRIMARY KEY, date_mined TEXT, screen_name TEXT, \
user_id INTEGER, users_name TEXT, created_at_UTC TEXT, is_retweet TEXT, \
retweeted_times TEXT, text TEXT, place_name TEXT, country_code TEXT, country TEXT, \
bounding_box TEXT, source_tweeted TEXT, geo TEXT, in_reply_to_user TEXT, \
inreply_statusid TEXT, posted_image_dest TEXT, tweeted_image BLOB, image_hash TEXT, \
media_type TEXT, media_url TEXT, media_id TEXT, posted_video_dest TEXT, \
tweeted_video BLOB, video_hash TEXT, video_type TEXT, video_url TEXT, \
url_in_tweet TEXT, status BLOB, status_hash TEXT, bookmark TEXT)")
conn.commit()
def get_all_tweets(screen_name):
#Twitter only allows access to a user's most recent 3240 tweets with this method
#initialize a list to hold all the tweepy Tweets
alltweets = []
#make initial request for most recent tweets (200 is the maximum allowed count)
try:
new_tweets = api.user_timeline(screen_name = screen_name, count=200)
except tweepy.TweepError:
print("Failed to pull tweets from %s" % screen_name)
print("User may be protected/private.")
print("Exiting...")
sys.exit()
except tweepy.RateLimitError: # I want to add code here to switch creds if a Rate limit occurs
print("Failed to pull the tweets due to a Twitter Rate Limit error.")
print("Please wait 15 min and try again...")
sys.exit()
#save most recent tweets
alltweets.extend(new_tweets)
#save the id of the oldest tweet less one
oldest = alltweets[-1].id - 1
#keep grabbing tweets until there are no tweets left to grab
while len(new_tweets) > 0:
print("getting tweets before %s" % (oldest))
#all subsequent requests use the max_id param to prevent duplicates
new_tweets = api.user_timeline(screen_name = screen_name,count=200,max_id=oldest)
#save most recent tweets
alltweets.extend(new_tweets)
#update the id of the oldest tweet less one
oldest = alltweets[-1].id - 1
print("...%s tweets downloaded so far" % (len(alltweets)))
#transform the tweepy tweets into a 2D array that will populate the csv
for status in alltweets:
# Pull the pieces of the tweet and put them in a variable
Tweetid = status.id
screenname = status.user.screen_name
userid = status.user.id
usersname = status.user.name
tweettime = status.created_at
# If the status has a retweeted_status attribute, it is a retweet and is_retweet is set accordingly
if hasattr(status, 'retweeted_status'):
is_retweet = True
#Added this section on 6-19-19 due to truncated ReTweets
#This checks for populated data in the extended_tweet
#If data is populated, it pulls the entire full_text
#Thanks to Fraser Phillips for finding this issue
if hasattr(status.retweeted_status, 'extended_tweet'):
Amp_text = str(status.retweeted_status.extended_tweet['full_text'])
tweet = "RT: " + Amp_text.replace('&','&')
else:
Amp_text = status.retweeted_status.text
tweet = "RT: " + Amp_text.replace('&','&')
else:
is_retweet = False
Amp_text = status.text
tweet = Amp_text.replace('&','&')
retweeted_times = status.retweet_count
if status.place is not None:
placename = status.place.full_name
countrycode = status.place.country_code
country = status.place.country
boundingbox = str(status.place.bounding_box.coordinates)
else:
placename = None
countrycode = None
country = None
boundingbox = None
Tweet_source = status.source
geo = status.geo
if geo is not None:
geo = json.dumps(geo)
inreplytouser = status.in_reply_to_screen_name
inreply_tostatus = status.in_reply_to_status_id_str
#Checks for Media in the Tweet and downloads it
if 'media' in status.entities:
image_posted = status.entities['media'][0]['media_url']
remove_tweet_url = image_posted.split('/')[-1]
posted_image_dest = os.path.join("Case_Attachments", casename, "tweets", screenname, "tweeted_image", remove_tweet_url)
image_path = os.path.join("Case_Attachments", casename, "tweets", screenname, "tweeted_image")
if not os.path.exists(image_path):
os.makedirs(image_path)
try:
print("Downloading... %s" % posted_image_dest)
urllib.request.urlretrieve(image_posted, filename = posted_image_dest)
tweeted_image = open(posted_image_dest, "rb").read()
image_hash = dump_hash(tweeted_image)
except urllib.error.URLError as e:
print("Error downloading file... %s ... from TweetID: %s" % (posted_image_dest, str(Tweetid)))
posted_image_dest = "ERROR DOWNLOADING FILE"
tweeted_image = None
image_hash = None
pass
except:
print("Error downloading file... %s ... from TweetID: %s" % (posted_image_dest, str(Tweetid)))
posted_image_dest = "ERROR DOWNLOADING FILE - Unknown Error"
tweeted_image = None
image_hash = None
pass
mediatype = status.entities['media'][0]['type']
mediaurl = status.entities['media'][0]['media_url']
mediaid = status.entities['media'][0]['id']
else:
posted_image_dest = None
mediatype = None
mediaurl = None
mediaid = None
tweeted_image = None
image_hash = None
# New video Code
#Checks for Video in the tweets and downloads it
if hasattr(status, 'extended_entities'):
if 'video_info' in status.extended_entities['media'][0]:
# This section counts the variant dictionaries in video_info,
# compares the bitrate of each variant to find the highest value,
# and then extracts that video.
variant_times = len(status.extended_entities['media'][0]['video_info']['variants']) # Gets the number of variants
bit_rate = -1
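# -1 is a sentinel so the first variant that reports any bitrate is accepted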
for variant_count in range(0, variant_times): #iterate through all the variants in this tweet
if 'bitrate' in status.extended_entities['media'][0]['video_info']['variants'][variant_count] and \
bit_rate < status.extended_entities['media'][0]['video_info']['variants'][variant_count]['bitrate']:
bit_rate = status.extended_entities['media'][0]['video_info']['variants'][variant_count]['bitrate']
videourl = status.extended_entities['media'][0]['video_info']['variants'][variant_count]['url']
videotype = status.extended_entities['media'][0]['video_info']['variants'][variant_count]['content_type']
remove_video_url = videourl.split('/')[-1]
posted_video_dest = os.path.join("Case_Attachments", casename, "tweets", screenname, "tweeted_video", remove_video_url)
video_path = os.path.join("Case_Attachments", casename, "tweets", screenname, "tweeted_video")
if not os.path.exists(video_path):
os.makedirs(video_path)
try:
print("Downloading... %s" % posted_video_dest)
urllib.request.urlretrieve(videourl, filename = posted_video_dest)
tweeted_video = open(posted_video_dest, "rb").read()
video_hash = dump_hash(tweeted_video)
except urllib.error.URLError as e:
print("Error downloading file... %s ... from TweetID: %s" % (posted_video_dest, str(Tweetid)))
posted_image_dest = "ERROR DOWNLOADING FILE"
tweeted_video = None
video_hash = None
pass
except:
print("Error downloading file... %s ... from TweetID: %s" % (posted_video_dest, str(Tweetid)))
posted_image_dest = "ERROR DOWNLOADING FILE"
tweeted_video = None
video_hash = None
pass
else:
posted_video_dest = None
videotype= None
videourl= None
tweeted_video = None
video_hash = None
else:
posted_video_dest = None
videotype= None
videourl= None
tweeted_video = None
video_hash = None
# End Video Check
# End new video Code
if not status.entities['urls']:
url_in_tweet = None
else:
url_in_tweet = str(status.entities['urls'][0]['url'])
#Grab the current date and time
now = time.strftime("%c")
# Starts the raw hash process
status_dump = str(status).encode('utf-8')
status_hash = dump_hash(status_dump)
bookmark = None
# Writes the data collected in the variables to the database
try:
c.execute("INSERT INTO " + table_name + "(tweet_id, date_mined, screen_name, user_id, users_name, \
created_at_UTC, is_retweet, retweeted_times,text, place_name, \
country_code, country, bounding_box, source_tweeted, geo, \
in_reply_to_user, inreply_statusid, posted_image_dest, \
tweeted_image, image_hash, media_type, media_url, media_id, \
posted_video_dest, tweeted_video, video_hash, video_type, \
video_url, url_in_tweet, status, status_hash, bookmark) \
VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)" , \
(Tweetid,
now,
screenname,
userid,
usersname,
tweettime,
is_retweet,
retweeted_times,
tweet,
placename,
countrycode,
country,
boundingbox,
Tweet_source,
geo,
inreplytouser,
inreply_tostatus,
posted_image_dest,
tweeted_image,
image_hash,
mediatype,
mediaurl,
mediaid,
posted_video_dest,
tweeted_video,
video_hash,
videotype,
videourl,
url_in_tweet,
str(status),
status_hash,
bookmark))
conn.commit()
print(str(Tweetid), "--- Successfully added to the Database")
except lite.IntegrityError:
print(str(Tweetid), "--- Record already Exists")
if __name__ == '__main__':
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
#---------
#---------
#--------- Be sure to enter a unique case name -- This is handled in TwitterMiner_settings now
#---------
#---------
casename = CASE_NAME
dbname = casename + ".db"
conn = lite.connect(dbname)
c = conn.cursor()
screenname = USER_NAME
table_name = USER_NAME + "_Tweets"
create_db(table_name)
get_all_tweets(screenname)
print("\n Finished collecting Tweets from user --- %s" % screenname)
print("Results are stored in " + casename)
#validate_image('T_Tweets') | import urllib.request
import tweepy
from TwitterMiner_Keys import * |
HeaderWeeks.tsx |
/** Return array of week day names.
*
* getWeekDays() --> ['Su', 'Mo', 'Tu', 'We', 'Th', 'Fr', 'Sa']
*/
const getWeekDays = (m, localization) => {
const weekDays = [];
const day = localization ? m().locale(localization).startOf('week') : m().startOf('week');
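// moment objects are mutable: add() below advances this same instance each iteration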
for (let i = 0; i < 7; i++) {
weekDays[i] = day.format('dd');
day.add(1, 'd');
}
return weekDays;
};
const cellStyle = {
border: 'none',
borderBottom: '1px solid rgba(34,36,38,.1)',
};
const getWeekDayCells = (m, localization) => getWeekDays(m, localization).map((weekDay) => (
<Table.HeaderCell
key={weekDay}
style={cellStyle}
colSpan='1'>
{weekDay}
</Table.HeaderCell>
));
export interface HeaderWeeksProps {
/** Moment date localization */
localization?: string;
}
function HeaderWeeks(props: HeaderWeeksProps) {
const {
localization,
} = props;
return (
<Table.Row>
{ getWeekDayCells(moment, localization) }
</Table.Row>
);
}
export default HeaderWeeks; | import moment from 'moment';
import * as React from 'react';
import { Table } from 'semantic-ui-react'; |
|
const.go | package predefined
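// Each const block below relies on iota, which resets to 0 at the start of
// every const declaration and increments by one per line.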
const (
AuditStatusInProgress = iota // in progress
AuditStatusReject //rejected
AuditStatusPass //approved
)
const (
ProfileInfoSexUnknown = iota //unknown
ProfileInfoSexFemale //female
ProfileInfoSexMale //male
)
const (
ProfileInfoBasicMarriageUnmarried = iota //unmarried
ProfileInfoBasicMarriageDivorced //divorced
ProfileInfoBasicMarriageMarried //married
)
const (
ProfileInfoAddressTypeHometown = iota //hometown
ProfileInfoAddressTypeResidence //place of residence
)
const (
ProfileInfoConstellationAries = iota //Aries: Mar 21 ~ Apr 20 | ProfileInfoConstellationCancer //Cancer: Jun 22 ~ Jul 22
ProfileInfoConstellationLeo //Leo: Jul 23 ~ Aug 23
ProfileInfoConstellationVirgo //Virgo: Aug 24 ~ Sep 23
ProfileInfoConstellationLibra //Libra: Sep 24 ~ Oct 23
ProfileInfoConstellationScorpio //Scorpio: Oct 24 ~ Nov 22
ProfileInfoConstellationSagittarius //Sagittarius: Nov 23 ~ Dec 21
ProfileInfoConstellationCapricorn //Capricorn: Dec 22 ~ Jan 20
ProfileInfoConstellationAquarius //Aquarius: Jan 21 ~ Feb 19
ProfileInfoConstellationPisces //Pisces: Feb 20 ~ Mar 20
)
const (
ProfileInfoProfessionAnnualIncome5_15 = iota //annual salary: 50k~150k CNY
ProfileInfoProfessionAnnualIncome15_30 //annual salary: 150k~300k CNY
ProfileInfoProfessionAnnualIncome30_50 //annual salary: 300k~500k CNY
ProfileInfoProfessionAnnualIncome50_100 //annual salary: 500k~1M CNY
ProfileInfoProfessionAnnualIncome100_500 //annual salary: 1M~5M CNY
ProfileInfoProfessionAnnualIncome500_ //annual salary: over 5M CNY
)
const (
ProfileQuestionTypeValues = iota //personal values
)
const (
ProfileInfoIntroductionTypeAboutMe = iota //about me
ProfileInfoIntroductionTypeFamilyBackground //family background
ProfileInfoIntroductionTypeHobbies //hobbies and interests
)
const (
ProfileProofProfessionTypeCompanySocialSecurity = iota //social security record
ProfileProofProfessionTypeCompanyEnterpriseOfficeSoftware //enterprise office software
ProfileProofProfessionTypeCompanyLicense //business license
ProfileProofProfessionTypeCompanyWorkPermit //work permit
ProfileProofProfessionTypeCompanyPaySlip //pay slip
ProfileProofProfessionTypeCompanyOffer //offer letter
ProfileProofProfessionTypeStudent //student
)
const (
ProfileProofEducationTypeCHSI = iota //CHSI (China Higher Education Student Information)
ProfileProofEducationTypeDiplomaImage //photo of diploma certificate
ProfileProofEducationTypeDiplomaID //diploma certificate number
ProfileProofEducationTypeCSCSE //CSCSE (Ministry of Education study-abroad service center) certification
ProfileProofEducationTypeOldCSCSE //CSCSE certification (legacy version)
) | ProfileInfoConstellationTaurus //Taurus: Apr 21 ~ May 21
ProfileInfoConstellationGemini //Gemini: May 22 ~ Jun 21 |
yum.py | '''
Manage yum packages and repositories. Note that yum package names are case-sensitive.
'''
from __future__ import unicode_literals
from pyinfra.api import operation
from . import files
from .util.packaging import ensure_packages, ensure_rpm, ensure_yum_repo
@operation
def key(state, host, key):
'''
Add yum gpg keys with ``rpm``.
+ key: filename or URL
Note:
this operation always returns one command and does not check existing state
Example:
.. code:: python
linux_id = host.fact.linux_distribution['release_meta'].get('ID')
yum.key(
{'Add the Docker CentOS gpg key'},
'https://download.docker.com/linux/{}/gpg'.format(linux_id),
)
'''
yield 'rpm --import {0}'.format(key)
@operation
def repo(
state, host, name, baseurl=None,
present=True, description=None, enabled=True, gpgcheck=True, gpgkey=None,
):
# NOTE: if updating this docstring also update `dnf.repo`
# COMPAT: on v1 rearrange baseurl/present kwargs
'''
Add/remove/update yum repositories.
+ name: URL or name for the ``.repo`` file
+ baseurl: the baseurl of the repo (if ``name`` is not a URL)
+ present: whether the ``.repo`` file should be present
+ description: optional verbose description
+ enabled: whether this repo is enabled
+ gpgcheck: whether to set ``gpgcheck=1``
+ gpgkey: the URL to the gpg key for this repo
``Baseurl``/``description``/``gpgcheck``/``gpgkey``:
These are only valid when ``name`` is a filename (i.e. not a URL). This is
for manual construction of repository files. Use a URL to download and
install remote repository files.
Examples:
.. code:: python
# Download a repository file
yum.repo(
{'Install Docker-CE repo via URL'},
'https://download.docker.com/linux/centos/docker-ce.repo',
)
# Create the repository file from baseurl/etc
yum.repo(
{'Add the Docker CentOS repo'},
name='DockerCE',
baseurl='https://download.docker.com/linux/centos/7/$basearch/stable',
)
'''
yield ensure_yum_repo(
state, host, files,
name, baseurl, present, description, enabled, gpgcheck, gpgkey,
'yum-config-manager',
)
@operation
def rpm(state, host, source, present=True):
# NOTE: if updating this docstring also update `dnf.rpm`
|
@operation
def update(state, host):
'''
Updates all yum packages.
'''
yield 'yum update -y'
_update = update # noqa: E305 (for use below where update is a kwarg)
@operation
def packages(
state, host, packages=None,
present=True, latest=False, update=False, clean=False, nobest=False,
extra_install_args='', extra_uninstall_args='',
):
'''
Install/remove/update yum packages & updates.
+ packages: list of packages to ensure
+ present: whether the packages should be installed
+ latest: whether to upgrade packages without a specified version
+ update: run yum update
+ clean: run yum clean
+ nobest: add the no best option to install
+ extra_install_args: additional arguments to the yum install command
+ extra_uninstall_args: additional arguments to the yum uninstall command
Versions:
Package versions can be pinned like yum: ``<pkg>-<version>``
Examples:
.. code:: python
# Update package list and install packages
yum.packages(
{'Install Vim and Vim enhanced'},
['vim-enhanced', 'vim'],
update=True,
)
# Install the latest versions of packages (always check)
yum.packages(
{'Install latest Vim'},
['vim'],
latest=True,
)
'''
if clean:
yield 'yum clean all'
if update:
yield _update(state, host)
nobest_option = ''
if nobest:
nobest_option = ' --nobest'
if extra_install_args != '':
extra_install_args = ' ' + extra_install_args
if extra_uninstall_args != '':
extra_uninstall_args = ' ' + extra_uninstall_args
yield ensure_packages(
packages, host.fact.rpm_packages, present,
install_command='yum install -y' + nobest_option + extra_install_args,
uninstall_command='yum remove -y' + extra_uninstall_args,
upgrade_command='yum update -y',
version_join='-',
latest=latest,
)
| '''
Add/remove ``.rpm`` file packages.
+ source: filename or URL of the ``.rpm`` package
+ present: whether or not the package should exist on the system
URL sources with ``present=False``:
If the ``.rpm`` file isn't downloaded, pyinfra can't remove any existing
package as the file won't exist until mid-deploy.
Example:
.. code:: python
yum.rpm(
{'Install EPEL rpm to enable EPEL repo'},
'https://dl.fedoraproject.org/pub/epel/epel-release-latest-'
'{{ host.fact.linux_distribution.major }}.noarch.rpm',
)
'''
yield ensure_rpm(state, host, files, source, present, 'yum') |
transfer.rs |
use ash::vk;
use ash::version::DeviceV1_0;
use crate::command::record::{ GsCmdRecorder, GsVkCommandType };
use crate::command::traits::IntoVKBarrier;
use crate::image::ImageBarrierCI;
use crate::utils::phantom::Transfer;
impl GsVkCommandType for Transfer {
// Empty...
}
impl GsCmdTransferApi for GsCmdRecorder<Transfer> {
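// Each method records one raw Vulkan transfer command into self.cmd_handle;
// the unsafe blocks assume the command buffer is in the recording state.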
fn copy_buf2buf(&self, src_buffer_handle: vk::Buffer, dst_buffer_handle: vk::Buffer, regions: &[vk::BufferCopy]) -> &Self {
unsafe {
self.device.logic.handle.cmd_copy_buffer(self.cmd_handle, src_buffer_handle, dst_buffer_handle, regions);
} self
}
fn copy_buf2img(&self, src_handle: vk::Buffer, dst_handle: vk::Image, dst_layout: vk::ImageLayout, regions: &[vk::BufferImageCopy]) -> &Self {
unsafe {
self.device.logic.handle.cmd_copy_buffer_to_image(self.cmd_handle, src_handle, dst_handle, dst_layout, regions);
} self
}
fn | (&self, src_handle: vk::Image, src_layout: vk::ImageLayout, dst_buffer: vk::Buffer, regions: &[vk::BufferImageCopy]) -> &Self {
unsafe {
self.device.logic.handle.cmd_copy_image_to_buffer(self.cmd_handle, src_handle, src_layout, dst_buffer, regions);
} self
}
fn copy_img2img(&self,src_handle: vk::Image, src_layout: vk::ImageLayout, dst_handle: vk::Image, dst_layout: vk::ImageLayout, regions: &[vk::ImageCopy]) -> &Self {
unsafe {
self.device.logic.handle.cmd_copy_image(self.cmd_handle, src_handle, src_layout, dst_handle, dst_layout, regions);
} self
}
fn image_pipeline_barrier(&self, src_stage: vk::PipelineStageFlags, dst_stage: vk::PipelineStageFlags, dependencies: vk::DependencyFlags, image_barriers: Vec<ImageBarrierCI>) -> &Self {
let barriers: Vec<vk::ImageMemoryBarrier> = image_barriers.into_iter()
.map(|b| b.into_barrier()).collect();
unsafe {
self.device.logic.handle.cmd_pipeline_barrier(self.cmd_handle, src_stage, dst_stage, dependencies, &[], &[], &barriers);
} self
}
fn blit_image(&self, src_handle: vk::Image, src_layout: vk::ImageLayout, dst_handle: vk::Image, dst_layout: vk::ImageLayout, regions: &[vk::ImageBlit], filter: vk::Filter) -> &Self {
unsafe {
self.device.logic.handle.cmd_blit_image(self.cmd_handle, src_handle, src_layout, dst_handle, dst_layout, regions, filter);
} self
}
}
pub trait GsCmdTransferApi {
fn copy_buf2buf(&self, src_buffer_handle: vk::Buffer, dst_buffer_handle: vk::Buffer, regions: &[vk::BufferCopy]) -> &Self;
fn copy_buf2img(&self, src_handle: vk::Buffer, dst_handle: vk::Image, dst_layout: vk::ImageLayout, regions: &[vk::BufferImageCopy]) -> &Self;
fn copy_img2buf(&self, src_handle: vk::Image, src_layout: vk::ImageLayout, dst_buffer: vk::Buffer, regions: &[vk::BufferImageCopy]) -> &Self;
fn copy_img2img(&self,src_handle: vk::Image, src_layout: vk::ImageLayout, dst_handle: vk::Image, dst_layout: vk::ImageLayout, regions: &[vk::ImageCopy]) -> &Self;
fn image_pipeline_barrier(&self, src_stage: vk::PipelineStageFlags, dst_stage: vk::PipelineStageFlags, dependencies: vk::DependencyFlags, image_barriers: Vec<ImageBarrierCI>) -> &Self;
fn blit_image(&self, src_handle: vk::Image, src_layout: vk::ImageLayout, dst_handle: vk::Image, dst_layout: vk::ImageLayout, regions: &[vk::ImageBlit], filter: vk::Filter) -> &Self;
}
| copy_img2buf |
play_db.py | # ==================================
# Author : fang
# Time : 2020/4/8 pm 8:55
# Email : [email protected]
# File : play_db.py
# Software : PyCharm
# ==================================
import datetime
DB = {}
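# Module-level dict shared by every PlayDB created with inherited=True (see play_global below)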
class PlayDB:
def __init__(self, inherited=False):
if inherited:
self.__store = DB # shared temporary data store
else:
self.__store = {}
@staticmethod
def __timestamp():
time_stamp = | ve(self, **kwargs):
tc = 0
for k, v in kwargs.items():
tc += 1
if not self.key_is_exists(k):
db_data = {"value": v, "data_stamp": self.__timestamp()}
else:
db_data = {"value": v, "data_stamp": self.__store[k]["data_stamp"]}
self.__store[k] = db_data
return tc
def delete(self, key):
if self.key_is_exists(key):
tv = self.__store.get(key)
del self.__store[key]
return {key: tv}
return False
def __get_or_consume(self, key, _all=False, _d=False):
if self.key_is_exists(key):
if not _all:
this_value = self.__store.get(key)["value"]
else:
this_value = self.__store.get(key)
if _d:
self.delete(key)
return this_value
raise ValueError(f"{key} does not exists in store")
def update(self, **kwargs):
return self.save(**kwargs)
def get(self, key):
return self.__get_or_consume(key, _all=False, _d=False)
def consume(self, key):
return self.__get_or_consume(key, _all=False, _d=True)
def key_is_exists(self, key):
return key in self.__store
play_global = PlayDB(inherited=True)
| datetime.datetime.now().timestamp()
time_stamp = int(round(time_stamp * 3000))
return time_stamp
def sa |
lib.rs | #![deny(rustdoc::broken_intra_doc_links, rustdoc::bare_urls, rust_2018_idioms)]
#![warn(
missing_copy_implementations,
missing_debug_implementations,
clippy::explicit_iter_loop,
clippy::future_not_send,
clippy::use_self,
clippy::clone_on_ref_ptr
)]
use data_types::{
chunk_metadata::{ChunkAddr, ChunkId, ChunkLifecycleAction, ChunkOrder, ChunkStorage},
database_rules::LifecycleRules,
DatabaseName,
};
use internal_types::access::AccessMetrics;
use std::sync::Arc;
use tracker::TaskTracker;
mod guard;
pub use guard::*;
mod policy;
pub use policy::*;
use time::{Time, TimeProvider};
/// A trait that encapsulates the database logic that is automated by `LifecyclePolicy`
pub trait LifecycleDb {
type Chunk: LockableChunk;
type Partition: LockablePartition;
/// Return the in-memory size of the database. We expect this
/// to change from call to call as chunks are dropped
fn buffer_size(&self) -> usize;
/// Returns the lifecycle policy
fn rules(&self) -> LifecycleRules;
/// Returns a list of lockable partitions in the database
fn partitions(&self) -> Vec<Self::Partition>;
/// Return the database name.
fn name(&self) -> DatabaseName<'static>;
/// Return the time provider for this database
fn time_provider(&self) -> &Arc<dyn TimeProvider>;
}
/// A `LockablePartition` is a wrapper around a `LifecyclePartition` that allows
/// for planning and executing lifecycle actions on the partition
pub trait LockablePartition: Sized + std::fmt::Display {
type Partition: LifecyclePartition;
type Chunk: LockableChunk;
type PersistHandle: PersistHandle + Send + Sync + 'static;
type Error: std::error::Error + Send + Sync;
/// Acquire a shared read lock on the chunk
fn read(&self) -> LifecycleReadGuard<'_, Self::Partition, Self>;
/// Acquire an exclusive write lock on the chunk
fn write(&self) -> LifecycleWriteGuard<'_, Self::Partition, Self>;
/// Returns a specific chunk
fn chunk(
s: &LifecycleReadGuard<'_, Self::Partition, Self>,
chunk_id: ChunkId,
) -> Option<Self::Chunk>;
/// Return a list of lockable chunks in this partition.
///
/// This must be ordered by `(order, id)`.
fn chunks(s: &LifecycleReadGuard<'_, Self::Partition, Self>) -> Vec<Self::Chunk>;
/// Compact chunks into a single read buffer chunk
///
/// TODO: Encapsulate these locks into a CatalogTransaction object
fn compact_chunks(
partition: LifecycleWriteGuard<'_, Self::Partition, Self>,
chunks: Vec<LifecycleWriteGuard<'_, <Self::Chunk as LockableChunk>::Chunk, Self::Chunk>>,
) -> Result<TaskTracker<<Self::Chunk as LockableChunk>::Job>, Self::Error>;
/// Returns a PersistHandle for the provided partition, and the
/// timestamp up to which to flush
///
/// Returns None if there is a persistence operation in flight, or
/// if there are no persistable windows.
///
/// If `force` is `true` will persist all unpersisted data regardless of arrival time
fn prepare_persist(
partition: &mut LifecycleWriteGuard<'_, Self::Partition, Self>,
force: bool,
) -> Option<Self::PersistHandle>;
/// Split and persist chunks.
///
/// Combines and deduplicates the data in `chunks` into two new chunks:
///
/// 1. A read buffer chunk that contains any rows with timestamps
/// prior to `flush_timestamp`
///
/// 2. A read buffer chunk (also written to the object store) with
/// all other rows
///
/// TODO: Encapsulate these locks into a CatalogTransaction object
fn persist_chunks(
partition: LifecycleWriteGuard<'_, Self::Partition, Self>,
chunks: Vec<LifecycleWriteGuard<'_, <Self::Chunk as LockableChunk>::Chunk, Self::Chunk>>,
handle: Self::PersistHandle,
) -> Result<TaskTracker<<Self::Chunk as LockableChunk>::Job>, Self::Error>;
/// Drops a chunk from the partition
fn drop_chunk(
partition: LifecycleWriteGuard<'_, Self::Partition, Self>,
chunk: LifecycleWriteGuard<'_, <Self::Chunk as LockableChunk>::Chunk, Self::Chunk>,
) -> Result<TaskTracker<<Self::Chunk as LockableChunk>::Job>, Self::Error>;
}
/// A `LockableChunk` is a wrapper around a `LifecycleChunk` that allows for
/// planning and executing lifecycle actions on the chunk
///
/// Specifically a read lock can be obtained, a decision made based on the chunk's
/// data, and then a lifecycle action optionally triggered, all without allowing
/// concurrent modification
///
/// See the module level documentation for the guard module for more information
/// on why this trait is the way it is
///
pub trait LockableChunk: Sized {
type Chunk: LifecycleChunk;
type Job: Sized + Send + Sync + 'static;
type Error: std::error::Error + Send + Sync;
/// Acquire a shared read lock on the chunk
fn read(&self) -> LifecycleReadGuard<'_, Self::Chunk, Self>;
/// Acquire an exclusive write lock on the chunk
fn write(&self) -> LifecycleWriteGuard<'_, Self::Chunk, Self>;
/// Remove the copy of the Chunk's data from the read buffer.
///
/// Note that this can only be called for persisted chunks
/// (otherwise the read buffer may contain the *only* copy of this
/// chunk's data). In order to drop un-persisted chunks, |
fn id(&self) -> ChunkId;
fn order(&self) -> ChunkOrder;
}
pub trait LifecyclePartition {
fn partition_key(&self) -> &str;
/// Returns true if all chunks in the partition are persisted.
fn is_persisted(&self) -> bool;
/// Returns an approximation of the number of rows that can be persisted
fn persistable_row_count(&self) -> usize;
/// Returns the age of the oldest unpersisted write
fn minimum_unpersisted_age(&self) -> Option<Time>;
}
/// The lifecycle operates on chunks implementing this trait
pub trait LifecycleChunk {
fn lifecycle_action(&self) -> Option<&TaskTracker<ChunkLifecycleAction>>;
fn clear_lifecycle_action(&mut self);
/// Returns the min timestamp contained within this chunk
fn min_timestamp(&self) -> Time;
/// Returns the access metrics for this chunk
fn access_metrics(&self) -> AccessMetrics;
fn time_of_last_write(&self) -> Time;
fn addr(&self) -> &ChunkAddr;
fn storage(&self) -> ChunkStorage;
fn row_count(&self) -> usize;
}
/// The trait for a persist handle
pub trait PersistHandle {
/// Any unpersisted chunks containing rows with timestamps less than or equal to this
/// must be included in the corresponding `LockablePartition::persist_chunks` call
fn timestamp(&self) -> Time;
} | /// [`drop_chunk`](LockablePartition::drop_chunk) must be used.
fn unload_read_buffer(s: LifecycleWriteGuard<'_, Self::Chunk, Self>)
-> Result<(), Self::Error>; |
main.go | package main
import (
_ "crypto/md5"
"fmt"
"log"
"net/http"
"time"
"github.com/gorilla/mux"
"github.com/shaj13/libcache"
_ "github.com/shaj13/libcache/fifo"
"github.com/shaj13/go-guardian/v2/auth"
"github.com/shaj13/go-guardian/v2/auth/strategies/digest"
)
// Usage:
// curl --digest --user admin:admin http://127.0.0.1:8080/v1/book/1449311601
var strategy *digest.Digest
func init() {
var c libcache.Cache
c = libcache.FIFO.New(10)
c.SetTTL(time.Minute * 3)
c.RegisterOnExpired(func(key, _ interface{}) {
c.Delete(key)
})
strategy = digest.New(validateUser, c)
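// the FIFO cache above backs the digest strategy's cached authentication state;
// entries are evicted after the 3-minute TTL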
}
func main() {
router := mux.NewRouter()
router.HandleFunc("/v1/book/{id}", middleware(http.HandlerFunc(getBookAuthor))).Methods("GET")
log.Println("server started and listening on http://127.0.0.1:8080")
log.Fatal(http.ListenAndServe("127.0.0.1:8080", router))
}
func getBookAuthor(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
id := vars["id"]
books := map[string]string{
"1449311601": "Ryan Boyd",
"148425094X": "Yvonne Wilson",
"1484220498": "Prabath Siriwarden",
}
body := fmt.Sprintf("Author: %s \n", books[id])
w.Write([]byte(body))
}
func validateUser(userName string) (string, auth.Info, error) {
// here you would connect to a db or another service to fetch and validate the user.
if userName == "admin" {
return "admin", auth.NewDefaultUser("admin", "1", nil, nil), nil
}
return "", nil, fmt.Errorf("Invalid credentials")
}
func middleware(next http.Handler) http.HandlerFunc {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
log.Println("Executing Auth Middleware")
user, err := strategy.Authenticate(r.Context(), r)
if err != nil |
log.Printf("User %s Authenticated\n", user.GetUserName())
next.ServeHTTP(w, r)
})
}
| {
code := http.StatusUnauthorized
w.Header().Add("WWW-Authenticate", strategy.GetChallenge())
http.Error(w, http.StatusText(code), code)
fmt.Println("send error", err)
return
} |
with_scientific.rs | use super::*;
use std::sync::Arc;
use proptest::strategy::{BoxedStrategy, Just, Strategy};
use liblumen_alloc::erts::process::Process;
use liblumen_alloc::erts::term::prelude::*;
use crate::erlang::charlist_to_string::charlist_to_string;
use crate::erlang::float_to_list_1;
#[test]
fn with_20_digits_is_the_same_as_float_to_list_1() {
with_process_arc(|arc_process| {
let digits = arc_process.integer(20).unwrap();
let options = arc_process
.list_from_slice(&[arc_process.tuple_from_slice(&[tag(), digits]).unwrap()])
.unwrap();
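// `options` is the Erlang option list [{scientific, 20}], the same form
// accepted by erlang:float_to_list/2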
let zero = arc_process.float(0.0).unwrap();
assert_eq!(
native(&arc_process, zero, options).unwrap(),
float_to_list_1::native(&arc_process, zero).unwrap()
);
let one_tenth = arc_process.float(0.1).unwrap();
assert_eq!(
native(&arc_process, one_tenth, options).unwrap(),
float_to_list_1::native(&arc_process, one_tenth).unwrap()
);
});
}
#[test]
fn returns_list_with_coefficient_e_exponent() {
with_process_arc(|arc_process| {
let float = arc_process.float(1234567890.0987654321).unwrap();
assert_eq!(
native(&arc_process, float, options(&arc_process, 0)),
Ok(arc_process.charlist_from_str("1e+09").unwrap())
);
assert_eq!(
native(&arc_process, float, options(&arc_process, 1)),
Ok(arc_process.charlist_from_str("1.2e+09").unwrap())
);
assert_eq!(
native(&arc_process, float, options(&arc_process, 2)),
Ok(arc_process.charlist_from_str("1.23e+09").unwrap())
);
assert_eq!(
native(&arc_process, float, options(&arc_process, 3)),
Ok(arc_process.charlist_from_str("1.235e+09").unwrap())
);
assert_eq!(
native(&arc_process, float, options(&arc_process, 4)),
Ok(arc_process.charlist_from_str("1.2346e+09").unwrap())
);
assert_eq!(
native(&arc_process, float, options(&arc_process, 5)),
Ok(arc_process.charlist_from_str("1.23457e+09").unwrap())
);
assert_eq!(
native(&arc_process, float, options(&arc_process, 6)),
Ok(arc_process.charlist_from_str("1.234568e+09").unwrap())
);
assert_eq!(
native(&arc_process, float, options(&arc_process, 7)),
Ok(arc_process.charlist_from_str("1.2345679e+09").unwrap())
);
assert_eq!(
native(&arc_process, float, options(&arc_process, 8)),
Ok(arc_process.charlist_from_str("1.23456789e+09").unwrap())
);
assert_eq!(
native(&arc_process, float, options(&arc_process, 9)),
Ok(arc_process.charlist_from_str("1.234567890e+09").unwrap())
);
assert_eq!(
native(&arc_process, float, options(&arc_process, 10)),
Ok(arc_process.charlist_from_str("1.2345678901e+09").unwrap())
);
assert_eq!(
native(&arc_process, float, options(&arc_process, 11)),
Ok(arc_process.charlist_from_str("1.23456789010e+09").unwrap())
);
assert_eq!(
native(&arc_process, float, options(&arc_process, 12)),
Ok(arc_process.charlist_from_str("1.234567890099e+09").unwrap())
);
assert_eq!(
native(&arc_process, float, options(&arc_process, 13)),
Ok(arc_process
.charlist_from_str("1.2345678900988e+09")
.unwrap())
);
assert_eq!(
native(&arc_process, float, options(&arc_process, 14)),
Ok(arc_process
.charlist_from_str("1.23456789009877e+09")
.unwrap())
);
assert_eq!(
native(&arc_process, float, options(&arc_process, 15)),
Ok(arc_process
.charlist_from_str("1.234567890098765e+09")
.unwrap())
);
assert_eq!(
native(&arc_process, float, options(&arc_process, 16)),
Ok(arc_process
.charlist_from_str("1.2345678900987654e+09")
.unwrap())
);
assert_eq!(
native(&arc_process, float, options(&arc_process, 17)),
Ok(arc_process
.charlist_from_str("1.23456789009876537e+09")
.unwrap())
);
assert_eq!(
native(&arc_process, float, options(&arc_process, 18)),
Ok(arc_process
.charlist_from_str("1.234567890098765373e+09")
.unwrap())
);
assert_eq!(
native(&arc_process, float, options(&arc_process, 19)),
Ok(arc_process
.charlist_from_str("1.2345678900987653732e+09")
.unwrap())
);
assert_eq!(
native(&arc_process, float, options(&arc_process, 20)),
Ok(arc_process
.charlist_from_str("1.23456789009876537323e+09")
.unwrap())
);
assert_eq!(
native(&arc_process, float, options(&arc_process, 21)),
Ok(arc_process
.charlist_from_str("1.234567890098765373230e+09")
.unwrap())
);
});
}
#[test]
fn always_includes_e() {
run!(
|arc_process| {
(
Just(arc_process.clone()),
strategy::term::float(arc_process.clone()),
digits(arc_process.clone()).prop_map(move |digits| {
arc_process
.list_from_slice(&[arc_process.tuple_from_slice(&[tag(), digits]).unwrap()])
.unwrap()
}),
)
},
|(arc_process, float, options)| {
let result = native(&arc_process, float, options);
prop_assert!(result.is_ok());
let list = result.unwrap();
let string: String = charlist_to_string(list).unwrap();
prop_assert!(string.contains('e'));
Ok(())
},
);
}
#[test]
fn always_includes_sign_of_exponent() {
run!(
|arc_process| {
(
Just(arc_process.clone()),
strategy::term::float(arc_process.clone()),
digits(arc_process.clone()).prop_map(move |digits| {
arc_process
.list_from_slice(&[arc_process.tuple_from_slice(&[tag(), digits]).unwrap()])
.unwrap()
}),
)
},
|(arc_process, float, options)| {
let result = native(&arc_process, float, options);
prop_assert!(result.is_ok());
let list = result.unwrap();
let string: String = charlist_to_string(list).unwrap();
let part_vec: Vec<&str> = string.splitn(2, 'e').collect();
prop_assert_eq!(part_vec.len(), 2);
let sign = part_vec[1].chars().nth(0).unwrap();
prop_assert!(sign == '+' || sign == '-');
Ok(())
},
);
}
#[test]
fn exponent_is_at_least_2_digits() {
run!(
|arc_process| {
(
Just(arc_process.clone()),
strategy::term::float(arc_process.clone()),
digits(arc_process.clone()).prop_map(move |digits| {
arc_process
.list_from_slice(&[arc_process.tuple_from_slice(&[tag(), digits]).unwrap()])
.unwrap()
}),
)
},
|(arc_process, float, options)| {
let result = native(&arc_process, float, options);
prop_assert!(result.is_ok());
let list = result.unwrap();
let string: String = charlist_to_string(list).unwrap();
let part_vec: Vec<&str> = string.splitn(2, 'e').collect();
prop_assert_eq!(part_vec.len(), 2);
prop_assert!(2 <= part_vec[1].chars().skip(1).count());
Ok(())
},
);
}
fn | (arc_process: Arc<Process>) -> BoxedStrategy<Term> {
(Just(arc_process.clone()), 0..=249)
.prop_map(|(arc_process, u)| arc_process.integer(u).unwrap())
.boxed()
}
fn options(process: &Process, digits: u8) -> Term {
process
.list_from_slice(&[process
.tuple_from_slice(&[tag(), process.integer(digits).unwrap()])
.unwrap()])
.unwrap()
}
fn tag() -> Term {
Atom::str_to_term("scientific")
}
| digits |
devices.py | class Device:
def __init__(self, id=None, token=None, platform=None, endpoint=None, created_at=None, updated_at=None):
| self.id = id
self.token = token
self.platform = platform
self.endpoint = endpoint
self.created_at = created_at
self.updated_at = updated_at |
|
task.py | # Robot to enter weekly sales data into the RobotSpareBin Industries Intranet.
import os
from Browser import Browser
from Browser.utils.data_types import SelectAttribute
from RPA.Excel.Files import Files
from RPA.HTTP import HTTP
from RPA.PDF import PDF
browser = Browser()
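# Browser() is the Playwright-based driver from the robotframework-browser package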
def | ():
browser.new_page("https://robotsparebinindustries.com/")
def log_in():
browser.type_text("css=#username", "maria")
browser.type_secret("css=#password", "thoushallnotpass")
browser.click("text=Log in")
def download_the_excel_file():
http = HTTP()
http.download(
url="https://robotsparebinindustries.com/SalesData.xlsx",
overwrite=True)
def fill_and_submit_the_form_for_one_person(sales_rep):
browser.type_text("css=#firstname", sales_rep["First Name"])
browser.type_text("css=#lastname", sales_rep["Last Name"])
browser.type_text("css=#salesresult", str(sales_rep["Sales"]))
browser.select_options_by(
"css=#salestarget",
SelectAttribute["value"],
str(sales_rep["Sales Target"]))
browser.click("text=Submit")
def fill_the_form_using_the_data_from_the_excel_file():
excel = Files()
excel.open_workbook("SalesData.xlsx")
sales_reps = excel.read_worksheet_as_table(header=True)
excel.close_workbook()
for sales_rep in sales_reps:
fill_and_submit_the_form_for_one_person(sales_rep)
def collect_the_results():
browser.take_screenshot(
filename=f"{os.getcwd()}/output/sales_summary.png",
selector="css=div.sales-summary")
def export_the_table_as_a_pdf():
sales_results_html = browser.get_property(
selector="css=#sales-results", property="outerHTML")
pdf = PDF()
pdf.html_to_pdf(sales_results_html, "output/sales_results.pdf")
def log_out():
browser.click("text=Log out")
def main():
try:
open_the_intranet_website()
log_in()
download_the_excel_file()
fill_the_form_using_the_data_from_the_excel_file()
collect_the_results()
export_the_table_as_a_pdf()
finally:
log_out()
browser.playwright.close()
if __name__ == "__main__":
main()
| open_the_intranet_website |
LaboratoriesList.tsx | import React, {useEffect, useContext} from 'react';
import Row from 'react-bootstrap/Row';
import {useNavigate} from 'react-router-dom';
import {LaboratoriesTable} from '../../components/Laboratory';
import {Button, LoadingContainer, ModalComponent} from '../../components/UI';
import {Action} from '../../components/UI/Table/Table';
import {Laboratory} from '../../containers/Laboratory/types';
import {useListLaboratoriesQuery, useDeleteLaboratoryMutation} from '../../graphql/generated/schema';
import {notificationBannerContext} from '../../state/NotificationBannerProvider';
const LaboratoriesList: React.FC = () => {
const navigate = useNavigate();
const [loading, setLoading] = React.useState<boolean>(true);
const [displayModal, setDisplayModal] = React.useState<boolean>(false);
const [lab, setLab] = React.useState<Laboratory>();
const [labs, setLabs] = React.useState<Laboratory[]>([]);
const {data, loading: retrievingInfo} = useListLaboratoriesQuery({fetchPolicy: 'network-only'});
const [deleteLaboratory] = useDeleteLaboratoryMutation({});
const {showErrorBanner, showSuccessBanner} = useContext(notificationBannerContext);
useEffect(() => {
if (data && data.listLaboratorys?.items) {
const labsList: Laboratory[] = [];
data?.listLaboratorys?.items.forEach((obj) => {
if (obj && !obj._deleted) {
labsList.push({
id: obj.id,
name: obj.name,
description: obj.description ? obj.description : '',
organizationId: obj.organizationID,
version: obj._version
});
} | });
setLabs(labsList);
}
setLoading(retrievingInfo);
}, [data]);
const handleLaboratoryAction = (index: number, action: Action) => {
switch (action) {
case Action.Edit:
navigate('/lab-edition', {state: {laboratoryId: labs[index].id}});
break;
case Action.Delete:
setDisplayModal(true);
setLab(labs[index]);
break;
}
};
const handleDisplayDeleteModal = (display: boolean) => {
setDisplayModal(display);
setLab({
id: '',
name: '',
description: '',
organizationId: '',
version: null
});
};
const handleAcceptDelete = () => {
if (lab?.id) {
deleteLaboratory({
variables: {
input: {
id: lab?.id,
_version: lab.version
}
}
})
.then((response) => {
if (response.data?.deleteLaboratory?._deleted) {
setDisplayModal(false);
showSuccessBanner(`El laboratorio ${lab.name} fue eliminado exitosamente`);
}
})
.catch((error) => {
setDisplayModal(false);
showErrorBanner(`No se pudo eliminar el laboratorio ${lab.name}`);
});
}
};
return (
<LoadingContainer loading={loading}>
{
<ModalComponent
display={displayModal}
onDisplay={handleDisplayDeleteModal}
onSave={handleAcceptDelete}
title={lab?.name ? lab?.name : ''}>
<div>Está seguro de borrar el laboratorio {lab?.name}?</div>
</ModalComponent>
}
<Row className="section">
<h3 className="title">Laboratorios</h3>
</Row>
<Row className="section">
<LaboratoriesTable data={labs} onAction={handleLaboratoryAction} />
</Row>
<Row className="section">
<div className="justifyEnd">
<Button loading={false} onClick={() => navigate('/lab-creation')}>
Crear
</Button>
</div>
</Row>
</LoadingContainer>
);
};
export default LaboratoriesList; | |
main.js | ;
(function () { | //=require gtm-modules.js
//=require expose.js
})(); | 'use strict';
//=require globalVars.js |
wsgi.py | """
WSGI config for mis project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
if os.getenv("DJANGO_MODE").lower() != 'local':
from dj_static import Cling
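# Cling wraps the WSGI app so dj_static can serve static files in non-local deployments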
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mis.settings")
if os.getenv("DJANGO_MODE").lower() != 'local':
application = Cling(get_wsgi_application())
else:
| application = get_wsgi_application() |
|
common.rs | use crate::Settings;
use std::{
ffi::OsStr,
fs::{self, File},
io::{self, BufRead, BufReader, BufWriter, Write},
path::{Component, Path, PathBuf},
process::{Command, Stdio},
};
use termcolor::{Color, ColorChoice, ColorSpec, StandardStream, WriteColor};
/// Returns true if the path has a filename indicating that it is a high-density
/// "retina" icon. Specifically, returns true if the file stem ends with
/// "@2x" (a convention specified by the [Apple developer docs](
/// https://developer.apple.com/library/mac/documentation/GraphicsAnimation/Conceptual/HighResolutionOSX/Optimizing/Optimizing.html)).
pub fn is_retina<P: AsRef<Path>>(path: P) -> bool {
path
.as_ref()
.file_stem()
.and_then(OsStr::to_str)
.map(|stem| stem.ends_with("@2x"))
.unwrap_or(false)
}
/// Creates a new file at the given path, creating any parent directories as
/// needed.
pub fn create_file(path: &Path) -> crate::Result<BufWriter<File>> {
if let Some(parent) = path.parent() {
fs::create_dir_all(&parent)?;
}
let file = File::create(path)?;
Ok(BufWriter::new(file))
}
/// Makes a symbolic link to a directory.
#[cfg(unix)]
fn symlink_dir(src: &Path, dst: &Path) -> io::Result<()> {
std::os::unix::fs::symlink(src, dst)
}
/// Makes a symbolic link to a directory.
#[cfg(windows)]
fn symlink_dir(src: &Path, dst: &Path) -> io::Result<()> {
std::os::windows::fs::symlink_dir(src, dst)
}
/// Makes a symbolic link to a file.
#[cfg(unix)]
fn symlink_file(src: &Path, dst: &Path) -> io::Result<()> {
std::os::unix::fs::symlink(src, dst)
}
/// Makes a symbolic link to a file.
#[cfg(windows)]
fn symlink_file(src: &Path, dst: &Path) -> io::Result<()> {
std::os::windows::fs::symlink_file(src, dst)
}
/// Copies a regular file from one path to another, creating any parent
/// directories of the destination path as necessary. Fails if the source path
/// is a directory or doesn't exist.
pub fn copy_file(from: impl AsRef<Path>, to: impl AsRef<Path>) -> crate::Result<()> {
let from = from.as_ref();
let to = to.as_ref();
if !from.exists() {
return Err(crate::Error::GenericError(format!(
"{:?} does not exist",
from
)));
}
if !from.is_file() {
return Err(crate::Error::GenericError(format!(
"{:?} is not a file",
from
)));
}
let dest_dir = to.parent().expect("No data in parent");
fs::create_dir_all(dest_dir)?;
fs::copy(from, to)?;
Ok(())
}
/// Recursively copies a directory file from one path to another, creating any
/// parent directories of the destination path as necessary. Fails if the
/// source path is not a directory or doesn't exist, or if the destination path
/// already exists.
pub fn copy_dir(from: &Path, to: &Path) -> crate::Result<()> {
if !from.exists() {
return Err(crate::Error::GenericError(format!(
"{:?} does not exist",
from
)));
}
if !from.is_dir() {
return Err(crate::Error::GenericError(format!(
"{:?} is not a Directory",
from
)));
}
if to.exists() {
return Err(crate::Error::GenericError(format!(
"{:?} already exists",
from
)));
}
let parent = to.parent().expect("No data in parent");
fs::create_dir_all(parent)?;
for entry in walkdir::WalkDir::new(from) {
let entry = entry?;
debug_assert!(entry.path().starts_with(from));
let rel_path = entry.path().strip_prefix(from)?;
let dest_path = to.join(rel_path);
if entry.file_type().is_symlink() {
let target = fs::read_link(entry.path())?;
if entry.path().is_dir() {
symlink_dir(&target, &dest_path)?;
} else {
symlink_file(&target, &dest_path)?;
}
} else if entry.file_type().is_dir() {
fs::create_dir(dest_path)?;
} else {
fs::copy(entry.path(), dest_path)?;
}
}
Ok(())
}
/// Given a path (absolute or relative) to a resource file, returns the
/// relative path from the bundle resources directory where that resource
/// should be stored.
pub fn resource_relpath(path: &Path) -> PathBuf {
let mut dest = PathBuf::new();
for component in path.components() {
match component {
Component::Prefix(_) => {}
Component::RootDir => dest.push("_root_"),
Component::CurDir => {}
Component::ParentDir => dest.push("_up_"),
Component::Normal(string) => dest.push(string),
}
}
dest
}
/// Prints a message to stderr, in the same format that `cargo` uses,
/// indicating that we are creating a bundle with the given filename.
pub fn print_bundling(filename: &str) -> crate::Result<()> {
print_progress("Bundling", filename)
}
/// Prints a message to stderr, in the same format that `cargo` uses,
/// indicating that we have finished the given bundles.
pub fn print_finished(output_paths: &[PathBuf]) -> crate::Result<()> {
let pluralised = if output_paths.len() == 1 {
"bundle"
} else {
"bundles"
};
let msg = format!("{} {} at:", output_paths.len(), pluralised);
print_progress("Finished", &msg)?;
for path in output_paths {
println!(" {}", path.display());
}
Ok(())
}
/// Prints a formatted bundle progress to stderr.
fn print_progress(step: &str, msg: &str) -> crate::Result<()> {
let mut output = StandardStream::stderr(ColorChoice::Always);
let _ = output.set_color(ColorSpec::new().set_fg(Some(Color::Green)).set_bold(true));
write!(output, " {}", step)?;
output.reset()?;
writeln!(output, " {}", msg)?;
output.flush()?;
Ok(())
}
/// Prints a warning message to stderr, in the same format that `cargo` uses.
pub fn print_warning(message: &str) -> crate::Result<()> {
let mut output = StandardStream::stderr(ColorChoice::Always);
let _ = output.set_color(ColorSpec::new().set_fg(Some(Color::Yellow)).set_bold(true));
write!(output, "warning:")?;
output.reset()?;
writeln!(output, " {}", message)?;
output.flush()?;
Ok(())
}
/// Prints a Info message to stderr.
pub fn print_info(message: &str) -> crate::Result<()> {
let mut output = StandardStream::stderr(ColorChoice::Always);
let _ = output.set_color(ColorSpec::new().set_fg(Some(Color::Green)).set_bold(true));
write!(output, "info:")?;
output.reset()?;
writeln!(output, " {}", message)?;
output.flush()?;
Ok(())
}
/// Prints an error to stderr, in the same format that `cargo` uses.
pub fn | (error: &anyhow::Error) -> crate::Result<()> {
let mut output = StandardStream::stderr(ColorChoice::Always);
let _ = output.set_color(ColorSpec::new().set_fg(Some(Color::Red)).set_bold(true));
write!(output, "error:")?;
output.reset()?;
let _ = output.set_color(ColorSpec::new().set_bold(true));
writeln!(output, " {}", error)?;
output.reset()?;
for cause in error.chain().skip(1) {
writeln!(output, " Caused by: {}", cause)?;
}
// Add Backtrace once its stable.
// if let Some(backtrace) = error.backtrace() {
// writeln!(output, "{:?}", backtrace)?;
// }
output.flush()?;
std::process::exit(1)
}
pub fn execute_with_verbosity(cmd: &mut Command, settings: &Settings) -> crate::Result<()> {
let stdio_config = if settings.is_verbose() {
Stdio::piped
} else {
Stdio::null
};
let mut child = cmd
.stdout(stdio_config())
.stderr(stdio_config())
.spawn()
.expect("failed to spawn command");
if settings.is_verbose() {
let stdout = child.stdout.as_mut().expect("Failed to get stdout handle");
let reader = BufReader::new(stdout);
for line in reader.lines() {
println!("{}", line.expect("Failed to get line"));
}
}
let status = child.wait()?;
if status.success() {
Ok(())
} else {
Err(anyhow::anyhow!("command failed").into())
}
}
#[cfg(test)]
mod tests {
use super::{create_file, is_retina, resource_relpath};
use std::{io::Write, path::PathBuf};
#[test]
fn create_file_with_parent_dirs() {
let tmp = tempfile::tempdir().expect("Unable to create temp dir");
assert!(!tmp.path().join("parent").exists());
{
let mut file =
create_file(&tmp.path().join("parent/file.txt")).expect("Failed to create file");
writeln!(file, "Hello, world!").expect("unable to write file");
}
assert!(tmp.path().join("parent").is_dir());
assert!(tmp.path().join("parent/file.txt").is_file());
}
#[cfg(not(windows))]
#[test]
fn copy_dir_with_symlinks() {
// Create a directory structure that looks like this:
// ${TMP}/orig/
// sub/
// file.txt
// link -> sub/file.txt
let tmp = tempfile::tempdir().expect("unable to create tempdir");
{
let mut file =
create_file(&tmp.path().join("orig/sub/file.txt")).expect("Unable to create file");
writeln!(file, "Hello, world!").expect("Unable to write to file");
}
super::symlink_file(
&PathBuf::from("sub/file.txt"),
&tmp.path().join("orig/link"),
)
.expect("Failed to create symlink");
assert_eq!(
std::fs::read(tmp.path().join("orig/link"))
.expect("Failed to read file")
.as_slice(),
b"Hello, world!\n"
);
// Copy ${TMP}/orig to ${TMP}/parent/copy, and make sure that the
// directory structure, file, and symlink got copied correctly.
super::copy_dir(&tmp.path().join("orig"), &tmp.path().join("parent/copy"))
.expect("Failed to copy dir");
assert!(tmp.path().join("parent/copy").is_dir());
assert!(tmp.path().join("parent/copy/sub").is_dir());
assert!(tmp.path().join("parent/copy/sub/file.txt").is_file());
assert_eq!(
std::fs::read(tmp.path().join("parent/copy/sub/file.txt"))
.expect("Failed to read file")
.as_slice(),
b"Hello, world!\n"
);
assert!(tmp.path().join("parent/copy/link").exists());
assert_eq!(
std::fs::read_link(tmp.path().join("parent/copy/link")).expect("Failed to read from symlink"),
PathBuf::from("sub/file.txt")
);
assert_eq!(
std::fs::read(tmp.path().join("parent/copy/link"))
.expect("Failed to read from file")
.as_slice(),
b"Hello, world!\n"
);
}
#[test]
fn retina_icon_paths() {
assert!(!is_retina("data/icons/512x512.png"));
assert!(is_retina("data/icons/[email protected]"));
}
#[test]
fn resource_relative_paths() {
assert_eq!(
resource_relpath(&PathBuf::from("./data/images/button.png")),
PathBuf::from("data/images/button.png")
);
assert_eq!(
resource_relpath(&PathBuf::from("../../images/wheel.png")),
PathBuf::from("_up_/_up_/images/wheel.png")
);
assert_eq!(
resource_relpath(&PathBuf::from("/home/ferris/crab.png")),
PathBuf::from("_root_/home/ferris/crab.png")
);
}
}
| print_error |
index.js | Page({
/**
* Initial data for the page
*/
data: {
dataList: [
{
image: "/images/cat.png",
title: "猫",
describe:
"猫,属于猫科动物,分家猫、野猫,是全世界家庭中较为广泛的宠物。家猫的祖先据推测是起源于古埃及的沙漠猫,波斯的波斯猫,已经被人类驯化了3500年(但未像狗一样完全地被驯化)"
},
{
image: "/images/cat.png",
title: "猫",
describe:
"一般的猫:头圆、颜面部短,前肢五指,后肢四趾,趾端具锐利而弯曲的爪,爪能伸缩。夜行性。"
},
{
image: "/images/cat.png",
title: "猫",
describe:
"以伏击的方式猎捕其它动物,大多能攀援上树。猫的趾底有脂肪质肉垫,以免在行走时发出声响,捕猎时也不会惊跑鼠。行进时爪子处于收缩状态,防止爪被磨钝,在捕鼠和攀岩时会伸出来。"
},
{
image: "/images/cat.png",
title: "猫",
describe:
"猫是善于攀爬跳跃的动物,它的体内各种器官的平衡功能比其它动物要完善,当它从高处跳下来时,身体失去平衡,神经系统会迅速的指挥骨骼肌以最快的速度运动,将失去平衡的身体调整到正常的位置。"
},
{
image: "/images/cat.png",
title: "猫",
describe: "猫的九命也只是传说"
},
{
image: "/images/cat.png",
title: "猫",
describe: "虽然猫可以通过调整,把伤害减到最低"
},
{
image: "/images/cat.png",
title: "猫",
describe:
"猫的身体分为头、颈、躯干、四肢和尾五部分,大多数部位披毛,少数为无毛猫。"
},
{
image: "/images/cat.png", | "猫的趾底有脂肪质肉垫,因而行走无声,捕鼠时不会惊跑鼠,趾端生有锐利的指甲。爪能够缩进和伸出。猫在休息和行走时爪缩进去,只在捕鼠和攀爬时伸出来,防止指甲被磨钝。猫的前肢有五指,后肢有四指。"
},
{
image: "/images/cat.png",
title: "猫",
describe:
"猫的牙齿分为门齿、犬齿和臼齿。犬齿特别发达,尖锐如锥,适于咬死捕到的鼠类,臼齿的咀嚼面有尖锐的突起,适于把肉嚼碎;门齿不发达。"
}
]
}
}); | title: "猫",
describe: |
command_selection.rs | use crate::{styles::{self, ui_button}, ScpiCommand, devices, messages::{*, CommandMessage::*}};
use iced::{Container, Text, button, Length, Align, Row, Column, PickList, pick_list, TextInput, text_input};
/// Textfield, button and dropdown states held by the command screen
#[derive(Default)]
pub struct CommandScreen {
/// List of [`ScpiCommand`]s
commands: Vec<ScpiCommand>,
/// Currently selected [`ScpiCommand`]
command_selection: ScpiCommand,
/// State of the picklist for commands
command_list_state: pick_list::State<ScpiCommand>,
/// State of the command-send button
send_button_state: button::State,
/// List of available channels as [`u8`]
channels: Vec<u8>,
/// Currently selected channel
channel_selection: u8,
/// State of the picklist for channels
channel_list: pick_list::State<u8>,
/// List of arguments for the command
arguments: Vec<String>,
/// Currently selected Argument
argument_selection: String,
/// State of the picklist for arguments
argument_list: pick_list::State<String>,
/// Content of the freetext textbox
freetext: String,
/// State of the freetext textbox
freetext_state: text_input::State,
/// Currently selected complete command
current_command: String,
/// Status message
status: String,
}
impl CommandScreen {
pub fn from(config: devices::Configuration) -> Self {
let mut command_screen = Self {
commands: config.commands.clone(),
command_selection: config.commands[0].clone(),
send_button_state: button::State::default(),
channels: (1..=config.device.channels.clone()).collect(),
channel_selection: 1,
arguments: config.commands[0].values.clone(),
argument_selection: config.commands[0].values[0].clone(),
status: "Choose a command to start!".into(),
..Default::default()
};
command_screen.current_command = command_screen.get_command();
command_screen
}
/// Display the command selection screen using dropdown selection lists and textboxes
/// Provides information about the response the device sent.
pub fn view(&mut self) -> Container<Message> {
// Create a status text label from the current status
let status_text = Text::new(format!(
"{}", &self.status
)).size(20);
// Create a text label that displays the SCPI command that will be sent to the device
let command_text = Text::new(format!(
"{}", &self.current_command
)).size(40);
// Create a submit button that sends a message to the application root containing the selected SCPI command
let submit_button = ui_button(&mut self.send_button_state, "Submit".into(), styles::Button::Submit)
.on_press(Message::SendCommand(self.current_command.clone()));
// Create a textbox for freetext entry if the command argument contains a "<TXT>"
// if not, add an empty row to avoid re-arranging the UI whenever this switches
let freetext =
if self.argument_selection.contains("<TXT>") {
Row::new().push(
TextInput::new(&mut self.freetext_state, "TXT", &self.freetext, |txt| { Message::Command(CommandMessage::FreetextEntered(txt)) })
.width(Length::Units(150))
.style(styles::Textbox::Freetext)
.padding(5)
.on_submit(Message::SendCommand(self.current_command.clone()))
)
} else {
Row::new().push(iced::widget::Space::new(Length::Units(150), Length::Units(30)))
};
// Build the container from the above widgets and add the appropriate dropdown menus
Container::new(
Column::new()
.align_items(Align::Center)
.push(status_text)
.spacing(20)
.push(command_text)
.spacing(20)
.push(
Row::new().
push(
PickList::new(
&mut self.command_list_state,
&self.commands,
Some(self.command_selection.clone()),
|cmd| Message::Command(CommandMessage::CommandSelected(cmd)),
).width(Length::Units(200))
).spacing(20)
.push(
PickList::new(
&mut self.channel_list,
&self.channels,
Some(self.channel_selection.clone()),
|chan| Message::Command(CommandMessage::ChannelSelected(chan)),
).width(Length::Units(100))
).spacing(20)
.push(
PickList::new(
&mut self.argument_list,
&self.arguments,
Some(self.argument_selection.clone()),
|arg| Message::Command(CommandMessage::ArgumentSelected(arg)),
).width(Length::Units(200))
)
)
.push(freetext)
.push(
submit_button
)
)
.width(Length::Fill)
.height(Length::Fill)
.center_x()
.center_y()
}
/// Update the fields according to generated [`CommandMessage`]s
pub fn update(&mut self, message: CommandMessage) {
match message {
CommandSelected(cmd) => {
println!("{}", &cmd.name);
self.command_selection = cmd.clone();
self.arguments = cmd.values.clone();
self.argument_selection = self.arguments[0].clone();
self.argument_list = pick_list::State::default();
}
ChannelSelected(chan) => self.channel_selection = chan,
ArgumentSelected(arg) => self.argument_selection = arg,
FreetextEntered(txt) => self.freetext = txt,
}
self.current_command = self.get_command();
}
/// Returns a complete SCPI command from the selected/entered values on the screen
pub fn get_command(&self) -> String {
devices::command::make_scpi_command(self.command_selection.clone(), self.channel_selection, &self.argument_selection, &self.freetext).unwrap()
}
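// Illustrative note (an assumption, not taken from this file): the exact string
// depends on devices::command::make_scpi_command, which is defined elsewhere.
// For a typical SCPI instrument the result might look like "SOUR1:VOLT 1.0"
// for channel 1 with argument "1.0".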
/// Set the status text that is displayed above the scpi command
pub fn | (&mut self, status: String) {
self.status = status
}
}
| set_status_text |
rep-egre-programa.routing.module.ts | import { NgModule } from '@angular/core';
import { Routes, RouterModule } from '@angular/router';
import { ReporteEgreProgComponent } from './rep-egre-programa.component';
const routes: Routes = [
{
path: '',
component: ReporteEgreProgComponent
}
];
@NgModule({
imports: [RouterModule.forChild(routes)],
exports: [RouterModule]
})
export class ReporteEgreProgRoutingModule {} | ||
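// Usage sketch (illustrative, not part of this repo): a parent router would
// typically lazy-load the feature module that imports this routing module;
// the path and module file name below are assumptions.
// { path: 'rep-egre-programa', loadChildren: () =>
//     import('./rep-egre-programa.module').then(m => m.ReporteEgreProgModule) }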
lib.rs | use async_io::Timer;
use futures::channel::mpsc;
use futures::future::FutureExt;
use futures::stream::{Stream, StreamExt};
use std::collections::VecDeque;
use std::net::Ipv4Addr;
use std::pin::Pin;
use std::task::{Context, Poll};
use std::time::{Duration, Instant};
mod addr;
mod packet;
mod range;
pub use packet::{Packet, Protocol};
pub use range::Ipv4Range;
#[derive(Clone, Copy, Debug)]
pub struct Ipv4Route {
dest: Ipv4Range,
gateway: Option<Ipv4Addr>,
}
impl Ipv4Route {
/// Create a new route with the given destination and gateway.
pub fn new(dest: Ipv4Range, gateway: Option<Ipv4Addr>) -> Self {
Self { dest, gateway }
}
/// Returns the destination IP range of the route.
pub fn dest(&self) -> Ipv4Range {
self.dest
}
/// Returns the route's gateway (if any).
pub fn gateway(&self) -> Option<Ipv4Addr> {
self.gateway
}
}
impl From<Ipv4Range> for Ipv4Route {
fn from(range: Ipv4Range) -> Self {
Self::new(range, None)
}
}
impl From<Ipv4Addr> for Ipv4Route {
fn from(addr: Ipv4Addr) -> Self {
Self::new(addr.into(), None)
}
}
#[derive(Debug)]
pub struct Plug {
tx: mpsc::UnboundedSender<Vec<u8>>,
rx: mpsc::UnboundedReceiver<Vec<u8>>,
}
impl Plug {
pub fn poll_incoming(&mut self, cx: &mut Context) -> Poll<Option<Vec<u8>>> {
Pin::new(&mut self.rx).poll_next(cx)
}
pub async fn incoming(&mut self) -> Option<Vec<u8>> {
self.rx.next().await
}
pub fn unbounded_send(&mut self, packet: Vec<u8>) {
let _ = self.tx.unbounded_send(packet);
}
pub fn split(
self,
) -> (
mpsc::UnboundedSender<Vec<u8>>,
mpsc::UnboundedReceiver<Vec<u8>>,
) {
(self.tx, self.rx)
}
}
pub fn wire() -> (Plug, Plug) {
let (a_tx, b_rx) = mpsc::unbounded();
let (b_tx, a_rx) = mpsc::unbounded();
let a = Plug { tx: a_tx, rx: a_rx };
let b = Plug { tx: b_tx, rx: b_rx };
(a, b)
}
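// Usage sketch (illustrative): the two plugs behave as the ends of a
// bidirectional pipe, so a packet sent into one side comes out of the other.
// The bindings below are local to this example (see also the test at the
// bottom of this file).
//
// let (mut a, mut b) = wire();
// a.unbounded_send(vec![1, 2, 3]);
// assert_eq!(b.incoming().await, Some(vec![1, 2, 3]));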
#[derive(Clone, Copy, Debug)]
pub struct DelayBuffer {
delay: Duration,
buffer_size: usize,
}
impl Default for DelayBuffer {
fn default() -> Self {
Self::new()
}
}
impl DelayBuffer {
pub fn new() -> Self {
Self {
delay: Duration::from_millis(0),
buffer_size: usize::MAX,
}
}
pub fn set_delay(&mut self, delay: Duration) {
self.delay = delay;
}
pub fn | (&mut self, buffer_size: usize) {
self.buffer_size = buffer_size;
}
pub fn spawn(self, mut b: Plug) -> Plug {
#[allow(non_snake_case)]
let DURATION_MAX: Duration = Duration::from_secs(10000);
let (mut c, d) = wire();
async_global_executor::spawn(async move {
let mut b_tx_buffer_size = 0;
let mut b_tx_buffer = VecDeque::new();
let mut c_tx_buffer_size = 0;
let mut c_tx_buffer = VecDeque::new();
let mut idle = true;
let mut timer = Timer::after(DURATION_MAX);
loop {
futures::select! {
packet = b.incoming().fuse() => {
if let Some(packet) = packet {
if c_tx_buffer_size + packet.len() < self.buffer_size {
c_tx_buffer_size += packet.len();
let time = Instant::now();
c_tx_buffer.push_back((packet, time + self.delay));
if idle {
timer.set_after(self.delay);
idle = false;
}
}
} else {
break;
}
}
packet = c.incoming().fuse() => {
if let Some(packet) = packet {
if b_tx_buffer_size + packet.len() < self.buffer_size {
b_tx_buffer_size += packet.len();
let time = Instant::now();
b_tx_buffer.push_back((packet, time + self.delay));
if idle {
timer.set_after(self.delay);
idle = false;
}
}
} else {
break;
}
}
now = FutureExt::fuse(&mut timer) => {
let mut wtime = DURATION_MAX;
while let Some((packet, time)) = b_tx_buffer.front() {
if *time <= now {
b_tx_buffer_size -= packet.len();
b.unbounded_send(b_tx_buffer.pop_front().unwrap().0);
} else {
let bwtime = time.duration_since(now);
if wtime > bwtime {
wtime = bwtime;
}
break;
}
}
while let Some((packet, time)) = c_tx_buffer.front() {
if *time <= now {
c_tx_buffer_size -= packet.len();
c.unbounded_send(c_tx_buffer.pop_front().unwrap().0);
} else {
let cwtime = time.duration_since(now);
if wtime > cwtime {
wtime = cwtime;
}
break;
}
}
timer.set_after(wtime);
idle = wtime == DURATION_MAX
}
}
}
})
.detach();
d
}
}
#[cfg(test)]
mod tests {
use super::*;
#[async_std::test]
async fn test_delay() {
let (mut a, b) = wire();
let mut w = DelayBuffer::new();
w.set_delay(Duration::from_millis(100));
let mut b = w.spawn(b);
let now = Instant::now();
a.unbounded_send(vec![1]);
a.unbounded_send(vec![2]);
async_std::task::sleep(Duration::from_millis(10)).await;
a.unbounded_send(vec![3]);
a.unbounded_send(vec![4]);
b.incoming().await;
println!("{:?}", now.elapsed());
assert!(now.elapsed() >= Duration::from_millis(100));
assert!(now.elapsed() < Duration::from_millis(102));
b.incoming().await;
println!("{:?}", now.elapsed());
assert!(now.elapsed() >= Duration::from_millis(100));
assert!(now.elapsed() < Duration::from_millis(102));
b.incoming().await;
println!("{:?}", now.elapsed());
assert!(now.elapsed() >= Duration::from_millis(110));
assert!(now.elapsed() < Duration::from_millis(112));
b.incoming().await;
println!("{:?}", now.elapsed());
assert!(now.elapsed() >= Duration::from_millis(110));
assert!(now.elapsed() < Duration::from_millis(112));
}
}
| set_buffer_size |
libcore_iter_rs_0004.rs | fn | () {
let a = [0];
let b = [1];
let mut it = a.iter().chain(&b);
assert_eq!(it.next(), Some(&0));
assert_eq!(it.next(), Some(&1));
assert!(it.next().is_none());
}
| main |
native.rs | use super::{SqlError, SqlResult};
use datamodel::common::preview_features::PreviewFeature;
use enumflags2::BitFlags;
use migration_connector::{ConnectorError, ConnectorResult};
use quaint::{
connector::{Mysql, MysqlUrl, PostgreSql, PostgresUrl},
error::Error as QuaintError,
prelude::{ConnectionInfo, Query, Queryable, ResultSet},
single::Quaint,
};
use sql_schema_describer::{DescriberErrorKind, SqlSchema, SqlSchemaDescriberBackend};
use std::sync::Arc;
use user_facing_errors::{introspection_engine::DatabaseSchemaInconsistent, KnownError};
pub(crate) async fn connect(connection_string: &str) -> ConnectorResult<Connection> {
let connection_info = ConnectionInfo::from_url(connection_string).map_err(|err| {
let details = user_facing_errors::quaint::invalid_connection_string_description(&err.to_string());
KnownError::new(user_facing_errors::common::InvalidConnectionString { details })
})?;
if let ConnectionInfo::Postgres(url) = &connection_info {
return quaint::connector::PostgreSql::new(url.clone())
.await
.map(|conn| Connection::new_postgres(conn, url.clone()))
.map_err(|err| quaint_error_to_connector_error(err, &connection_info));
}
if let ConnectionInfo::Mysql(url) = &connection_info {
return quaint::connector::Mysql::new(url.clone())
.await
.map(|conn| Connection::new_mysql(conn, url.clone()))
.map_err(|err| quaint_error_to_connector_error(err, &connection_info));
}
let connection = Quaint::new(connection_string)
.await
.map_err(|err| quaint_error_to_connector_error(err, &connection_info))?;
Ok(Connection::new_generic(connection))
}
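// Usage sketch (illustrative): the connection string below is a placeholder,
// not taken from this codebase.
//
// let conn = connect("postgresql://user:password@localhost:5432/mydb").await?;
// let version = conn.version().await?;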
pub(crate) fn quaint_error_to_connector_error(error: QuaintError, connection_info: &ConnectionInfo) -> ConnectorError {
match user_facing_errors::quaint::render_quaint_error(error.kind(), connection_info) {
Some(user_facing_error) => user_facing_error.into(),
None => {
let msg = error
.original_message()
.map(String::from)
.unwrap_or_else(|| error.to_string());
ConnectorError::from_msg(msg)
}
}
}
fn sql_error(quaint_error: QuaintError, connection_info: &ConnectionInfo) -> SqlError {
let error_code = quaint_error.original_code().map(String::from);
super::SqlError {
connector_error: quaint_error_to_connector_error(quaint_error, connection_info),
src_position: None,
src_statement: None,
error_code,
}
}
/// An internal helper for the SQL connector. It wraps a `Quaint` struct and
/// exposes a similar API, with additional error handling to return
/// `ConnectorResult`s.
#[derive(Clone, Debug)]
pub(crate) struct Connection(ConnectionInner, ConnectionInfo);
#[derive(Clone, Debug)]
enum ConnectionInner {
Postgres(Arc<(quaint::connector::PostgreSql, PostgresUrl)>),
Mysql(Arc<(quaint::connector::Mysql, MysqlUrl)>),
Generic(Quaint),
}
impl Connection {
pub(crate) fn new_generic(quaint: Quaint) -> Self {
let connection_info = quaint.connection_info().to_owned();
Connection(ConnectionInner::Generic(quaint), connection_info)
}
fn new_postgres(conn: PostgreSql, url: PostgresUrl) -> Self {
Connection(
ConnectionInner::Postgres(Arc::new((conn, url.clone()))),
ConnectionInfo::Postgres(url),
)
}
fn new_mysql(conn: Mysql, url: MysqlUrl) -> Self {
Connection(
ConnectionInner::Mysql(Arc::new((conn, url.clone()))),
ConnectionInfo::Mysql(url),
)
}
pub(crate) fn connection_info(&self) -> &ConnectionInfo {
&self.1
}
fn queryable(&self) -> &dyn Queryable {
match &self.0 { | }
}
pub(crate) async fn describe_schema(
&self,
preview_features: BitFlags<PreviewFeature>,
) -> ConnectorResult<SqlSchema> {
let connection_info = self.connection_info();
let mut schema = match connection_info {
ConnectionInfo::Postgres(_) => {
sql_schema_describer::postgres::SqlSchemaDescriber::new(self.queryable(), Default::default())
.describe(connection_info.schema_name())
.await
.map_err(|err| match err.into_kind() {
DescriberErrorKind::QuaintError(err) => quaint_error_to_connector_error(err, connection_info),
e @ DescriberErrorKind::CrossSchemaReference { .. } => {
let err = KnownError::new(DatabaseSchemaInconsistent {
explanation: format!("{}", e),
});
ConnectorError::from(err)
}
})?
}
ConnectionInfo::Mysql(_) => sql_schema_describer::mysql::SqlSchemaDescriber::new(self.queryable())
.describe(connection_info.schema_name())
.await
.map_err(|err| match err.into_kind() {
DescriberErrorKind::QuaintError(err) => quaint_error_to_connector_error(err, connection_info),
DescriberErrorKind::CrossSchemaReference { .. } => {
unreachable!("No schemas on MySQL")
}
})?,
ConnectionInfo::Mssql(_) => sql_schema_describer::mssql::SqlSchemaDescriber::new(self.queryable())
.describe(connection_info.schema_name())
.await
.map_err(|err| match err.into_kind() {
DescriberErrorKind::QuaintError(err) => quaint_error_to_connector_error(err, connection_info),
e @ DescriberErrorKind::CrossSchemaReference { .. } => {
let err = KnownError::new(DatabaseSchemaInconsistent {
explanation: e.to_string(),
});
ConnectorError::from(err)
}
})?,
ConnectionInfo::Sqlite { .. } | ConnectionInfo::InMemorySqlite { .. } => {
sql_schema_describer::sqlite::SqlSchemaDescriber::new(self.queryable())
.describe(connection_info.schema_name())
.await
.map_err(|err| match err.into_kind() {
DescriberErrorKind::QuaintError(err) => quaint_error_to_connector_error(err, connection_info),
DescriberErrorKind::CrossSchemaReference { .. } => {
unreachable!("No schemas on SQLite")
}
})?
}
};
// Remove this when the feature is GA
if !preview_features.contains(PreviewFeature::ExtendedIndexes) {
filter_extended_index_capabilities(&mut schema);
}
Ok(schema)
}
pub(crate) async fn query(&self, query: impl Into<Query<'_>>) -> SqlResult<ResultSet> {
self.queryable()
.query(query.into())
.await
.map_err(|quaint_error| sql_error(quaint_error, self.connection_info()))
}
pub(crate) async fn query_raw(&self, sql: &str, params: &[quaint::Value<'_>]) -> SqlResult<ResultSet> {
self.queryable()
.query_raw(sql, params)
.await
.map_err(|quaint_error| sql_error(quaint_error, self.connection_info()))
}
pub(crate) async fn raw_cmd(&self, sql: &str) -> SqlResult<()> {
self.queryable()
.raw_cmd(sql)
.await
.map_err(|quaint_error| sql_error(quaint_error, self.connection_info()))
}
pub(crate) async fn version(&self) -> SqlResult<Option<String>> {
self.queryable()
.version()
.await
.map_err(|quaint_error| sql_error(quaint_error, self.connection_info()))
}
pub(crate) fn unwrap_postgres(&self) -> &(PostgreSql, PostgresUrl) {
match &self.0 {
ConnectionInner::Postgres(inner) => inner,
other => panic!("{:?} in Connection::unwrap_postgres()", other),
}
}
pub(crate) fn unwrap_mysql(&self) -> &(Mysql, MysqlUrl) {
match &self.0 {
ConnectionInner::Mysql(inner) => &**inner,
other => panic!("{:?} in Connection::unwrap_mysql()", other),
}
}
}
fn filter_extended_index_capabilities(schema: &mut SqlSchema) {
for (_, table) in schema.iter_tables_mut() {
let mut pk_removal = false;
if let Some(ref mut pk) = &mut table.primary_key {
for col in pk.columns.iter_mut() {
if col.length.is_some() {
pk_removal = true;
}
col.length = None;
col.sort_order = None;
}
}
if pk_removal {
table.primary_key = None;
}
let mut kept_indexes = Vec::new();
while let Some(mut index) = table.indices.pop() {
let mut remove_index = false;
for col in index.columns.iter_mut() {
if col.length.is_some() {
remove_index = true;
}
col.sort_order = None;
}
if !remove_index {
kept_indexes.push(index);
}
}
kept_indexes.reverse();
table.indices = kept_indexes;
}
} | ConnectionInner::Postgres(pg) => &pg.0,
ConnectionInner::Mysql(my) => &my.0,
ConnectionInner::Generic(q) => q, |
join.go | package aspect
import (
"fmt"
)
type JoinOnStmt struct {
ArrayClause
method string
table *TableElem
}
func (j JoinOnStmt) String() string {
compiled, _ := j.Compile(&defaultDialect{}, Params())
return compiled
}
func (j JoinOnStmt) Compile(d Dialect, params *Parameters) (string, error) {
// Compile the clauses of the join statement
clauses, err := j.ArrayClause.Compile(d, params)
if err != nil {
return "", err
}
return fmt.Sprintf(
` %s %s ON %s`,
j.method,
j.table.Compile(d, params),
clauses,
), nil
}
// JoinStmt is an internal representation of a JOIN.
// It is broken and deprecated.
type JoinStmt struct {
method string
table *TableElem
pre, post ColumnElem
}
// Compile will compile the JOIN statement according to the given dialect.
func (j JoinStmt) Compile(d Dialect, params *Parameters) (string, error) {
prec, err := j.pre.Compile(d, params)
if err != nil {
return "", err
}
postc, err := j.post.Compile(d, params)
if err != nil |
compiled := fmt.Sprintf(
` %s "%s" ON %s = %s`,
j.method,
j.table.Name,
prec,
postc,
)
return compiled, nil
}
| {
return "", err
} |
cron_articles.py |
import asyncio
import aiohttp
import requests
from top_articles.models import Story
from django.core.exceptions import ObjectDoesNotExist
def check_db_story_ids(articlesID_list):
new_articleID_list = []
for id in articlesID_list:
try:
Story.objects.get(id=id)
except ObjectDoesNotExist:
new_articleID_list.append(id)
return new_articleID_list
def get_article_urls(url):
"""
Fetch all ids of top trending articles
args: None
return:None
"""
articlesID_list = requests.get(
url).json()
url_list = []
print("article length",len(articlesID_list))
newarticlesID_list = check_db_story_ids(articlesID_list)
for id in newarticlesID_list:
url ="https://hacker-news.firebaseio.com/v0/item/%s.json?print=pretty" % id
url_list.append(url)
return url_list, articlesID_list, newarticlesID_list
async def fetch_url(session, url):
async with session.get(url, timeout=60 * 60) as response:
return await response.json()
async def | (session, urls, loop):
results = await asyncio.gather(*[fetch_url(session, url) for url in urls],
return_exceptions=True)
return results
def fetch_articles(urls):
if len(urls) > 1:
loop = asyncio.get_event_loop()
connector = aiohttp.TCPConnector(limit=100)
with aiohttp.ClientSession(loop=loop, connector=connector) as session:
articles = loop.run_until_complete(fetch_all_urls(session, urls, loop))
raw_result = articles
return raw_result
else:
return None
def main(url):
urls_list, articlesID_list, newarticlesID_list = get_article_urls(url)
print(urls_list, articlesID_list, newarticlesID_list)
result_dict = fetch_articles(urls_list)
return result_dict, articlesID_list, newarticlesID_list
| fetch_all_urls |
google.go | //
// Copyright 2021 The Sigstore Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google
import (
"context"
"os"
"strings"
"google.golang.org/api/idtoken"
"google.golang.org/api/impersonate"
"github.com/sigstore/cosign/pkg/providers"
)
func init() |
type googleWorkloadIdentity struct{}
var _ providers.Interface = (*googleWorkloadIdentity)(nil)
// gceProductNameFile is the product file path that contains the cloud service name.
// This is a variable instead of a const to enable testing.
var gceProductNameFile = "/sys/class/dmi/id/product_name"
// Enabled implements providers.Interface
// This is based on k8s.io/kubernetes/pkg/credentialprovider/gcp
func (gwi *googleWorkloadIdentity) Enabled(ctx context.Context) bool {
data, err := os.ReadFile(gceProductNameFile)
if err != nil {
return false
}
name := strings.TrimSpace(string(data))
if name == "Google" || name == "Google Compute Engine" {
// Just because we're on Google, does not mean workload identity is available.
// TODO(mattmoor): do something better than this.
_, err := gwi.Provide(ctx, "garbage")
return err == nil
}
return false
}
// Provide implements providers.Interface
func (gwi *googleWorkloadIdentity) Provide(ctx context.Context, audience string) (string, error) {
ts, err := idtoken.NewTokenSource(ctx, audience)
if err != nil {
return "", err
}
tok, err := ts.Token()
if err != nil {
return "", err
}
return tok.AccessToken, nil
}
type googleImpersonate struct{}
var _ providers.Interface = (*googleImpersonate)(nil)
// Enabled implements providers.Interface
func (gi *googleImpersonate) Enabled(ctx context.Context) bool {
// The "impersonate" method requires a target service account to impersonate.
return os.Getenv("GOOGLE_SERVICE_ACCOUNT_NAME") != ""
}
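// Illustrative: GOOGLE_SERVICE_ACCOUNT_NAME is expected to hold the email of
// the service account to impersonate, e.g. "signer@my-project.iam.gserviceaccount.com"
// (the value shown here is a made-up example, not taken from this code).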
// Provide implements providers.Interface
func (gi *googleImpersonate) Provide(ctx context.Context, audience string) (string, error) {
target := os.Getenv("GOOGLE_SERVICE_ACCOUNT_NAME")
ts, err := impersonate.IDTokenSource(ctx, impersonate.IDTokenConfig{
Audience: audience,
TargetPrincipal: target,
IncludeEmail: true,
})
if err != nil {
return "", err
}
tok, err := ts.Token()
if err != nil {
return "", err
}
return tok.AccessToken, nil
}
| {
providers.Register("google-workload-identity", &googleWorkloadIdentity{})
providers.Register("google-impersonate", &googleImpersonate{})
} |
iam.py | from . import *
class AWS_IAM_Role_Policy(CloudFormationProperty):
def write(self, w):
with w.block("policy"):
self.property(w, "PolicyDocument", "policy_document", JsonValueConverter())
self.property(w, "PolicyName", "policy_name", StringValueConverter())
class AWS_IAM_Group_Policy(CloudFormationProperty):
def write(self, w):
with w.block("policy"):
self.property(w, "PolicyDocument", "policy_document", JsonValueConverter())
self.property(w, "PolicyName", "policy_name", StringValueConverter())
class AWS_IAM_User_LoginProfile(CloudFormationProperty):
def write(self, w):
with w.block("login_profile"):
self.property(w, "Password", "password", StringValueConverter())
self.property(w, "PasswordResetRequired", "password_reset_required", BasicValueConverter())
class AWS_IAM_User_Policy(CloudFormationProperty):
def write(self, w):
with w.block("policy"):
self.property(w, "PolicyDocument", "policy_document", JsonValueConverter())
self.property(w, "PolicyName", "policy_name", StringValueConverter())
class AWS_IAM_Group(CloudFormationResource):
cfn_type = "AWS::IAM::Group"
tf_type = "aws_iam_group"
ref = "id"
attrs = {
"Arn": "arn",
# Additional TF attributes: unique_id
}
def write(self, w):
with self.resource_block(w):
self.property(w, "GroupName", "name", StringValueConverter())
self.property(w, "ManagedPolicyArns", "arn", ListValueConverter(StringValueConverter()))
self.property(w, "Path", "path", StringValueConverter())
self.repeated_block(w, "Policies", AWS_IAM_Group_Policy) # TODO: Probably not the correct mapping
class AWS_IAM_Policy(CloudFormationResource):
cfn_type = "AWS::IAM::Policy"
tf_type = "aws_iam_policy_attachment"
ref = "id"
attrs = {}
def write(self, w):
with self.resource_block(w):
self.property(w, "Groups", "groups", ListValueConverter(StringValueConverter())) | self.property(w, "Users", "users", ListValueConverter(StringValueConverter()))
class AWS_IAM_ServiceLinkedRole(CloudFormationResource):
cfn_type = "AWS::IAM::ServiceLinkedRole"
tf_type = "aws_iam_service_linked_role"
ref = "id"
attrs = {} # Additional TF attributes: arn, create_date, name, path, unique_id
def write(self, w):
with self.resource_block(w):
self.property(w, "CustomSuffix", "custom_suffix", StringValueConverter())
self.property(w, "Description", "description", StringValueConverter())
self.property(w, "AWSServiceName", "aws_service_name", StringValueConverter())
class AWS_IAM_AccessKey(CloudFormationResource):
cfn_type = "AWS::IAM::AccessKey"
tf_type = "aws_iam_access_key"
ref = "id"
attrs = {
"SecretAccessKey": "secret",
# Additional TF attributes: encrypted_secret, key_fingerprint, ses_smtp_password, ses_smtp_password_v4, status
}
def write(self, w):
with self.resource_block(w):
self.property(w, "Serial", "serial", BasicValueConverter()) # TODO: Probably not the correct mapping
self.property(w, "Status", "status", StringValueConverter())
self.property(w, "UserName", "user", StringValueConverter())
class AWS_IAM_User(CloudFormationResource):
cfn_type = "AWS::IAM::User"
tf_type = "aws_iam_user_group_membership"
ref = "id"
attrs = {
"Arn": "arn", # TODO: Probably not the correct mapping
}
def write(self, w):
with self.resource_block(w):
self.property(w, "Groups", "groups", ListValueConverter(StringValueConverter()))
self.block(w, "LoginProfile", AWS_IAM_User_LoginProfile) # TODO: Probably not the correct mapping
self.property(w, "ManagedPolicyArns", "managed_policy_arns", ListValueConverter(StringValueConverter())) # TODO: Probably not the correct mapping
self.property(w, "Path", "path", StringValueConverter()) # TODO: Probably not the correct mapping
self.property(w, "PermissionsBoundary", "permissions_boundary", StringValueConverter()) # TODO: Probably not the correct mapping
self.repeated_block(w, "Policies", AWS_IAM_User_Policy) # TODO: Probably not the correct mapping
self.property(w, "Tags", "tags", ListValueConverter(ResourceTag())) # TODO: Probably not the correct mapping
self.property(w, "UserName", "user", StringValueConverter())
class AWS_IAM_Role(CloudFormationResource):
cfn_type = "AWS::IAM::Role"
tf_type = "aws_iam_role"
ref = "id"
attrs = {
"Arn": "arn",
"RoleId": "id",
# Additional TF attributes: create_date, name, unique_id
}
def write(self, w):
with self.resource_block(w):
self.property(w, "AssumeRolePolicyDocument", "assume_role_policy", JsonValueConverter())
self.property(w, "Description", "description", StringValueConverter())
self.property(w, "ManagedPolicyArns", "arn", ListValueConverter(StringValueConverter()))
self.property(w, "MaxSessionDuration", "max_session_duration", BasicValueConverter())
self.property(w, "Path", "path", StringValueConverter())
self.property(w, "PermissionsBoundary", "permissions_boundary", StringValueConverter())
self.repeated_block(w, "Policies", AWS_IAM_Role_Policy)
self.property(w, "RoleName", "name", StringValueConverter())
self.property(w, "Tags", "tags", ListValueConverter(ResourceTag()))
class AWS_IAM_UserToGroupAddition(CloudFormationResource):
cfn_type = "AWS::IAM::UserToGroupAddition"
tf_type = "aws_iam_user"
ref = "id"
attrs = {} # Additional TF attributes: arn, unique_id
def write(self, w):
with self.resource_block(w):
self.property(w, "GroupName", "name", StringValueConverter())
self.property(w, "Users", "users", ListValueConverter(StringValueConverter())) # TODO: Probably not the correct mapping
class AWS_IAM_InstanceProfile(CloudFormationResource):
cfn_type = "AWS::IAM::InstanceProfile"
tf_type = "aws_iam_instance_profile"
ref = "id"
attrs = {
"Arn": "arn",
# Additional TF attributes: create_date, name, role, roles, unique_id
}
def write(self, w):
with self.resource_block(w):
self.property(w, "InstanceProfileName", "name", StringValueConverter())
self.property(w, "Path", "path", StringValueConverter())
self.property(w, "Roles", "roles", ListValueConverter(StringValueConverter()))
class AWS_IAM_ManagedPolicy(CloudFormationResource):
cfn_type = "AWS::IAM::ManagedPolicy"
tf_type = "aws_iam_managed_policy" # TODO: Most likely not working
ref = "arn"
attrs = {}
def write(self, w):
with self.resource_block(w):
self.property(w, "Description", "description", StringValueConverter())
self.property(w, "Groups", "groups", ListValueConverter(StringValueConverter()))
self.property(w, "ManagedPolicyName", "managed_policy_name", StringValueConverter())
self.property(w, "Path", "path", StringValueConverter())
self.property(w, "PolicyDocument", "policy_document", JsonValueConverter())
self.property(w, "Roles", "roles", ListValueConverter(StringValueConverter()))
self.property(w, "Users", "users", ListValueConverter(StringValueConverter())) | self.property(w, "PolicyDocument", "policy_document", JsonValueConverter()) # TODO: Probably not the correct mapping
self.property(w, "PolicyName", "name", StringValueConverter())
self.property(w, "Roles", "roles", ListValueConverter(StringValueConverter())) |
utils.py | import numpy as np
import astropy.nddata
from astropy.io import fits
from astropy.io.fits import CompImageHDU, HDUList, Header, ImageHDU, PrimaryHDU
from astropy.wcs import WCS
from astropy.wcs.wcsapi import BaseHighLevelWCS
__all__ = ['parse_input_data', 'parse_input_shape', 'parse_input_weights',
'parse_output_projection']
def parse_input_data(input_data, hdu_in=None):
"""
Parse input data to return a Numpy array and WCS object.
"""
if isinstance(input_data, str):
return parse_input_data(fits.open(input_data), hdu_in=hdu_in)
elif isinstance(input_data, HDUList):
if hdu_in is None:
if len(input_data) > 1:
raise ValueError("More than one HDU is present, please specify "
"HDU to use with ``hdu_in=`` option")
else:
hdu_in = 0
return parse_input_data(input_data[hdu_in])
elif isinstance(input_data, (PrimaryHDU, ImageHDU, CompImageHDU)):
return input_data.data, WCS(input_data.header)
elif isinstance(input_data, tuple) and isinstance(input_data[0], np.ndarray):
if isinstance(input_data[1], Header):
return input_data[0], WCS(input_data[1])
else:
return input_data
elif isinstance(input_data, astropy.nddata.NDDataBase):
return input_data.data, input_data.wcs
else:
raise TypeError("input_data should either be an HDU object or a tuple "
"of (array, WCS) or (array, Header)")
def parse_input_shape(input_shape, hdu_in=None):
"""
Parse input shape information to return an array shape tuple and WCS object.
"""
if isinstance(input_shape, str):
return parse_input_shape(fits.open(input_shape), hdu_in=hdu_in)
elif isinstance(input_shape, HDUList):
if hdu_in is None:
if len(input_shape) > 1:
raise ValueError("More than one HDU is present, please specify "
"HDU to use with ``hdu_in=`` option")
else:
hdu_in = 0
return parse_input_shape(input_shape[hdu_in])
elif isinstance(input_shape, (PrimaryHDU, ImageHDU, CompImageHDU)):
return input_shape.shape, WCS(input_shape.header)
elif isinstance(input_shape, tuple) and isinstance(input_shape[0], np.ndarray):
if isinstance(input_shape[1], Header):
return input_shape[0].shape, WCS(input_shape[1])
else:
return input_shape[0].shape, input_shape[1]
elif isinstance(input_shape, tuple) and isinstance(input_shape[0], tuple):
if isinstance(input_shape[1], Header):
return input_shape[0], WCS(input_shape[1])
else:
return input_shape
elif isinstance(input_shape, astropy.nddata.NDDataBase):
return input_shape.data.shape, input_shape.wcs
else:
raise TypeError("input_shape should either be an HDU object or a tuple "
"of (array-or-shape, WCS) or (array-or-shape, Header)")
def parse_input_weights(input_weights, hdu_weights=None):
|
def parse_output_projection(output_projection, shape_out=None, output_array=None):
if shape_out is None:
if output_array is not None:
shape_out = output_array.shape
elif shape_out is not None and output_array is not None:
if shape_out != output_array.shape:
raise ValueError("shape_out does not match shape of output_array")
if isinstance(output_projection, Header):
wcs_out = WCS(output_projection)
try:
shape_out = [output_projection['NAXIS{}'.format(i + 1)]
for i in range(output_projection['NAXIS'])][::-1]
except KeyError:
if shape_out is None:
raise ValueError("Need to specify shape since output header "
"does not contain complete shape information")
elif isinstance(output_projection, BaseHighLevelWCS):
wcs_out = output_projection
if shape_out is None:
raise ValueError("Need to specify shape_out when specifying "
"output_projection as WCS object")
elif isinstance(output_projection, str):
hdu_list = fits.open(output_projection)
shape_out = hdu_list[0].data.shape
header = hdu_list[0].header
wcs_out = WCS(header)
hdu_list.close()
else:
raise TypeError('output_projection should either be a Header, a WCS '
'object, or a filename')
if len(shape_out) == 0:
raise ValueError("The shape of the output image should not be an "
"empty tuple")
return wcs_out, shape_out
| """
Parse input weights to return a Numpy array.
"""
if isinstance(input_weights, str):
return parse_input_data(fits.open(input_weights), hdu_in=hdu_weights)[0]
elif isinstance(input_weights, HDUList):
if hdu_weights is None:
if len(input_weights) > 1:
raise ValueError("More than one HDU is present, please specify "
"HDU to use with ``hdu_weights=`` option")
else:
hdu_weights = 0
return parse_input_data(input_weights[hdu_weights])[0]
elif isinstance(input_weights, (PrimaryHDU, ImageHDU, CompImageHDU)):
return input_weights.data
elif isinstance(input_weights, np.ndarray):
return input_weights
else:
raise TypeError("input_weights should either be an HDU object or a Numpy array") |
linux-inject.ts | import injection from "../util/injection.js";
import platform from "../util/platform.js";
import minimist from "minimist";
const argv: { _: string[], d?: string, dir?: string } = minimist(process.argv.slice(2)); | // Gets passed directory
const dir: string = argv.d || argv.dir;
if (dir) injection(platform.dir, dir);
else throw new Error("Expected -d or --dir flag with ReGuilded's directory"); | |
extractor.py | import codecs
import json
import os
import numpy as np
from nlplingo.nn.sequence_model import SequenceXLMRBase, SequenceXLMRCustom
from nlplingo.nn.spanpair_model import SpanPairModelEmbedded
from nlplingo.tasks.entitycoref.feature import EntityCorefFeatureGenerator
from nlplingo.tasks.entitycoref.generator import EntityCorefExampleGenerator
from nlplingo.tasks.eventcoref.feature import EventCorefFeatureGenerator
from nlplingo.tasks.eventcoref.generator import EventCorefExampleGenerator
from nlplingo.tasks.eventpair.feature import EventPairFeatureGenerator
from nlplingo.tasks.eventpair.generator import EventPairExampleGenerator
from nlplingo.tasks.eventframe.feature import EventFramePairFeatureGenerator
from nlplingo.tasks.eventframe.generator import EventFramePairExampleGenerator
from keras.models import load_model as keras_load_model
from keras.models import Model as KerasModel
from nlplingo.tasks.eventargument.feature import EventArgumentFeatureGenerator
from nlplingo.tasks.eventargument.generator import EventArgumentExampleGenerator
from nlplingo.tasks.eventrelation.feature import EventEventRelationFeatureGenerator
from nlplingo.tasks.eventrelation.generator import EventEventRelationExampleGenerator
from nlplingo.tasks.entityrelation.feature import EntityRelationFeatureGenerator
from nlplingo.tasks.entityrelation.generator import EntityRelationExampleGenerator
from nlplingo.tasks.event_domain import EventDomain
from nlplingo.tasks.eventtrigger.feature import EventTriggerFeatureGenerator
from nlplingo.tasks.eventtrigger.generator import EventTriggerExampleGenerator
from nlplingo.nn.argument_model import CNNArgumentModel
from nlplingo.nn.argument_model import GNNArgumentModel
from nlplingo.nn.argument_model import MultiLayerArgumentModelEmbedded, WithinSentenceArgumentModel
from nlplingo.nn.extraction_model import ExtractionModel
from nlplingo.nn.keras_models.common import keras_custom_objects
from nlplingo.nn.trigger_model import CNNTriggerModel
from nlplingo.nn.trigger_model import MultiLayerTriggerModelEmbedded
from nlplingo.nn.eventpair_model import EventPairModelEmbeddedTrigger
from nlplingo.nn.event_event_relation_model import MultiLayerEventEventRelationModel, WithinSentenceEER, EventEventRelationStackedOpenNREModel, WithinSentenceEERGeneral
from nlplingo.nn.entity_entity_relation_model import MultiLayerEntityRelationModelEmbedded, WithinSentenceEntityRelationModel
from nlplingo.nn.eventframepair_model import EventFramePairModelEmbedded
from nlplingo.nn.hyperparameters import HyperParameters
from nlplingo.tasks.common.feature.feature_setting import FeatureSetting
import logging
from nlplingo.tasks.sequence.feature import SequenceFeatureGenerator
from nlplingo.tasks.sequence.generator import SequenceExampleGenerator
logger = logging.getLogger(__name__)
class Extractor(object):
trigger_model_table = {
'event-trigger_cnn': CNNTriggerModel,
'event-trigger_cnn-embedded': CNNTriggerModel,
'event-trigger_multilayer-embedded': MultiLayerTriggerModelEmbedded,
'event-trigger_sentence-embedded': MultiLayerTriggerModelEmbedded,
}
argument_model_table = {
'event-argument_cnn': CNNArgumentModel,
'event-argument_cnn-embedded': CNNArgumentModel,
'event-argument_gnn': GNNArgumentModel,
'event-argument_multilayer-embedded': MultiLayerArgumentModelEmbedded,
'event-argument_bert-mention': WithinSentenceArgumentModel
}
eer_model_table = {
'event-event-relation_multilayer': MultiLayerEventEventRelationModel,
'event-event-relation_multilayer-embedded': MultiLayerEventEventRelationModel,
'event-event-relation_two_models_with_postprocessing': EventEventRelationStackedOpenNREModel,
'event-event-relation_cnn-embedded': WithinSentenceEER, # This exists for legacy reasons
'event-event-relation_within-sentence': WithinSentenceEER,
'event-event-relation_general_decode-embedded': WithinSentenceEERGeneral
}
entity_relation_model_table = {
'entity-entity-relation_multilayer-embedded': MultiLayerEntityRelationModelEmbedded,
'entity-entity-relation_bert-mention': WithinSentenceEntityRelationModel
}
eventpair_model_table = {
'event-pair_embedded': SpanPairModelEmbedded,
'event-pair_embedded_trigger': EventPairModelEmbeddedTrigger
}
eventframepair_model_table = {
'event-framepair_embedded': EventFramePairModelEmbedded
}
entitycoref_model_table = {
'entitycoref_embedded': SpanPairModelEmbedded
}
eventcoref_model_table = {
'eventcoref_embedded': SpanPairModelEmbedded
}
sequence_model_table = {
'sequence_xlmr-base': SequenceXLMRBase,
'sequence_xlmr-custom': SequenceXLMRCustom
}
def __init__(self, params, extractor_params, embeddings, load_from_file=False):
"""
:type params: dict # general parameters
:type extractor_params: dict # specific to this extractor
:type embeddings: dict[str : nlplingo.embeddings.word_embeddings.WordEmbedding]
"""
self.extractor_params = extractor_params
self.extractor_name = extractor_params.get('extractor_name', None)
self.task = extractor_params.get('task', None)
self.engine = extractor_params.get('engine', None)
self.model_type = extractor_params['model_type']
""":type: str"""
self.domain = EventDomain.read_domain_ontology_file(extractor_params['domain_ontology'],
domain_name=extractor_params.get('domain_name', 'general'))
""":type: nlplingo.tasks.event_domain.EventDomain"""
self.domain.build_prior(extractor_params.get('ontology_yaml'))
self.model_file = extractor_params['model_file']
""":type: str"""
self.class_thresholds_path = extractor_params.get('class_thresholds')
""":type: str"""
self.class_thresholds_global = float(
extractor_params.get('class_thresholds_global', -1.0))
""":type: float"""
self.use_trigger_safelist = extractor_params.get('trigger.use_safelist', False)
if 'engine' not in extractor_params or (('engine' in extractor_params) and (extractor_params['engine'] == 'keras')):
self.hyper_parameters = HyperParameters(extractor_params['hyper-parameters'], load_from_file)
elif extractor_params['engine'] == 'pytorch':
self.hyper_parameters = HyperParameters(extractor_params['hyper-parameters'], load_from_file)
# elif extractor_params['engine'] == 'transformers':
# pass
else:
raise RuntimeError('Extractor model type: {} not implemented.'.format(self.model_type))
""":type: nlplingo.nn.extractor.HyperParameters"""
self.feature_setting = FeatureSetting(self.extractor_params['features'])
self.extraction_model = None
if self.model_type in self.trigger_model_table:
self.extraction_model = self.trigger_model_table[self.model_type](params, extractor_params, self.domain, embeddings,
self.hyper_parameters,
self.feature_setting)
elif self.model_type in self.argument_model_table:
self.extraction_model = self.argument_model_table[self.model_type](params, extractor_params, self.domain,
embeddings, self.hyper_parameters,
self.feature_setting)
elif self.model_type in self.eventpair_model_table:
self.extraction_model = self.eventpair_model_table[self.model_type](params, extractor_params, self.domain, embeddings,
self.hyper_parameters,
self.feature_setting) # TODO: fix this model
elif self.model_type in self.eer_model_table:
self.extraction_model = self.eer_model_table[self.model_type](params, extractor_params, self.domain, embeddings,
self.hyper_parameters,
self.feature_setting)
elif self.model_type in self.entity_relation_model_table:
self.extraction_model = self.entity_relation_model_table[self.model_type](params, extractor_params, self.domain, embeddings,
self.hyper_parameters,
self.feature_setting)
elif self.model_type in self.eventframepair_model_table:
self.extraction_model = self.eventframepair_model_table[self.model_type](params, extractor_params, self.domain, embeddings,
self.hyper_parameters,
self.feature_setting) # TODO: fix this model
elif self.model_type in self.entitycoref_model_table:
self.extraction_model = self.entitycoref_model_table[self.model_type](params, extractor_params, self.domain, embeddings,
self.hyper_parameters,
self.feature_setting)
elif self.model_type in self.eventcoref_model_table:
self.extraction_model = self.eventcoref_model_table[self.model_type](params, extractor_params, self.domain, embeddings,
self.hyper_parameters,
self.feature_setting)
elif self.model_type in self.sequence_model_table:
if self.task == 'event-trigger':
self.domain.create_sequence_types(self.domain.event_types)
elif self.task == 'event-argument':
self.domain.create_sequence_types(self.domain.event_roles)
elif self.task == 'ner':
self.domain.create_sequence_types(self.domain.entity_types)
self.extraction_model = self.sequence_model_table[self.model_type](params, extractor_params, self.domain, embeddings,
self.hyper_parameters, self.feature_setting)
elif self.model_type.startswith('oregon'): # TODO hack, until YS has time to properly integrate after BETTER eval
pass
else:
raise RuntimeError('Extractor model type: {} not implemented.'.format(self.model_type))
""":type: nlplingo.nn.event_model.ExtractionModel"""
# TODO: extend this to support EventEventRelation models
if load_from_file:
logging.info('Loading previously trained model')
if extractor_params.get('engine', None) == 'keras':
self.load_keras()
if extractor_params.get('engine', None) is None: # TODO use framework
self.load_keras()
elif extractor_params['engine'] == 'pytorch':
pass
# elif extractor_params['engine'] == 'transformers':
# pass
else:
raise Exception(
'Only Keras or PyTorch engines are supported.')
#if ('engine' in extractor_params) and (extractor_params['engine'] == 'pytorch'):
# if load_from_file or self.extraction_model.hyper_params.load:
# pass
"""
self.extraction_model.hyper_params.num_class = self.extraction_model.num_output
if self.extraction_model.word_embeddings is not None:
trainer = self.extraction_model.model(self.extraction_model.extractor_params, self.extraction_model.hyper_params.dict, self.extraction_model.optimizer,
feature_names=self.extraction_model.features.feature_strings, emb_matrix=self.extraction_model.word_embeddings)
else: # frozen, external embedding case
if self.extraction_model.embeddings_vector_size is not None:
self.extraction_model.hyper_params.dict['emb_dim'] = self.extraction_model.embeddings_vector_size
trainer = self.extraction_model.model(self.extraction_model.extractor_params, self.extraction_model.hyper_params.dict, self.extraction_model.optimizer,
feature_names=self.extraction_model.features.feature_strings)
if self.model_file:
trainer.load(self.model_file)
self.extraction_model.trained_model = trainer
"""
self.feature_generator = None # feature generator
self.example_generator = None # example generator
# TODO this should really be renamed as task instead of model_type
if self.model_type.startswith('event-trigger_'):
self.feature_generator = EventTriggerFeatureGenerator(extractor_params, self.hyper_parameters, self.feature_setting, self.domain)
self.example_generator = EventTriggerExampleGenerator(self.domain, params, extractor_params,
self.hyper_parameters)
elif self.model_type.startswith('event-argument_'):
self.feature_generator = EventArgumentFeatureGenerator(extractor_params, self.hyper_parameters, self.feature_setting)
self.example_generator = EventArgumentExampleGenerator(self.domain, params, extractor_params,
self.hyper_parameters)
elif self.model_type.startswith('event-pair_'):
self.feature_generator = EventPairFeatureGenerator(extractor_params)
self.example_generator = EventPairExampleGenerator(self.domain, params, extractor_params,
self.hyper_parameters)
elif self.model_type.startswith('event-event-relation_'):
self.feature_generator = EventEventRelationFeatureGenerator(extractor_params, self.hyper_parameters, self.feature_setting)
self.example_generator = EventEventRelationExampleGenerator(self.domain, params, extractor_params,
self.hyper_parameters)
elif self.model_type.startswith('entity-entity-relation_'):
self.feature_generator = EntityRelationFeatureGenerator(extractor_params, self.hyper_parameters, self.feature_setting)
self.example_generator = EntityRelationExampleGenerator(self.domain, params, extractor_params,
self.hyper_parameters)
elif self.model_type.startswith('event-framepair_'):
self.feature_generator = EventFramePairFeatureGenerator(extractor_params)
self.example_generator = EventFramePairExampleGenerator(self.domain, params, extractor_params,
self.hyper_parameters)
elif self.model_type.startswith('entitycoref_'):
self.feature_generator = EntityCorefFeatureGenerator(extractor_params, self.hyper_parameters, self.feature_setting)
self.example_generator = EntityCorefExampleGenerator(self.domain, params, extractor_params,
self.hyper_parameters)
elif self.model_type.startswith('eventcoref_'):
self.feature_generator = EventCorefFeatureGenerator(extractor_params, self.hyper_parameters, self.feature_setting)
self.example_generator = EventCorefExampleGenerator(self.domain, params, extractor_params,
self.hyper_parameters)
elif self.model_type.startswith('oregon'): # TODO hack, until YS has time to properly integrate after BETTER eval
pass
elif self.model_type.startswith('sequence_'):
self.feature_generator = SequenceFeatureGenerator(extractor_params, self.hyper_parameters, self.feature_setting, self.extraction_model.tokenizer, self.domain)
self.example_generator = SequenceExampleGenerator(self.domain, params, extractor_params,
self.hyper_parameters)
else:
raise RuntimeError('Extractor model type: {} not implemented.'.format(self.model_type))
self.extraction_model_last_layer = None
""":type: keras.models.Model"""
self.emit_vectors = extractor_params.get('output_vectors', False)
self.class_thresholds = None
# load saved thresholds from file
self._build_threshold_vector()
# use a global threshold value if they were not loaded
if self.class_thresholds is None:
logging.info('Using global threshold override for {}'.format(
self.extractor_name))
# use defaults, if no global override given in extractor parameters
if self.class_thresholds_global < 0.0:
logging.info('Using default thresholds for {}'.format(
self.extractor_name))
self.class_thresholds_global = 0.5
number_of_classes = len(self.domain.event_types.keys())
logging.info('- global threshold ={}'.format(self.class_thresholds_global))
self.class_thresholds = np.asarray(
[self.class_thresholds_global] * number_of_classes)
def _build_threshold_vector(self):
path = self.class_thresholds_path
if path is not None and os.path.isfile(str(path)):
if path.endswith('.npz'):
self.class_thresholds = np.load(str(path))['thresholds']
print('Loaded saved thresholds from NPZ for {}'.format(
self.extractor_name))
elif path.endswith('.json'):
number_of_classes = len(self.domain.event_types.keys())
self.class_thresholds = np.asarray([0.5] * number_of_classes)
with codecs.open(path, 'r', encoding='utf8') as fh:
thresholds_json = json.load(fh)
for label, threshold in thresholds_json.items():
try:
index = self.domain.get_event_type_index(label)
self.class_thresholds[index] = float(threshold)
except ValueError as e:
print('The following error occurred while loading '
'thresholds from json and will be ignored:\n'
'{}'.format(e))
print('Loaded saved thresholds from JSON for {}'.format(
self.extractor_name))
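# Illustrative shape of the thresholds JSON consumed above; the label names
# are hypothetical examples, not taken from this repo:
#   {"Conflict.Attack": 0.7, "Movement.Transport": 0.4}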
def make_last_layer_model(self):
if self.extraction_model_last_layer is not None:
print("Last layer of model has already been built")
return
keras_model = self.extraction_model
if type(keras_model) is not KerasModel:
keras_model = keras_model.model
""":type: keras.models.Model"""
print("Original model:")
try:
print(keras_model.summary())
except TypeError:
print("Keras encountered an error when trying to print the model "
"summary. Skipping this printout...")
self.extraction_model_last_layer = KerasModel(
inputs=keras_model.input,
outputs=keras_model.layers[-2].output)
print("Copy model:")
try:
print(self.extraction_model_last_layer.summary())
except TypeError:
print("Keras encountered an error when trying to print the copy's "
"summary. Skipping this printout...")
def get_embeddings(self, examples, data_list):
ret = []
self.make_last_layer_model()
vectors = self.extraction_model_last_layer.predict(data_list)
for i, ex in enumerate(examples):
output_vector = vectors[i, :]
ret.append(output_vector)
return ret
def | (self):
try:
trained_keras_model = keras_load_model(self.model_file)
except ValueError:
custom_objects = keras_custom_objects
trained_keras_model = keras_load_model(self.model_file, custom_objects)
weights = trained_keras_model.get_weights()
new_weights = []
for i, w in enumerate(weights):
pretrained = self.extraction_model.layers.pretrained_embeddings
using_pretrained = pretrained is not None
if using_pretrained and i > 1 and w.shape[0] == pretrained.shape[0]:
# TODO retrain models to avoid this hack
pass
else:
new_weights.append(w)
weights = new_weights
# for i, w in enumerate(weights):
# print(i, w.shape)
self.extraction_model.model.set_weights(weights)
| load_keras |
rawfile_unsafe.go | package rawfile
import (
tcpip "github.com/qxcheng/net-protocol/protocol"
"syscall"
"unsafe"
)
// NonBlockingWrite writes the given buffer to a file descriptor. It fails if
// partial data is written.
func NonBlockingWrite(fd int, buf []byte) *tcpip.Error {
var ptr unsafe.Pointer
if len(buf) > 0 {
ptr = unsafe.Pointer(&buf[0])
}
_, _, e := syscall.RawSyscall(syscall.SYS_WRITE, uintptr(fd), uintptr(ptr), uintptr(len(buf)))
if e != 0 {
return TranslateErrno(e)
}
return nil
}
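// Usage sketch (illustrative): fd and pkt are placeholders for a raw socket
// descriptor and a serialized packet; they are not defined in this file.
//
//	if err := NonBlockingWrite(fd, pkt); err != nil {
//		// handle the *tcpip.Error
//	}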
// NonBlockingWrite2 writes up to two byte slices to a file descriptor in a
// single syscall. It fails if partial data is written.
func NonBlockingWrite2(fd int, b1, b2 []byte) *tcpip.Error {
if len(b2) == 0 {
return NonBlockingWrite(fd, b1)
}
// We have two buffers. Build the iovec that represents them and issue
// a writev syscall.
iovec := [...]syscall.Iovec{
{
Base: &b1[0],
Len: uint64(len(b1)),
},
{
Base: &b2[0],
Len: uint64(len(b2)),
},
}
_, _, e := syscall.RawSyscall(syscall.SYS_WRITEV, uintptr(fd), uintptr(unsafe.Pointer(&iovec[0])), uintptr(len(iovec)))
if e != 0 {
return TranslateErrno(e)
}
return nil
}
type pollEvent struct {
fd int32
events int16
revents int16
}
// BlockingRead reads from a file descriptor that is set up as non-blocking. If
// no data is available, it will block in a poll() syscall until the file
// descriptor becomes readable.
func BlockingRead(fd int, b []byte) (int, *tcpip.Error) {
for {
n, _, e := syscall.RawSyscall(syscall.SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)))
if e == 0 {
return int(n), nil
}
event := pollEvent{
fd: int32(fd),
events: 1,
}
_, e = blockingPoll(&event, 1, -1)
if e != 0 && e != syscall.EINTR {
return 0, TranslateErrno(e)
}
} | // BlockingReadv reads from a file descriptor that is set up as non-blocking and
// stores the data in a list of iovecs buffers. If no data is available, it will
// block in a poll() syscall until the file descriptor becomes readable.
func BlockingReadv(fd int, iovecs []syscall.Iovec) (int, *tcpip.Error) {
for {
n, _, e := syscall.RawSyscall(syscall.SYS_READV, uintptr(fd), uintptr(unsafe.Pointer(&iovecs[0])), uintptr(len(iovecs)))
if e == 0 {
return int(n), nil
}
event := pollEvent{
fd: int32(fd),
events: 1,
}
_, e = blockingPoll(&event, 1, -1)
if e != 0 && e != syscall.EINTR {
return 0, TranslateErrno(e)
}
}
} | }
|
cephfs_vol_mgmt_subvolume_gid_uid.py | import json
import random
import string
import traceback
from tests.cephfs.cephfs_utilsV1 import FsUtils
from utility.log import Log
log = Log(__name__)
def | (ceph_cluster, **kw):
"""
Test operation:
1. Create a subvolume
2. Check info for the subvolume
3. Check if gid and uid are set to 0
"""
try:
tc = "CEPH-83574181"
log.info(f"Running CephFS tests for BZ-{tc}")
fs_util = FsUtils(ceph_cluster)
clients = ceph_cluster.get_ceph_objects("client")
client1 = clients[0]
fs_details = fs_util.get_fs_info(client1)
if not fs_details:
fs_util.create_fs(client1, "cephfs")
fs_util.auth_list([client1])
subvolume_name_generate = "".join(
random.choice(string.ascii_lowercase + string.digits)
for _ in list(range(5))
)
subvolume = {
"vol_name": "cephfs",
"subvol_name": f"subvol_{subvolume_name_generate}",
"size": "5368706371",
}
fs_util.create_subvolume(client1, **subvolume, check_ec=False)
c_out, c_err = client1.exec_command(
sudo=True,
cmd=f"ceph fs subvolume info cephfs subvol_{subvolume_name_generate}",
)
c_out_decoded = json.loads(c_out)
gid = c_out_decoded["gid"]
uid = c_out_decoded["uid"]
if gid != 0 or uid != 0:
return 1
return 0
except Exception as e:
log.info(e)
log.info(traceback.format_exc())
return 1
| run |
worklet.js | 'use strict';
class | extends AudioWorkletNode {
constructor(context) {
super(context, 'injector-processor');
this.port.onmessage = (event) => {
const { type } = event.data;
switch (type) {
case 'ready':
break;
default:
console.warn(
`web-audio-injector:worklet] Unhandled message type: ${type}`,
);
console.log('[web-audio-injector:worklet]', event.data);
}
};
}
postMessage(msg) {
this.port.postMessage(msg);
}
}
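// Usage sketch (illustrative): assumes the processor module that registers
// 'injector-processor' was already loaded via context.audioWorklet.addModule;
// `context` is a placeholder AudioContext.
// const node = new WebAudioInjectorNode(context);
// node.postMessage({ type: 'init' });
// node.connect(context.destination);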
export default WebAudioInjectorNode;
| WebAudioInjectorNode |
main.rs | // Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
/// Rust Puppet, receiving commands to drive the Rust Inspect library.
///
/// This code doesn't check for illegal commands such as deleting a node
/// that doesn't exist. Illegal commands should be (and can be) filtered
/// within the Validator program by running the command sequence against the
/// local ("data::Data") implementation before sending them to the puppets.
use fuchsia_inspect::Property as UsablePropertyTrait;
use {
anyhow::{format_err, Context as _, Error},
fidl::endpoints::{create_request_stream, DiscoverableService},
fidl_fuchsia_inspect::TreeMarker,
fidl_fuchsia_io::{
DirectoryMarker, MODE_TYPE_DIRECTORY, OPEN_RIGHT_READABLE, OPEN_RIGHT_WRITABLE,
},
fidl_test_inspect_validate::*,
fuchsia_async as fasync,
fuchsia_component::server::{ServiceFs, ServiceObjTrait},
fuchsia_inspect::*,
fuchsia_syslog as syslog,
fuchsia_zircon::HandleBased,
futures::prelude::*,
log::*,
std::collections::HashMap,
std::sync::Arc,
vfs::{
directory::{
entry::DirectoryEntry,
helper::DirectlyMutable,
mutable::simple::{simple, Simple},
},
execution_scope::ExecutionScope,
path::Path,
service::host,
},
};
#[derive(Debug)]
enum Property {
// The names StringProperty, IntLinearHistogramProperty, etc. are built by macros such as
// create_linear_histogram_property_fn! in src/lib/diagnostics/inspect/rust/src/lib.rs.
// You won't find them by searching the codebase; instead search that file for macro_rules!
// and go from there.
// They're documented in https://fuchsia-docs.firebaseapp.com/rust/fuchsia_inspect/index.html.
String(StringProperty),
Int(IntProperty),
Uint(UintProperty),
Double(DoubleProperty),
Bytes(BytesProperty),
Bool(BoolProperty),
IntArray(IntArrayProperty),
UintArray(UintArrayProperty),
DoubleArray(DoubleArrayProperty),
IntLinearHistogram(IntLinearHistogramProperty),
UintLinearHistogram(UintLinearHistogramProperty),
DoubleLinearHistogram(DoubleLinearHistogramProperty),
IntExponentialHistogram(IntExponentialHistogramProperty),
UintExponentialHistogram(UintExponentialHistogramProperty),
DoubleExponentialHistogram(DoubleExponentialHistogramProperty),
}
struct Actor {
inspector: Inspector,
nodes: HashMap<u32, Node>,
properties: HashMap<u32, Property>,
lazy_children: HashMap<u32, LazyNode>,
}
/// Handles publishing and unpublishing an inspect tree.
struct Publisher {
inspector: Option<Inspector>,
dir: Arc<Simple>,
}
impl Publisher {
fn new(dir: Arc<Simple>) -> Self {
Self { inspector: None, dir }
}
fn publish(&mut self, inspector: Inspector) {
self.inspector = Some(inspector.clone());
self.dir
.clone()
.add_entry(
TreeMarker::SERVICE_NAME,
host(move |stream| {
let inspector_clone = inspector.clone();
async move {
service::handle_request_stream(inspector_clone, stream)
.await
.expect("failed to run server");
}
.boxed()
}),
)
.expect("add entry");
}
fn unpublish(&mut self) {
if self.inspector.is_some() {
self.dir.clone().remove_entry(TreeMarker::SERVICE_NAME).expect("remove entry");
}
self.inspector = None;
}
}
impl Drop for Publisher {
fn drop(&mut self) {
self.unpublish();
}
}
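// Usage sketch (illustrative): publishing is tied to the Publisher's lifetime,
// so dropping it removes the fuchsia.inspect.Tree entry from the directory.
//
//     let mut publisher = Publisher::new(dir.clone());
//     publisher.publish(inspector.clone()); // adds the TreeMarker::SERVICE_NAME entry
//     drop(publisher);                      // Drop impl calls unpublish()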
impl Actor {
fn new(inspector: Inspector) -> Actor {
Actor {
inspector,
nodes: HashMap::new(),
properties: HashMap::new(),
lazy_children: HashMap::new(),
}
}
fn act(&mut self, action: Action) -> Result<(), Error> {
match action {
Action::CreateNode(CreateNode { parent, id, name }) => {
self.nodes.insert(id, self.find_parent(parent)?.create_child(name));
}
Action::DeleteNode(DeleteNode { id }) => {
self.nodes.remove(&id);
}
Action::CreateNumericProperty(CreateNumericProperty { parent, id, name, value }) => {
self.properties.insert(
id,
match value {
Number::IntT(n) => {
Property::Int(self.find_parent(parent)?.create_int(name, n))
}
Number::UintT(n) => {
Property::Uint(self.find_parent(parent)?.create_uint(name, n))
}
Number::DoubleT(n) => {
Property::Double(self.find_parent(parent)?.create_double(name, n))
}
unknown => return Err(format_err!("Unknown number type {:?}", unknown)),
},
);
}
Action::CreateBytesProperty(CreateBytesProperty { parent, id, name, value }) => {
self.properties.insert(
id,
Property::Bytes(self.find_parent(parent)?.create_bytes(name, value)),
);
}
Action::CreateStringProperty(CreateStringProperty { parent, id, name, value }) => {
self.properties.insert(
id,
Property::String(self.find_parent(parent)?.create_string(name, value)),
);
}
Action::CreateBoolProperty(CreateBoolProperty { parent, id, name, value }) => {
self.properties
.insert(id, Property::Bool(self.find_parent(parent)?.create_bool(name, value)));
}
Action::DeleteProperty(DeleteProperty { id }) => {
self.properties.remove(&id);
}
Action::SetNumber(SetNumber { id, value }) => {
match (self.find_property(id)?, value) {
(Property::Int(p), Number::IntT(v)) => p.set(v),
(Property::Uint(p), Number::UintT(v)) => p.set(v),
(Property::Double(p), Number::DoubleT(v)) => p.set(v),
unexpected => {
return Err(format_err!("Illegal types {:?} for SetNumber", unexpected))
}
};
}
Action::AddNumber(AddNumber { id, value }) => {
match (self.find_property(id)?, value) {
(Property::Int(p), Number::IntT(v)) => p.add(v),
(Property::Uint(p), Number::UintT(v)) => p.add(v),
(Property::Double(p), Number::DoubleT(v)) => p.add(v),
unexpected => {
return Err(format_err!("Illegal types {:?} for AddNumber", unexpected))
}
};
}
Action::SubtractNumber(SubtractNumber { id, value }) => {
match (self.find_property(id)?, value) {
(Property::Int(p), Number::IntT(v)) => p.subtract(v),
(Property::Uint(p), Number::UintT(v)) => p.subtract(v),
(Property::Double(p), Number::DoubleT(v)) => p.subtract(v),
unexpected => {
return Err(format_err!(
"Illegal types {:?} for SubtractNumber",
unexpected
))
}
};
}
Action::SetString(SetString { id, value }) => match self.find_property(id)? {
Property::String(p) => p.set(&value),
unexpected => {
return Err(format_err!("Illegal property {:?} for SetString", unexpected))
}
},
Action::SetBytes(SetBytes { id, value }) => match self.find_property(id)? {
Property::Bytes(p) => p.set(&value),
unexpected => {
return Err(format_err!("Illegal property {:?} for SetBytes", unexpected))
}
},
Action::SetBool(SetBool { id, value }) => match self.find_property(id)? {
Property::Bool(p) => p.set(value),
unexpected => {
return Err(format_err!("Illegal property {:?} for SetBool", unexpected))
}
},
Action::CreateArrayProperty(CreateArrayProperty {
parent,
id,
name,
slots,
number_type,
}) => {
self.properties.insert(
id,
match number_type {
NumberType::Int => Property::IntArray(
self.find_parent(parent)?.create_int_array(name, slots as usize),
),
NumberType::Uint => Property::UintArray(
self.find_parent(parent)?.create_uint_array(name, slots as usize),
),
NumberType::Double => Property::DoubleArray(
self.find_parent(parent)?.create_double_array(name, slots as usize),
),
},
);
}
Action::ArraySet(ArraySet { id, index, value }) => {
match (self.find_property(id)?, value) {
(Property::IntArray(p), Number::IntT(v)) => p.set(index as usize, v),
(Property::UintArray(p), Number::UintT(v)) => p.set(index as usize, v),
(Property::DoubleArray(p), Number::DoubleT(v)) => p.set(index as usize, v),
unexpected => {
return Err(format_err!("Illegal types {:?} for ArraySet", unexpected))
}
};
}
Action::ArrayAdd(ArrayAdd { id, index, value }) => {
match (self.find_property(id)?, value) {
(Property::IntArray(p), Number::IntT(v)) => p.add(index as usize, v),
(Property::UintArray(p), Number::UintT(v)) => p.add(index as usize, v),
(Property::DoubleArray(p), Number::DoubleT(v)) => p.add(index as usize, v),
unexpected => {
return Err(format_err!("Illegal types {:?} for ArrayAdd", unexpected))
}
};
}
Action::ArraySubtract(ArraySubtract { id, index, value }) => {
match (self.find_property(id)?, value) {
(Property::IntArray(p), Number::IntT(v)) => p.subtract(index as usize, v),
(Property::UintArray(p), Number::UintT(v)) => p.subtract(index as usize, v),
(Property::DoubleArray(p), Number::DoubleT(v)) => p.subtract(index as usize, v),
unexpected => {
return Err(format_err!("Illegal types {:?} for ArraySubtract", unexpected))
}
};
}
Action::CreateLinearHistogram(CreateLinearHistogram {
parent,
id,
name,
floor,
step_size,
buckets,
}) => {
let buckets = buckets as usize;
self.properties.insert(
id,
match (floor, step_size) {
(Number::IntT(floor), Number::IntT(step_size)) => {
Property::IntLinearHistogram(
self.find_parent(parent)?.create_int_linear_histogram(
name,
LinearHistogramParams { floor, step_size, buckets },
),
)
}
(Number::UintT(floor), Number::UintT(step_size)) => {
Property::UintLinearHistogram(
self.find_parent(parent)?.create_uint_linear_histogram(
name,
LinearHistogramParams { floor, step_size, buckets },
),
)
}
(Number::DoubleT(floor), Number::DoubleT(step_size)) => {
Property::DoubleLinearHistogram(
self.find_parent(parent)?.create_double_linear_histogram(
name,
LinearHistogramParams { floor, step_size, buckets },
),
)
}
unexpected => {
return Err(format_err!(
"Illegal types {:?} for CreateLinearHistogram",
unexpected
))
}
},
);
}
Action::CreateExponentialHistogram(CreateExponentialHistogram {
parent,
id,
name,
floor,
initial_step,
step_multiplier,
buckets,
}) => {
let buckets = buckets as usize;
self.properties.insert(
id,
match (floor, initial_step, step_multiplier) {
(
Number::IntT(floor),
Number::IntT(initial_step),
Number::IntT(step_multiplier),
) => Property::IntExponentialHistogram(
self.find_parent(parent)?.create_int_exponential_histogram(
name,
ExponentialHistogramParams {
floor,
initial_step,
step_multiplier,
buckets,
},
),
),
(
Number::UintT(floor),
Number::UintT(initial_step),
Number::UintT(step_multiplier),
) => Property::UintExponentialHistogram(
self.find_parent(parent)?.create_uint_exponential_histogram(
name,
ExponentialHistogramParams {
floor,
initial_step,
step_multiplier,
buckets,
},
),
),
(
Number::DoubleT(floor),
Number::DoubleT(initial_step),
Number::DoubleT(step_multiplier),
) => Property::DoubleExponentialHistogram(
self.find_parent(parent)?.create_double_exponential_histogram(
name,
ExponentialHistogramParams {
floor,
initial_step,
step_multiplier,
buckets,
},
),
),
unexpected => {
return Err(format_err!(
"Illegal types {:?} for CreateExponentialHistogram",
unexpected
))
}
},
);
}
Action::Insert(Insert { id, value }) => {
match (self.find_property(id)?, value) {
(Property::IntLinearHistogram(p), Number::IntT(v)) => p.insert(v),
(Property::UintLinearHistogram(p), Number::UintT(v)) => p.insert(v),
(Property::DoubleLinearHistogram(p), Number::DoubleT(v)) => p.insert(v),
(Property::IntExponentialHistogram(p), Number::IntT(v)) => p.insert(v),
(Property::UintExponentialHistogram(p), Number::UintT(v)) => p.insert(v),
(Property::DoubleExponentialHistogram(p), Number::DoubleT(v)) => p.insert(v),
unexpected => {
return Err(format_err!("Illegal types {:?} for Insert", unexpected))
}
};
}
Action::InsertMultiple(InsertMultiple { id, value, count }) => {
match (self.find_property(id)?, value) {
(Property::IntLinearHistogram(p), Number::IntT(v)) => {
p.insert_multiple(v, count as usize)
}
(Property::UintLinearHistogram(p), Number::UintT(v)) => {
p.insert_multiple(v, count as usize)
}
(Property::DoubleLinearHistogram(p), Number::DoubleT(v)) => {
p.insert_multiple(v, count as usize)
}
(Property::IntExponentialHistogram(p), Number::IntT(v)) => {
p.insert_multiple(v, count as usize)
}
(Property::UintExponentialHistogram(p), Number::UintT(v)) => {
p.insert_multiple(v, count as usize)
}
(Property::DoubleExponentialHistogram(p), Number::DoubleT(v)) => {
p.insert_multiple(v, count as usize)
}
unexpected => {
return Err(format_err!(
"Illegal types {:?} for InsertMultiple",
unexpected
))
}
};
}
unexpected => {
// "Illegal" is the appropriate response here, not "Unimplemented".
// Known-Unimplemented actions should be matched explicitly.
return Err(format_err!("Unexpected action {:?}", unexpected));
}
};
Ok(())
}
fn act_lazy(&mut self, lazy_action: LazyAction) -> Result<(), Error> {
match lazy_action {
LazyAction::CreateLazyNode(CreateLazyNode {
parent,
id,
name,
disposition,
actions,
}) => {
let parent = self.find_parent(parent)?;
let lazy_child = Self::create_lazy_node(&parent, name, disposition, actions)?;
self.lazy_children.insert(id, lazy_child);
}
LazyAction::DeleteLazyNode(DeleteLazyNode { id }) => {
self.lazy_children.remove(&id);
}
_ => {
return Err(format_err!("Unknown lazy action {:?}", lazy_action));
}
}
Ok(())
}
fn find_parent<'a>(&'a self, id: u32) -> Result<&'a Node, Error> {
if id == ROOT_ID {
Ok(self.inspector.root())
} else {
self.nodes.get(&id).ok_or_else(|| format_err!("Node {} not found", id))
}
}
    fn find_property<'a>(&'a self, id: u32) -> Result<&'a Property, Error> {
        self.properties.get(&id).ok_or_else(|| format_err!("Property {} not found", id))
    }
fn create_lazy_node(
parent: &Node,
name: String,
disposition: LinkDisposition,
actions: Vec<Action>,
) -> Result<LazyNode, Error> {
let mut actor = Actor::new(Inspector::new());
for action in actions.into_iter() {
if let Err(err) = actor.act(action) {
return Err(format_err!("Failed to perform action on lazy node: {:?}", err));
}
}
let callback = move || {
let clone = actor.inspector.clone();
async move { Ok(clone) }.boxed()
};
Ok(match disposition {
LinkDisposition::Child => parent.create_lazy_child(&name, callback),
LinkDisposition::Inline => parent.create_lazy_values(&name, callback),
})
}
}
fn new_inspector(params: &InitializationParams) -> Inspector {
match params.vmo_size {
Some(size) => Inspector::new_with_size(size as usize),
None => Inspector::new(),
}
}
async fn run_driver_service(
mut stream: ValidateRequestStream,
mut publisher: Publisher,
) -> Result<(), Error> {
let mut actor_maybe: Option<Actor> = None;
while let Some(event) = stream.try_next().await? {
match event {
ValidateRequest::Initialize { params, responder } => {
if actor_maybe.is_some() {
responder.send(None, TestResult::Illegal).context("Double initialize call")?;
} else {
let actor = Actor::new(new_inspector(¶ms));
responder
.send(
actor.inspector.duplicate_vmo().map(|v| v.into_handle()),
TestResult::Ok,
)
.context("responding to initialize")?;
actor_maybe = Some(actor);
}
}
ValidateRequest::InitializeTree { params, responder } => {
let actor = Actor::new(new_inspector(¶ms));
let (tree, request_stream) = create_request_stream::<TreeMarker>()?;
service::spawn_tree_server(actor.inspector.clone(), request_stream);
responder.send(Some(tree), TestResult::Ok)?;
actor_maybe = Some(actor);
}
ValidateRequest::Act { action, responder } => {
let result = if let Some(a) = &mut actor_maybe {
match a.act(action) {
Ok(()) => TestResult::Ok,
Err(error) => {
warn!("Act saw illegal condition {:?}", error);
TestResult::Illegal
}
}
} else {
TestResult::Illegal
};
responder.send(result)?;
}
ValidateRequest::ActLazy { lazy_action, responder } => {
let result = if let Some(a) = &mut actor_maybe {
match a.act_lazy(lazy_action) {
Ok(()) => TestResult::Ok,
Err(error) => {
warn!("ActLazy saw illegal condition {:?}", error);
TestResult::Illegal
}
}
} else {
TestResult::Illegal
};
responder.send(result)?;
}
ValidateRequest::Publish { responder } => match &actor_maybe {
Some(ref actor) => {
publisher.publish(actor.inspector.clone());
responder.send(TestResult::Ok)?;
}
None => {
responder.send(TestResult::Illegal)?;
}
},
ValidateRequest::Unpublish { responder } => {
publisher.unpublish();
responder.send(TestResult::Ok)?;
}
}
}
Ok(())
}
enum IncomingService {
Validate(ValidateRequestStream),
// ... more services here
}
fn make_diagnostics_dir<T: ServiceObjTrait>(fs: &mut ServiceFs<T>) -> Arc<Simple> {
let (proxy, server) =
fidl::endpoints::create_proxy::<DirectoryMarker>().expect("create directory marker");
let dir = simple();
let server_end = server.into_channel().into();
let scope = ExecutionScope::new();
dir.clone().open(
scope,
OPEN_RIGHT_READABLE | OPEN_RIGHT_WRITABLE,
MODE_TYPE_DIRECTORY,
Path::empty(),
server_end,
);
fs.add_remote("diagnostics", proxy);
dir
}
#[fasync::run_singlethreaded]
async fn main() -> Result<(), Error> {
syslog::init_with_tags(&[]).expect("should not fail");
info!("Puppet starting");
let mut fs = ServiceFs::new_local();
fs.dir("svc").add_fidl_service(IncomingService::Validate);
let dir = make_diagnostics_dir(&mut fs);
fs.take_and_serve_directory_handle()?;
// Set concurrent > 1, otherwise additional requests hang on the completion of the Validate
// service.
const MAX_CONCURRENT: usize = 4;
let fut = fs.for_each_concurrent(MAX_CONCURRENT, |IncomingService::Validate(stream)| {
run_driver_service(stream, Publisher::new(dir.clone()))
.unwrap_or_else(|e| error!("ERROR in puppet's main: {:?}", e))
});
fut.await;
Ok(())
}
dialogflow.py | # -*- coding: utf-8 -*-
#
# Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains the DialogflowConverter class used to convert Dialogflow projects
into Mindmeld projects"""
import json
import logging
import os
import re
from sklearn.model_selection import train_test_split
from mindmeld.converter.converter import Converter
logger = logging.getLogger(__name__)
class DialogflowConverter(Converter):
"""The class is a sub class of the abstract Converter class. This class
contains the methods required to convert a Dialogflow project into a MindMeld project
"""
sys_entity_map = {
"@sys.date-time": "sys_interval",
"@sys.date": "sys_time",
"@sys.date-period": "sys_interval",
"@sys.time": "sys_time",
"@sys.time-period": "sys_duration",
"@sys.duration": "sys_duration",
"@sys.number": "sys_number",
"@sys.cardinal": "sys_number",
"@sys.ordinal": "sys_ordinal",
"@sys.unit-currency": "sys_amount-of-money",
"@sys.unit-volume": "sys_volume",
"@sys.email": "sys_email",
"@sys.phone-number": "sys_phone-number",
"@sys.url": "sys_url",
}
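    # Illustrative example: a Dialogflow annotation tagged "@sys.date" maps to
    # the MindMeld system entity "sys_time"; _create_intent_file then emits the
    # annotated token as "{tomorrow|sys_time_entries_en}" for language "en".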
# TODO: provide support for entities listed in sys_entity_map_todo
sys_entity_map_todo = [
"@sys.number-integer",
"@sys.number-sequence",
"@sys.flight-number",
"@sys.unit-area",
"@sys.unit-length",
"@sys.unit-speed",
"@sys.unit-information",
"@sys.percentage",
"@sys.temperature",
"@sys.duration",
"@sys.age",
"@sys.currency-name",
"@sys.unit-area-name",
"@sys.unit-length-name",
"@sys.unit-speed-name",
"@sys.unit-volume-name",
"@sys.unit-weight-name",
"@sys.unit-information-name",
"@sys.address",
"@sys.zip-code",
"@sys.geo-capital",
"@sys.geo-country",
"@sys.geo-country-code",
"@sys.geo-city",
"@sys.geo-state",
"@sys.geo-city",
"@sys.geo-state",
"@sys.place-attraction",
"@sys.airport",
"@sys.location",
"@sys.given-name",
"@sys.last-name",
"@sys.person",
"@sys.music-artist",
"@sys.music-genre",
"@sys.color",
"@sys.language",
"@sys.any",
]
def __init__(self, dialogflow_project_directory, mindmeld_project_directory):
if os.path.exists(os.path.dirname(dialogflow_project_directory)):
self.dialogflow_project_directory = dialogflow_project_directory
self.mindmeld_project_directory = mindmeld_project_directory
self.directory = os.path.dirname(os.path.realpath(__file__))
self.entities_list = set()
self.intents_list = set()
else:
msg = "`{dialogflow_project_directory}` does not exist. Please verify."
msg = msg.format(dialogflow_project_directory=dialogflow_project_directory)
raise FileNotFoundError(msg)
def create_mindmeld_directory(self):
self.create_directory(self.mindmeld_project_directory)
self.create_directory(os.path.join(self.mindmeld_project_directory, "data"))
self.create_directory(os.path.join(self.mindmeld_project_directory, "domains"))
self.create_directory(
os.path.join(self.mindmeld_project_directory, "domains", "general")
)
self.create_directory(os.path.join(self.mindmeld_project_directory, "entities"))
# =========================
# create training data (entities, intents)
# =========================
def _create_entities_directories(self, entities):
""" Creates directories + files for all languages/files.
Currently does not use meta data in entityName.json files (the keys in var entities).
"""
for languages in entities.values():
for sub in languages.values():
dialogflow_entity_file = os.path.join(
self.dialogflow_project_directory, "entities", sub + ".json"
)
mindmeld_entity_directory_name = self.clean_check(
sub, self.entities_list
)
mindmeld_entity_directory = os.path.join(
self.mindmeld_project_directory,
"entities",
mindmeld_entity_directory_name,
)
self.create_directory(mindmeld_entity_directory)
self._create_entity_file(
dialogflow_entity_file, mindmeld_entity_directory
)
@staticmethod
def _create_entity_file(dialogflow_entity_file, mindmeld_entity_directory):
source_en = open(dialogflow_entity_file, "r")
target_gazetteer = open(
os.path.join(mindmeld_entity_directory, "gazetteer.txt"), "w"
)
target_mapping = open(
os.path.join(mindmeld_entity_directory, "mapping.json"), "w"
)
datastore = json.load(source_en)
mapping_dict = {"entities": []}
for item in datastore:
new_dict = {}
while ("value" in item) and (item["value"] in item["synonyms"]):
item["synonyms"].remove(item["value"])
new_dict["whitelist"] = item["synonyms"]
new_dict["cname"] = item["value"]
mapping_dict["entities"].append(new_dict)
target_gazetteer.write(item["value"] + "\n")
json.dump(mapping_dict, target_mapping, ensure_ascii=False, indent=2)
source_en.close()
target_gazetteer.close()
target_mapping.close()
def _create_intents_directories(self, intents):
""" Creates directories + files for all languages/files."""
for languages in intents.values():
for language, sub in languages.items():
dialogflow_intent_file = os.path.join(
self.dialogflow_project_directory, "intents", sub + ".json"
)
mindmeld_intent_directory_name = self.clean_check(
sub, self.intents_list
)
mindmeld_intent_directory = os.path.join(
self.mindmeld_project_directory,
"domains",
"general",
mindmeld_intent_directory_name,
)
self.create_directory(mindmeld_intent_directory)
self._create_intent_file(
dialogflow_intent_file, mindmeld_intent_directory, language
)
def _create_intent_file(
self, dialogflow_intent_file, mindmeld_intent_directory, language
):
source_en = open(dialogflow_intent_file, "r")
target_test = open(os.path.join(mindmeld_intent_directory, "test.txt"), "w")
target_train = open(os.path.join(mindmeld_intent_directory, "train.txt"), "w")
datastore = json.load(source_en)
all_text = []
for usersay in datastore:
sentence = ""
for texts in usersay["data"]:
df_text = texts["text"]
if "meta" in texts and texts["meta"] != "@sys.ignore":
df_meta = texts["meta"]
if re.match(
"(@sys.).+", df_meta
): # if text is a dialogflow sys entity
if df_meta in DialogflowConverter.sys_entity_map:
mm_meta = DialogflowConverter.sys_entity_map[df_meta]
else:
mm_meta = "[DNE: {sysEntity}]".format(sysEntity=df_meta[1:])
logger.info(
"Unfortunately mindmeld does not currently support"
"%s as a sys entity."
"Please create an entity for this.",
df_meta[1:],
)
entity_type = self.clean_name(mm_meta) + "_entries_" + language
part = "{" + df_text + "|" + entity_type + "}"
else:
entity_type = (
self.clean_name(df_meta[1:]) + "_entries_" + language
)
part = "{" + df_text + "|" + entity_type + "}"
else:
part = df_text
sentence += part
all_text.append(sentence)
train, test = train_test_split(all_text, test_size=0.2)
target_test.write("\n".join(test))
target_train.write("\n".join(train))
source_en.close()
target_test.close()
target_train.close()
def _get_file_names(self, level):
""" Gets the names of the entities from Dialogflow as a dictionary.
levels (str): either "entities" or "intents"
ex. if we had the following files in our entities directory:
["test.json", "test_entries_en.json", "test_entries_de.json"]
it returns:
{'test': {'en': 'test_entries_en', 'de': 'test_entries_de'}} """
directory = os.path.join(self.dialogflow_project_directory, level)
files = os.listdir(directory)
w = {"entities": "entries", "intents": "usersays"}
p = r".+(?<=(_" + w[level] + "_))(.*)(?=(.json))"
info = {}
for name in files:
match = re.match(p, name)
if match:
isbase = False
base = name[: match.start(1)]
language = match.group(2)
else:
isbase = True
base = name[:-5]
if base not in info:
info[base] = {}
if not isbase:
info[base][language] = name[:-5]
return info
def create_mindmeld_training_data(self):
entities = self._get_file_names("entities")
self._create_entities_directories(entities)
intents = self._get_file_names("intents")
self._create_intents_directories(intents)
# =========================
# create init
# =========================
@staticmethod
def create_handle(params):
return "@app.handle(" + params + ")"
@staticmethod
def create_header(function_name):
return "def " + function_name + "(request, responder):"
@staticmethod
def create_function(handles, function_name, replies):
assert isinstance(handles, list)
result = ""
for handle in handles:
result += DialogflowConverter.create_handle(handle) + "\n"
result += DialogflowConverter.create_header(function_name) + "\n"
result += " " + "replies = {}".format(replies) + "\n"
result += " " + "responder.reply(replies)"
return result
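    # For example (illustrative), create_function(["intent='greet'"], "greet_en", ['hi'])
    # returns the source text:
    #   @app.handle(intent='greet')
    #   def greet_en(request, responder):
    #       replies = ['hi']
    #       responder.reply(replies)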
@staticmethod
def clean_name(name):
""" Takes in a string and returns a valid folder name (no spaces, all lowercase)."""
name = re.sub(r"[^\w\s-]", "", name).strip().lower()
name = re.sub(r"[-\s]+", "_", name)
return name
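    # e.g. (illustrative) clean_name("Daylight Savings-Time!") -> "daylight_savings_time"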
def clean_check(self, name, lst):
""" Takes in a list of strings and a name.
Returns name cleaned if the cleaned name is not found in lst."""
cleaned = self.clean_name(name)
if cleaned not in lst:
lst.add(cleaned)
return cleaned
else:
logger.error(
"%s name has been created twice. Please ensure there "
"are no duplicate names in the dialogflow files and "
"filenames are valid (no spaces or special characters)",
cleaned,
)
def create_mindmeld_init(self):
with open(
os.path.join(self.mindmeld_project_directory, "__init__.py"), "w"
) as target:
begin_info = [
"# -*- coding: utf-8 -*-",
'"""This module contains the MindMeld application"""',
"from mindmeld import Application",
"app = Application(__name__)",
"__all__ = ['app']",
]
for info, spacing in zip(begin_info, [1, 2, 1, 1, 0]):
target.write(info + "\n" * spacing)
intents = self._get_file_names("intents")
for i, main in enumerate(intents.keys()):
df_main = os.path.join(
self.dialogflow_project_directory, "intents", main + ".json"
)
with open(df_main) as source:
if "usersays" in df_main:
logger.error(
"Please check if your intent file"
"names are correctly labeled."
)
datastore = json.load(source)
replies = []
for response in datastore["responses"]:
for message in response["messages"]:
language = message["lang"]
if "speech" in message:
data = message["speech"]
replies = data if isinstance(data, list) else [data]
if datastore["fallbackIntent"]:
function_name = "default" + "_" + language
if language == "en":
# TODO: support multiple defaults for languages
handles = [
"default=True",
"intent='unsupported'",
]
else:
handles = ["intent='unsupported'"]
else:
function_name = "renameMe" + str(i) + "_" + language
handles = [
"intent="
+ "'"
+ self.clean_name(datastore["name"])
+ "_usersays_"
+ language
+ "'"
]
target.write(
"\n\n\n"
+ self.create_function(
handles=handles,
function_name=function_name,
replies=replies,
)
)
target.write("\n")
# =========================
# convert project
# =========================
    def convert_project(self):
""" Converts a Dialogflow project into a MindMeld project.
Dialogflow projects consist of entities and intents.
note on languages:
Dialogflow supports multiple languages and locales. They store their training
data for different languages in different files. So, the name of each training
file ends with a meta tag, two letters long for language, and an additional
two letters for dialect (if applicable). For example, a file ending in "_en-au"
indicates it's in English (Australia). Below we use "la" to represent this
meta tag.
entities folder contains:
entityName.json - Meta data about entityName for all languages.
            entityName_entries_la.json - One for each language, contains entity mappings.
        intents folder contains:
intentName.json - Contains rules, information about conversation flow, meta data.
Contains previously mentioned information and responses for all languages.
intentName_usersays_la.json - one for each language,
contains training data to recognize intentName
Limitations:
- The converter is unable to create an entity when it encounters an
unrecognized entity (an entity not defined under entities folder
or system entities), and labels such entities as DNE in training data.
- The converter currently does not automatically convert features like
slot filling, contexts, and follow-up intents. Users can still implement such
features and more.
            - Information in agent.json is not copied over.
            - There is no official support for multiple languages, but the
              converter can still successfully convert Dialogflow bots that
              support multiple languages; users can implement language handling
              themselves.
        MindMeld:
- Users can store data locally
- Users can build a knowledge base (currently beta in Dialogflow).
- Users can configure the machine learning models to best suit their needs.
- Users have more flexibility in defining their own features, including
ones like slot filling, contexts, and follow-up intents.
"""
logger.info("Converting project.")
# Create project directory with sub folders
self.create_mindmeld_directory()
# Transfer over test data from Dialogflow project and reformat to Mindmeld project
self.create_mindmeld_training_data()
file_loc = os.path.dirname(os.path.realpath(__file__))
self.create_config(self.mindmeld_project_directory, file_loc)
self.create_main(self.mindmeld_project_directory, file_loc)
self.create_mindmeld_init()
logger.info("Project converted.")
kbucket.rs | // Copyright 2018 Parity Technologies (UK) Ltd.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//! Implementation of a Kademlia routing table as used by a single peer
//! participating in a Kademlia DHT.
//!
//! The entry point for the API of this module is a [`KBucketsTable`].
//!
//! ## Pending Insertions
//!
//! When the bucket associated with the `Key` of an inserted entry is full
//! but contains disconnected nodes, it accepts a [`PendingEntry`].
//! Pending entries are inserted lazily when their timeout is found to be expired
//! upon querying the `KBucketsTable`. When that happens, the `KBucketsTable` records
//! an [`AppliedPending`] result which must be consumed by calling [`take_applied_pending`]
//! regularly and / or after performing lookup operations like [`entry`] and [`closest`].
//!
//! [`entry`]: kbucket::KBucketsTable::entry
//! [`closest`]: kbucket::KBucketsTable::closest
//! [`AppliedPending`]: kbucket::AppliedPending
//! [`KBucketsTable`]: kbucket::KBucketsTable
//! [`take_applied_pending`]: kbucket::KBucketsTable::take_applied_pending
//! [`PendingEntry`]: kbucket::PendingEntry
// [Implementation Notes]
//
// 1. Routing Table Layout
//
// The routing table is currently implemented as a fixed-size "array" of
// buckets, ordered by increasing distance relative to a local key
// that identifies the local peer. This is an often-used, simplified
// implementation that approximates the properties of the b-tree (or prefix tree)
// implementation described in the full paper [0], whereby buckets are split on-demand.
// This should be treated as an implementation detail, however, so that the
// implementation may change in the future without breaking the API.
//
// 2. Replacement Cache
//
// In this implementation, the "replacement cache" for unresponsive peers
// consists of a single entry per bucket. Furthermore, this implementation is
// currently tailored to connection-oriented transports, meaning that the
// "LRU"-based ordering of entries in a bucket is actually based on the last reported
// connection status of the corresponding peers, from least-recently (dis)connected to
// most-recently (dis)connected, and controlled through the `Entry` API. As a result,
// the nodes in the buckets are not reordered as a result of RPC activity, but only as a
// result of nodes being marked as connected or disconnected. In particular,
// if a bucket is full and contains only entries for peers that are considered
// connected, no pending entry is accepted. See the `bucket` submodule for
// further details.
//
// [0]: https://pdos.csail.mit.edu/~petar/papers/maymounkov-kademlia-lncs.pdf
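// Usage sketch (illustrative; mirrors the tests at the bottom of this file):
//
//     let local_key = Key::from(PeerId::random());
//     let mut table = KBucketsTable::<_, ()>::new(local_key, Duration::from_secs(60));
//     let peer = Key::from(PeerId::random());
//     if let Entry::Absent(e) = table.entry(&peer) {
//         let _ = e.insert((), NodeStatus::Connected);
//     }
//     // Consume lazily-applied pending entries after lookups:
//     while let Some(_applied) = table.take_applied_pending() {}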
mod bucket;
mod entry;
mod key;
pub use entry::*;
use arrayvec::{self, ArrayVec};
use bucket::KBucket;
use std::collections::VecDeque;
use std::time::{Duration, Instant};
/// Maximum number of k-buckets.
const NUM_BUCKETS: usize = 256;
/// A `KBucketsTable` represents a Kademlia routing table.
#[derive(Debug, Clone)]
pub struct KBucketsTable<TKey, TVal> {
/// The key identifying the local peer that owns the routing table.
local_key: TKey,
/// The buckets comprising the routing table.
buckets: Vec<KBucket<TKey, TVal>>,
/// The list of evicted entries that have been replaced with pending
/// entries since the last call to [`KBucketsTable::take_applied_pending`].
applied_pending: VecDeque<AppliedPending<TKey, TVal>>
}
/// A (type-safe) index into a `KBucketsTable`, i.e. a non-negative integer in the
/// interval `[0, NUM_BUCKETS)`.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
struct BucketIndex(usize);
impl BucketIndex {
/// Creates a new `BucketIndex` for a `Distance`.
///
/// The given distance is interpreted as the distance from a `local_key` of
/// a `KBucketsTable`. If the distance is zero, `None` is returned, in
/// recognition of the fact that the only key with distance `0` to a
/// `local_key` is the `local_key` itself, which does not belong in any
/// bucket.
fn new(d: &Distance) -> Option<BucketIndex> {
(NUM_BUCKETS - d.0.leading_zeros() as usize)
.checked_sub(1)
.map(BucketIndex)
}
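    // Worked example (illustrative): for a distance of 128 (= 0b1000_0000, highest
    // set bit at position 7), d.0.leading_zeros() == 248 on a 256-bit integer,
    // so NUM_BUCKETS - 248 - 1 == 7 and the key belongs in bucket 7.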
/// Gets the index value as an unsigned integer.
fn get(&self) -> usize {
self.0
}
/// Generates a random distance that falls into the bucket for this index.
fn rand_distance(&self, rng: &mut impl rand::Rng) -> Distance {
let mut bytes = [0u8; 32];
let quot = self.0 / 8;
for i in 0 .. quot {
bytes[31 - i] = rng.gen();
}
let rem = (self.0 % 8) as u32;
let lower = usize::pow(2, rem);
let upper = usize::pow(2, rem + 1);
bytes[31 - quot] = rng.gen_range(lower, upper) as u8;
Distance(bigint::U256::from(bytes))
}
}
impl<TKey, TVal> KBucketsTable<TKey, TVal>
where
TKey: Clone + AsRef<KeyBytes>,
TVal: Clone
{
/// Creates a new, empty Kademlia routing table with entries partitioned
/// into buckets as per the Kademlia protocol.
///
/// The given `pending_timeout` specifies the duration after creation of
/// a [`PendingEntry`] after which it becomes eligible for insertion into
/// a full bucket, replacing the least-recently (dis)connected node.
pub fn new(local_key: TKey, pending_timeout: Duration) -> Self {
KBucketsTable {
local_key,
buckets: (0 .. NUM_BUCKETS).map(|_| KBucket::new(pending_timeout)).collect(),
applied_pending: VecDeque::new()
}
}
/// Returns the local key.
pub fn local_key(&self) -> &TKey {
&self.local_key
}
/// Returns an `Entry` for the given key, representing the state of the entry
/// in the routing table.
pub fn entry<'a>(&'a mut self, key: &'a TKey) -> Entry<'a, TKey, TVal> {
let index = BucketIndex::new(&self.local_key.as_ref().distance(key));
if let Some(i) = index {
let bucket = &mut self.buckets[i.get()];
if let Some(applied) = bucket.apply_pending() {
self.applied_pending.push_back(applied)
}
Entry::new(bucket, key)
} else {
Entry::SelfEntry
}
}
/// Returns an iterator over all the entries in the routing table.
pub fn iter<'a>(&'a mut self) -> impl Iterator<Item = EntryRefView<'a, TKey, TVal>> {
let applied_pending = &mut self.applied_pending;
self.buckets.iter_mut().flat_map(move |table| {
if let Some(applied) = table.apply_pending() {
applied_pending.push_back(applied)
}
let table = &*table;
table.iter().map(move |(n, status)| {
EntryRefView {
node: NodeRefView {
key: &n.key,
value: &n.value
},
status
}
})
})
}
/// Returns a by-reference iterator over all buckets.
///
/// The buckets are ordered by proximity to the `local_key`, i.e. the first
/// bucket is the closest bucket (containing at most one key).
pub fn buckets<'a>(&'a mut self) -> impl Iterator<Item = KBucketRef<'a, TKey, TVal>> + 'a {
let applied_pending = &mut self.applied_pending;
self.buckets.iter_mut().enumerate().map(move |(i, b)| {
if let Some(applied) = b.apply_pending() {
applied_pending.push_back(applied)
}
KBucketRef {
index: BucketIndex(i),
bucket: b
}
})
}
/// Consumes the next applied pending entry, if any.
///
/// When an entry is attempted to be inserted and the respective bucket is full,
/// it may be recorded as pending insertion after a timeout, see [`InsertResult::Pending`].
///
/// If the oldest currently disconnected entry in the respective bucket does not change
    /// its status until the timeout of the pending entry expires, it is evicted and
    /// the pending entry inserted instead. These insertions of pending entries
    /// happen lazily, whenever the `KBucketsTable` is accessed, and the corresponding
/// buckets are updated accordingly. The fact that a pending entry was applied is
/// recorded in the `KBucketsTable` in the form of `AppliedPending` results, which must be
/// consumed by calling this function.
    pub fn take_applied_pending(&mut self) -> Option<AppliedPending<TKey, TVal>> {
self.applied_pending.pop_front()
}
/// Returns an iterator over the keys closest to `target`, ordered by
/// increasing distance.
pub fn closest_keys<'a, T>(&'a mut self, target: &'a T)
-> impl Iterator<Item = TKey> + 'a
where
T: Clone + AsRef<KeyBytes>
{
let distance = self.local_key.as_ref().distance(target);
ClosestIter {
target,
iter: None,
table: self,
buckets_iter: ClosestBucketsIter::new(distance),
fmap: |b: &KBucket<TKey, _>| -> ArrayVec<_> {
b.iter().map(|(n,_)| n.key.clone()).collect()
}
}
}
/// Returns an iterator over the nodes closest to the `target` key, ordered by
/// increasing distance.
pub fn closest<'a, T>(&'a mut self, target: &'a T)
-> impl Iterator<Item = EntryView<TKey, TVal>> + 'a
where
T: Clone + AsRef<KeyBytes>,
TVal: Clone
{
let distance = self.local_key.as_ref().distance(target);
ClosestIter {
target,
iter: None,
table: self,
buckets_iter: ClosestBucketsIter::new(distance),
fmap: |b: &KBucket<_, TVal>| -> ArrayVec<_> {
b.iter().map(|(n, status)| EntryView {
node: n.clone(),
status
}).collect()
}
}
}
/// Counts the number of nodes between the local node and the node
/// closest to `target`.
///
/// The number of nodes between the local node and the target are
/// calculated by backtracking from the target towards the local key.
pub fn count_nodes_between<T>(&mut self, target: &T) -> usize
where
T: AsRef<KeyBytes>
{
let local_key = self.local_key.clone();
let distance = target.as_ref().distance(&local_key);
let mut iter = ClosestBucketsIter::new(distance).take_while(|i| i.get() != 0);
if let Some(i) = iter.next() {
let num_first = self.buckets[i.get()].iter()
.filter(|(n,_)| n.key.as_ref().distance(&local_key) <= distance)
.count();
let num_rest: usize = iter.map(|i| self.buckets[i.get()].num_entries()).sum();
num_first + num_rest
} else {
0
}
}
}
/// An iterator over (some projection of) the closest entries in a
/// `KBucketsTable` w.r.t. some target `Key`.
struct ClosestIter<'a, TTarget, TKey, TVal, TMap, TOut> {
/// A reference to the target key whose distance to the local key determines
/// the order in which the buckets are traversed. The resulting
/// array from projecting the entries of each bucket using `fmap` is
/// sorted according to the distance to the target.
target: &'a TTarget,
/// A reference to all buckets of the `KBucketsTable`.
table: &'a mut KBucketsTable<TKey, TVal>,
/// The iterator over the bucket indices in the order determined by the
/// distance of the local key to the target.
buckets_iter: ClosestBucketsIter,
/// The iterator over the entries in the currently traversed bucket.
iter: Option<arrayvec::IntoIter<[TOut; K_VALUE.get()]>>,
/// The projection function / mapping applied on each bucket as
/// it is encountered, producing the next `iter`ator.
fmap: TMap
}
/// An iterator over the bucket indices, in the order determined by the `Distance` of
/// a target from the `local_key`, such that the entries in the buckets are incrementally
/// further away from the target, starting with the bucket covering the target.
struct ClosestBucketsIter {
/// The distance to the `local_key`.
distance: Distance,
/// The current state of the iterator.
state: ClosestBucketsIterState
}
/// Operating states of a `ClosestBucketsIter`.
enum ClosestBucketsIterState {
/// The starting state of the iterator yields the first bucket index and
/// then transitions to `ZoomIn`.
Start(BucketIndex),
    /// The iterator "zooms in" to yield the next bucket containing nodes that
/// are incrementally closer to the local node but further from the `target`.
/// These buckets are identified by a `1` in the corresponding bit position
/// of the distance bit string. When bucket `0` is reached, the iterator
/// transitions to `ZoomOut`.
ZoomIn(BucketIndex),
/// Once bucket `0` has been reached, the iterator starts "zooming out"
/// to buckets containing nodes that are incrementally further away from
/// both the local key and the target. These are identified by a `0` in
/// the corresponding bit position of the distance bit string. When bucket
/// `255` is reached, the iterator transitions to state `Done`.
ZoomOut(BucketIndex),
/// The iterator is in this state once it has visited all buckets.
Done
}
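// Worked example (illustrative, showing only 8 bits of a distance for brevity):
// for a distance of 0b0010_1010 the iterator starts at bucket 5 (the highest set
// bit), zooms in over the remaining `1` bits (buckets 3, 1), reaches bucket 0,
// then zooms out over the `0` bits (buckets 2, 4, 6, 7, ... up to NUM_BUCKETS - 1).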
impl ClosestBucketsIter {
fn new(distance: Distance) -> Self {
let state = match BucketIndex::new(&distance) {
Some(i) => ClosestBucketsIterState::Start(i),
None => ClosestBucketsIterState::Start(BucketIndex(0))
};
Self { distance, state }
}
fn next_in(&self, i: BucketIndex) -> Option<BucketIndex> {
(0 .. i.get()).rev().find_map(|i|
if self.distance.0.bit(i) {
Some(BucketIndex(i))
} else {
None
})
}
fn next_out(&self, i: BucketIndex) -> Option<BucketIndex> {
(i.get() + 1 .. NUM_BUCKETS).find_map(|i|
if !self.distance.0.bit(i) {
Some(BucketIndex(i))
} else {
None
})
}
}
impl Iterator for ClosestBucketsIter {
type Item = BucketIndex;
fn next(&mut self) -> Option<Self::Item> {
match self.state {
ClosestBucketsIterState::Start(i) => {
self.state = ClosestBucketsIterState::ZoomIn(i);
Some(i)
}
ClosestBucketsIterState::ZoomIn(i) =>
if let Some(i) = self.next_in(i) {
self.state = ClosestBucketsIterState::ZoomIn(i);
Some(i)
} else {
let i = BucketIndex(0);
self.state = ClosestBucketsIterState::ZoomOut(i);
Some(i)
}
ClosestBucketsIterState::ZoomOut(i) =>
if let Some(i) = self.next_out(i) {
self.state = ClosestBucketsIterState::ZoomOut(i);
Some(i)
} else {
self.state = ClosestBucketsIterState::Done;
None
}
ClosestBucketsIterState::Done => None
}
}
}
impl<TTarget, TKey, TVal, TMap, TOut> Iterator
for ClosestIter<'_, TTarget, TKey, TVal, TMap, TOut>
where
TTarget: AsRef<KeyBytes>,
TKey: Clone + AsRef<KeyBytes>,
TVal: Clone,
TMap: Fn(&KBucket<TKey, TVal>) -> ArrayVec<[TOut; K_VALUE.get()]>,
TOut: AsRef<KeyBytes>
{
type Item = TOut;
fn next(&mut self) -> Option<Self::Item> {
loop {
match &mut self.iter {
Some(iter) => match iter.next() {
Some(k) => return Some(k),
None => self.iter = None
}
None => {
if let Some(i) = self.buckets_iter.next() {
let bucket = &mut self.table.buckets[i.get()];
if let Some(applied) = bucket.apply_pending() {
self.table.applied_pending.push_back(applied)
}
let mut v = (self.fmap)(bucket);
v.sort_by(|a, b|
self.target.as_ref().distance(a.as_ref())
.cmp(&self.target.as_ref().distance(b.as_ref())));
self.iter = Some(v.into_iter());
} else {
return None
}
}
}
}
}
}
/// A reference to a bucket in a `KBucketsTable`.
pub struct KBucketRef<'a, TPeerId, TVal> {
index: BucketIndex,
bucket: &'a mut KBucket<TPeerId, TVal>
}
impl<TKey, TVal> KBucketRef<'_, TKey, TVal>
where
TKey: Clone + AsRef<KeyBytes>,
TVal: Clone
{
/// Returns the number of entries in the bucket.
pub fn num_entries(&self) -> usize {
self.bucket.num_entries()
}
/// Returns true if the bucket has a pending node.
pub fn has_pending(&self) -> bool {
self.bucket.pending().map_or(false, |n| !n.is_ready())
}
/// Tests whether the given distance falls into this bucket.
pub fn contains(&self, d: &Distance) -> bool {
BucketIndex::new(d).map_or(false, |i| i == self.index)
}
/// Generates a random distance that falls into this bucket.
///
    /// Together with a known key `a` (e.g. the local key), a random distance `d` for
    /// this bucket w.r.t. `a` gives rise to the corresponding (random) key `b` s.t.
/// the XOR distance between `a` and `b` is `d`. In other words, it gives
/// rise to a random key falling into this bucket. See [`Key::from_distance`].
pub fn rand_distance(&self, rng: &mut impl rand::Rng) -> Distance {
self.index.rand_distance(rng)
}
}
#[cfg(test)]
mod tests {
use bigint::U256;
use super::*;
use libp2p_core::PeerId;
use quickcheck::*;
use rand::Rng;
type TestTable = KBucketsTable<KeyBytes, ()>;
impl Arbitrary for TestTable {
fn arbitrary<G: Gen>(g: &mut G) -> TestTable {
let local_key = Key::from(PeerId::random());
let timeout = Duration::from_secs(g.gen_range(1, 360));
let mut table = TestTable::new(local_key.clone().into(), timeout);
let mut num_total = g.gen_range(0, 100);
for (i, b) in &mut table.buckets.iter_mut().enumerate().rev() {
let ix = BucketIndex(i);
let num = g.gen_range(0, usize::min(K_VALUE.get(), num_total) + 1);
num_total -= num;
for _ in 0 .. num {
let distance = ix.rand_distance(g);
let key = local_key.for_distance(distance);
let node = Node { key: key.clone(), value: () };
let status = NodeStatus::arbitrary(g);
match b.insert(node, status) {
InsertResult::Inserted => {}
_ => panic!()
}
}
}
table
}
}
#[test]
fn rand_distance() {
fn prop(ix: u8) -> bool {
let d = BucketIndex(ix as usize).rand_distance(&mut rand::thread_rng());
let n = U256::from(<[u8; 32]>::from(d.0));
let b = U256::from(2);
let e = U256::from(ix);
let lower = b.pow(e);
let upper = b.pow(e + U256::from(1)) - U256::from(1);
lower <= n && n <= upper
}
quickcheck(prop as fn(_) -> _);
}
#[test]
fn entry_inserted() {
let local_key = Key::from(PeerId::random());
let other_id = Key::from(PeerId::random());
let mut table = KBucketsTable::<_, ()>::new(local_key, Duration::from_secs(5));
if let Entry::Absent(entry) = table.entry(&other_id) {
match entry.insert((), NodeStatus::Connected) {
InsertResult::Inserted => (),
_ => panic!()
}
} else {
panic!()
}
let res = table.closest_keys(&other_id).collect::<Vec<_>>();
assert_eq!(res.len(), 1);
assert_eq!(res[0], other_id);
}
#[test]
fn entry_self() {
let local_key = Key::from(PeerId::random());
let mut table = KBucketsTable::<_, ()>::new(local_key.clone(), Duration::from_secs(5));
match table.entry(&local_key) {
Entry::SelfEntry => (),
_ => panic!(),
}
}
#[test]
fn closest() {
let local_key = Key::from(PeerId::random());
let mut table = KBucketsTable::<_, ()>::new(local_key, Duration::from_secs(5));
let mut count = 0;
loop {
if count == 100 { break; }
let key = Key::from(PeerId::random());
if let Entry::Absent(e) = table.entry(&key) {
match e.insert((), NodeStatus::Connected) {
InsertResult::Inserted => count += 1,
_ => continue,
}
} else {
panic!("entry exists")
}
}
let mut expected_keys: Vec<_> = table.buckets
.iter()
.flat_map(|t| t.iter().map(|(n,_)| n.key.clone()))
.collect();
for _ in 0 .. 10 {
let target_key = Key::from(PeerId::random());
let keys = table.closest_keys(&target_key).collect::<Vec<_>>();
// The list of keys is expected to match the result of a full-table scan.
expected_keys.sort_by_key(|k| k.distance(&target_key));
assert_eq!(keys, expected_keys);
}
}
#[test]
fn applied_pending() {
let local_key = Key::from(PeerId::random());
let mut table = KBucketsTable::<_, ()>::new(local_key.clone(), Duration::from_millis(1));
let expected_applied;
let full_bucket_index;
loop {
let key = Key::from(PeerId::random());
if let Entry::Absent(e) = table.entry(&key) {
match e.insert((), NodeStatus::Disconnected) {
InsertResult::Full => {
if let Entry::Absent(e) = table.entry(&key) {
match e.insert((), NodeStatus::Connected) {
InsertResult::Pending { disconnected } => {
expected_applied = AppliedPending {
inserted: Node { key: key.clone(), value: () },
evicted: Some(Node { key: disconnected, value: () })
};
full_bucket_index = BucketIndex::new(&key.distance(&local_key));
break
},
_ => panic!()
}
} else {
panic!()
}
},
_ => continue,
}
} else {
panic!("entry exists")
}
}
        // Expire the timeout for the pending entry on the full bucket.
let full_bucket = &mut table.buckets[full_bucket_index.unwrap().get()];
let elapsed = Instant::now() - Duration::from_secs(1);
full_bucket.pending_mut().unwrap().set_ready_at(elapsed);
match table.entry(&expected_applied.inserted.key) {
Entry::Present(_, NodeStatus::Connected) => {}
x => panic!("Unexpected entry: {:?}", x)
}
match table.entry(&expected_applied.evicted.as_ref().unwrap().key) {
Entry::Absent(_) => {}
x => panic!("Unexpected entry: {:?}", x)
}
assert_eq!(Some(expected_applied), table.take_applied_pending());
assert_eq!(None, table.take_applied_pending());
}
#[test]
fn count_nodes_between() {
fn prop(mut table: TestTable, target: Key<PeerId>) -> bool {
let num_to_target = table.count_nodes_between(&target);
let distance = table.local_key.distance(&target);
let base2 = U256::from(2);
let mut iter = ClosestBucketsIter::new(distance);
iter.all(|i| {
// Flip the distance bit related to the bucket.
let d = Distance(distance.0 ^ (base2.pow(U256::from(i.get()))));
let k = table.local_key.for_distance(d);
if distance.0.bit(i.get()) {
// Bit flip `1` -> `0`, the key must be closer than `target`.
d < distance && table.count_nodes_between(&k) <= num_to_target
} else {
// Bit flip `0` -> `1`, the key must be farther than `target`.
d > distance && table.count_nodes_between(&k) >= num_to_target
}
})
}
QuickCheck::new().tests(10).quickcheck(prop as fn(_,_) -> _)
}
}
test_zpool.py | """
Tests for salt.states.zpool
:codeauthor: Jorge Schrauwen <[email protected]>
:maintainer: Jorge Schrauwen <[email protected]>
:maturity: new
:depends: salt.utils.zfs, salt.modules.zpool
:platform: illumos,freebsd,linux
"""
import salt.loader
import salt.states.zpool as zpool
import salt.utils.zfs
from salt.utils.odict import OrderedDict
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.mock import MagicMock, patch
from tests.support.unit import TestCase
from tests.support.zfs import ZFSMockData
class ZpoolTestCase(TestCase, LoaderModuleMockMixin):
"""
Test cases for salt.states.zpool
"""
@classmethod
def setUpClass(cls):
cls.utils_patch = ZFSMockData().get_patched_utils()
@classmethod
def tearDownClass(cls):
cls.utils_patch = None
def setup_loader_modules(self):
self.opts = opts = salt.config.DEFAULT_MINION_OPTS.copy()
utils = salt.loader.utils(opts, whitelist=["zfs"])
zpool_obj = {
zpool: {
"__opts__": opts,
"__grains__": {"kernel": "SunOS"},
"__utils__": utils,
}
}
return zpool_obj
def test_absent_without_pool(self):
"""
Test zpool absent without a pool
"""
ret = {
"name": "myzpool",
"result": True,
"comment": "storage pool myzpool is absent",
"changes": {},
}
mock_exists = MagicMock(return_value=False)
with patch.dict(zpool.__salt__, {"zpool.exists": mock_exists}), patch.dict(
zpool.__utils__, self.utils_patch
):
self.assertEqual(zpool.absent("myzpool"), ret)
def test_absent_destroy_pool(self):
"""
Test zpool absent destroying pool
"""
ret = {
"name": "myzpool",
"result": True,
"comment": "storage pool myzpool was destroyed",
"changes": {"myzpool": "destroyed"},
}
mock_exists = MagicMock(return_value=True)
mock_destroy = MagicMock(return_value=OrderedDict([("destroyed", True)]))
with patch.dict(zpool.__salt__, {"zpool.exists": mock_exists}), patch.dict(
zpool.__salt__, {"zpool.destroy": mock_destroy}
), patch.dict(zpool.__utils__, self.utils_patch):
self.assertEqual(zpool.absent("myzpool"), ret)
    def test_absent_export_pool(self):
"""
Test zpool absent exporting pool
"""
ret = {
"name": "myzpool",
"result": True,
"comment": "storage pool myzpool was exported",
"changes": {"myzpool": "exported"},
}
mock_exists = MagicMock(return_value=True)
mock_destroy = MagicMock(return_value=OrderedDict([("exported", True)]))
with patch.dict(zpool.__salt__, {"zpool.exists": mock_exists}), patch.dict(
zpool.__salt__, {"zpool.export": mock_destroy}
), patch.dict(zpool.__utils__, self.utils_patch):
self.assertEqual(zpool.absent("myzpool", export=True), ret)
def test_absent_busy(self):
"""
Test zpool absent on a busy pool
"""
ret = {
"name": "myzpool",
"result": False,
"comment": "\n".join(
[
"cannot unmount '/myzpool': Device busy",
"cannot export 'myzpool': pool is busy",
]
),
"changes": {},
}
mock_exists = MagicMock(return_value=True)
mock_destroy = MagicMock(
return_value=OrderedDict(
[
("exported", False),
(
"error",
"\n".join(
[
"cannot unmount '/myzpool': Device busy",
"cannot export 'myzpool': pool is busy",
]
),
),
]
)
)
with patch.dict(zpool.__salt__, {"zpool.exists": mock_exists}), patch.dict(
zpool.__salt__, {"zpool.export": mock_destroy}
), patch.dict(zpool.__utils__, self.utils_patch):
self.assertEqual(zpool.absent("myzpool", export=True), ret)
def test_present_import_success(self):
"""
Test zpool present with import allowed and unimported pool
"""
ret = {
"name": "myzpool",
"result": True,
"comment": "storage pool myzpool was imported",
"changes": {"myzpool": "imported"},
}
config = {
"import": True,
}
mock_exists = MagicMock(return_value=False)
mock_import = MagicMock(return_value=OrderedDict([("imported", True)]))
with patch.dict(zpool.__salt__, {"zpool.exists": mock_exists}), patch.dict(
zpool.__salt__, {"zpool.import": mock_import}
), patch.dict(zpool.__utils__, self.utils_patch):
self.assertEqual(zpool.present("myzpool", config=config), ret)
def test_present_import_fail(self):
"""
Test zpool present with import allowed and no unimported pool or layout
"""
ret = {
"name": "myzpool",
"result": False,
"comment": "storage pool myzpool was not imported, no (valid) layout specified for creation",
"changes": {},
}
config = {
"import": True,
}
mock_exists = MagicMock(return_value=False)
mock_import = MagicMock(return_value=OrderedDict([("imported", False)]))
with patch.dict(zpool.__salt__, {"zpool.exists": mock_exists}), patch.dict(
zpool.__salt__, {"zpool.import": mock_import}
), patch.dict(zpool.__utils__, self.utils_patch):
self.assertEqual(zpool.present("myzpool", config=config), ret)
def test_present_create_success(self):
"""
Test zpool present with non existing pool
"""
ret = {
"name": "myzpool",
"result": True,
"comment": "storage pool myzpool was created",
"changes": {"myzpool": "created"},
}
config = {
"import": False,
}
layout = [
OrderedDict([("mirror", ["disk0", "disk1"])]),
OrderedDict([("mirror", ["disk2", "disk3"])]),
]
properties = {
"autoexpand": True,
}
filesystem_properties = {
"quota": "5G",
}
mock_exists = MagicMock(return_value=False)
mock_create = MagicMock(
return_value=OrderedDict(
[
("created", True),
(
"vdevs",
OrderedDict(
[
("mirror-0", ["/dev/dsk/disk0", "/dev/dsk/disk1"]),
("mirror-1", ["/dev/dsk/disk2", "/dev/dsk/disk3"]),
]
),
),
]
)
)
with patch.dict(zpool.__salt__, {"zpool.exists": mock_exists}), patch.dict(
zpool.__salt__, {"zpool.create": mock_create}
), patch.dict(zpool.__utils__, self.utils_patch):
self.assertEqual(
zpool.present(
"myzpool",
config=config,
layout=layout,
properties=properties,
filesystem_properties=filesystem_properties,
),
ret,
)
def test_present_create_fail(self):
"""
Test zpool present with non existing pool (without a layout)
"""
ret = {
"name": "myzpool",
"result": False,
"comment": "storage pool myzpool was not imported, no (valid) layout specified for creation",
"changes": {},
}
config = {
"import": False,
}
mock_exists = MagicMock(return_value=False)
with patch.dict(zpool.__salt__, {"zpool.exists": mock_exists}), patch.dict(
zpool.__utils__, self.utils_patch
):
self.assertEqual(zpool.present("myzpool", config=config), ret)
def test_present_create_passthrough_fail(self):
"""
        Test zpool present with non existing pool where creation fails and the error is passed through
"""
ret = {
"name": "myzpool",
"result": False,
"comment": "\n".join(
[
"invalid vdev specification",
"use 'force=True' to override the following errors:",
"/data/salt/vdisk0 is part of exported pool 'zsalt'",
"/data/salt/vdisk1 is part of exported pool 'zsalt'",
]
),
"changes": {},
}
config = {
"force": False,
"import": False,
}
layout = [
OrderedDict([("mirror", ["disk0", "disk1"])]),
OrderedDict([("mirror", ["disk2", "disk3"])]),
]
properties = {
"autoexpand": True,
}
filesystem_properties = {
"quota": "5G",
}
mock_exists = MagicMock(return_value=False)
mock_create = MagicMock(
return_value=OrderedDict(
[
("created", False),
(
"error",
"\n".join(
[
"invalid vdev specification",
"use 'force=True' to override the following errors:",
"/data/salt/vdisk0 is part of exported pool 'zsalt'",
"/data/salt/vdisk1 is part of exported pool 'zsalt'",
]
),
),
]
)
)
with patch.dict(zpool.__salt__, {"zpool.exists": mock_exists}), patch.dict(
zpool.__salt__, {"zpool.create": mock_create}
), patch.dict(zpool.__utils__, self.utils_patch):
self.assertEqual(
zpool.present(
"myzpool",
config=config,
layout=layout,
properties=properties,
filesystem_properties=filesystem_properties,
),
ret,
)
def test_present_update_success(self):
"""
Test zpool present with an existing pool that needs an update
"""
ret = {
"name": "myzpool",
"result": True,
"comment": "properties updated",
"changes": {"myzpool": {"autoexpand": False}},
}
config = {
"import": False,
}
layout = [
OrderedDict([("mirror", ["disk0", "disk1"])]),
OrderedDict([("mirror", ["disk2", "disk3"])]),
]
properties = {
"autoexpand": False,
}
mock_exists = MagicMock(return_value=True)
mock_get = MagicMock(
return_value=OrderedDict(
[
("comment", "salt managed pool"),
("freeing", 0),
("listsnapshots", False),
("leaked", 0),
("feature@obsolete_counts", "enabled"),
("feature@sha512", "enabled"),
("delegation", True),
("dedupditto", "0"),
("dedupratio", "1.00x"),
("autoexpand", True),
("feature@bookmarks", "enabled"),
("allocated", 115712),
("guid", 1591906802560842214),
("feature@large_blocks", "enabled"),
("size", 2113929216),
("feature@enabled_txg", "active"),
("feature@hole_birth", "active"),
("capacity", 0),
("feature@multi_vdev_crash_dump", "enabled"),
("feature@extensible_dataset", "enabled"),
("cachefile", "-"),
("bootfs", "-"),
("autoreplace", True),
("readonly", False),
("version", "-"),
("health", "ONLINE"),
("expandsize", "-"),
("feature@embedded_data", "active"),
("feature@lz4_compress", "active"),
("feature@async_destroy", "enabled"),
("feature@skein", "enabled"),
("feature@empty_bpobj", "enabled"),
("feature@spacemap_histogram", "active"),
("bootsize", "-"),
("free", 2113813504),
("feature@device_removal", "enabled"),
("failmode", "wait"),
("feature@filesystem_limits", "enabled"),
("feature@edonr", "enabled"),
("altroot", "-"),
("fragmentation", "0%"),
]
)
)
mock_set = MagicMock(return_value=OrderedDict([("set", True)]))
with patch.dict(zpool.__salt__, {"zpool.exists": mock_exists}), patch.dict(
zpool.__salt__, {"zpool.get": mock_get}
), patch.dict(zpool.__salt__, {"zpool.set": mock_set}), patch.dict(
zpool.__utils__, self.utils_patch
):
self.assertEqual(
zpool.present(
"myzpool", config=config, layout=layout, properties=properties,
),
ret,
)
    def test_present_update_nochange_success(self):
"""
        Test zpool present with an existing pool that needs no update
"""
config = {
"import": False,
}
layout = [
OrderedDict([("mirror", ["disk0", "disk1"])]),
OrderedDict([("mirror", ["disk2", "disk3"])]),
]
properties = {
"autoexpand": True,
}
mock_exists = MagicMock(return_value=True)
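        # Same canned `zpool get` output as in the update test; every value
        # already matches the desired properties, so no changes are expected.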
mock_get = MagicMock(
return_value=OrderedDict(
[
("comment", "salt managed pool"),
("freeing", 0),
("listsnapshots", False),
("leaked", 0),
("feature@obsolete_counts", "enabled"),
("feature@sha512", "enabled"),
("delegation", True),
("dedupditto", "0"),
("dedupratio", "1.00x"),
("autoexpand", True),
("feature@bookmarks", "enabled"),
("allocated", 115712),
("guid", 1591906802560842214),
("feature@large_blocks", "enabled"),
("size", 2113929216),
("feature@enabled_txg", "active"),
("feature@hole_birth", "active"),
("capacity", 0),
("feature@multi_vdev_crash_dump", "enabled"),
("feature@extensible_dataset", "enabled"),
("cachefile", "-"),
("bootfs", "-"),
("autoreplace", True),
("readonly", False),
("version", "-"),
("health", "ONLINE"),
("expandsize", "-"),
("feature@embedded_data", "active"),
("feature@lz4_compress", "active"),
("feature@async_destroy", "enabled"),
("feature@skein", "enabled"),
("feature@empty_bpobj", "enabled"),
("feature@spacemap_histogram", "active"),
("bootsize", "-"),
("free", 2113813504),
("feature@device_removal", "enabled"),
("failmode", "wait"),
("feature@filesystem_limits", "enabled"),
("feature@edonr", "enabled"),
("altroot", "-"),
("fragmentation", "0%"),
]
)
)
ret = {
"name": "myzpool",
"result": True,
"comment": "no update needed",
"changes": {},
}
with patch.dict(zpool.__salt__, {"zpool.exists": mock_exists}):
with patch.dict(zpool.__salt__, {"zpool.get": mock_get}):
with patch.dict(zpool.__utils__, self.utils_patch):
self.assertEqual(
zpool.present(
"myzpool",
config=config,
layout=layout,
properties=properties,
),
ret,
)
# Run state with test=true
ret = {
"name": "myzpool",
"result": True,
"comment": "storage pool myzpool is uptodate",
"changes": {},
}
with patch.dict(zpool.__salt__, {"zpool.exists": mock_exists}):
with patch.dict(zpool.__salt__, {"zpool.get": mock_get}):
with patch.dict(zpool.__utils__, self.utils_patch):
with patch.dict(zpool.__opts__, {"test": True}):
self.assertEqual(
zpool.present(
"myzpool",
config=config,
layout=layout,
properties=properties,
),
ret,
)
rsc_rc.py | # -*- coding: utf-8 -*-
# Resource object code
#
# Created by: The Resource Compiler for PyQt5 (Qt v5.9.1)
#
# WARNING! All changes made in this file will be lost!
from Qt import QtCore
qt_resource_data = b"\
\x00\x00\x00\x74\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x1f\x00\x00\x00\x1f\x08\x06\x00\x00\x00\x1f\xae\x16\x39\
\x00\x00\x00\x3b\x49\x44\x41\x54\x48\x89\xed\xd5\xb1\x0d\x00\x20\
\x0c\x03\x41\x87\x61\xb3\x93\xa7\x85\x26\x13\x60\x24\x9a\xbf\xde\
\xfa\xd2\xa5\x40\x77\x6f\x49\x65\xfb\x6a\xbf\x92\x78\x8a\x38\x71\
\xe2\xc4\x89\x13\x7f\xaa\x92\xf1\x5c\x2a\x00\x00\xf8\xeb\x00\x57\
\xcc\x06\x4d\x56\xa3\x34\x4b\x00\x00\x00\x00\x49\x45\x4e\x44\xae\
\x42\x60\x82\
\x00\x00\x00\x66\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x09\x00\x00\x00\x09\x08\x06\x00\x00\x00\xe0\x91\x06\x10\
\x00\x00\x00\x2d\x49\x44\x41\x54\x18\x95\x63\x60\xa0\x2b\x60\x64\
\x60\x60\x60\x30\x36\x36\xfe\x8f\x4b\xc1\xd9\xb3\x67\x19\x99\x90\
\x15\xe3\x32\x84\x09\x5d\x00\x1b\x9f\x09\x87\x04\x2e\x93\x29\x04\
\x00\x23\x70\x04\x0e\x7e\xcf\x74\x12\x00\x00\x00\x00\x49\x45\x4e\
\x44\xae\x42\x60\x82\
\x00\x00\x03\xb6\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x09\x00\x00\x00\x09\x08\x06\x00\x00\x00\xe0\x91\x06\x10\
\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\x77\x61\x72\x65\
\x00\x41\x64\x6f\x62\x65\x20\x49\x6d\x61\x67\x65\x52\x65\x61\x64\
\x79\x71\xc9\x65\x3c\x00\x00\x03\x22\x69\x54\x58\x74\x58\x4d\x4c\
\x3a\x63\x6f\x6d\x2e\x61\x64\x6f\x62\x65\x2e\x78\x6d\x70\x00\x00\
\x00\x00\x00\x3c\x3f\x78\x70\x61\x63\x6b\x65\x74\x20\x62\x65\x67\
\x69\x6e\x3d\x22\xef\xbb\xbf\x22\x20\x69\x64\x3d\x22\x57\x35\x4d\
\x30\x4d\x70\x43\x65\x68\x69\x48\x7a\x72\x65\x53\x7a\x4e\x54\x63\
\x7a\x6b\x63\x39\x64\x22\x3f\x3e\x20\x3c\x78\x3a\x78\x6d\x70\x6d\
\x65\x74\x61\x20\x78\x6d\x6c\x6e\x73\x3a\x78\x3d\x22\x61\x64\x6f\
\x62\x65\x3a\x6e\x73\x3a\x6d\x65\x74\x61\x2f\x22\x20\x78\x3a\x78\
\x6d\x70\x74\x6b\x3d\x22\x41\x64\x6f\x62\x65\x20\x58\x4d\x50\x20\
\x43\x6f\x72\x65\x20\x35\x2e\x33\x2d\x63\x30\x31\x31\x20\x36\x36\
\x2e\x31\x34\x35\x36\x36\x31\x2c\x20\x32\x30\x31\x32\x2f\x30\x32\
\x2f\x30\x36\x2d\x31\x34\x3a\x35\x36\x3a\x32\x37\x20\x20\x20\x20\
\x20\x20\x20\x20\x22\x3e\x20\x3c\x72\x64\x66\x3a\x52\x44\x46\x20\
\x78\x6d\x6c\x6e\x73\x3a\x72\x64\x66\x3d\x22\x68\x74\x74\x70\x3a\
\x2f\x2f\x77\x77\x77\x2e\x77\x33\x2e\x6f\x72\x67\x2f\x31\x39\x39\
\x39\x2f\x30\x32\x2f\x32\x32\x2d\x72\x64\x66\x2d\x73\x79\x6e\x74\
\x61\x78\x2d\x6e\x73\x23\x22\x3e\x20\x3c\x72\x64\x66\x3a\x44\x65\
\x73\x63\x72\x69\x70\x74\x69\x6f\x6e\x20\x72\x64\x66\x3a\x61\x62\
\x6f\x75\x74\x3d\x22\x22\x20\x78\x6d\x6c\x6e\x73\x3a\x78\x6d\x70\
\x3d\x22\x68\x74\x74\x70\x3a\x2f\x2f\x6e\x73\x2e\x61\x64\x6f\x62\
\x65\x2e\x63\x6f\x6d\x2f\x78\x61\x70\x2f\x31\x2e\x30\x2f\x22\x20\
\x78\x6d\x6c\x6e\x73\x3a\x78\x6d\x70\x4d\x4d\x3d\x22\x68\x74\x74\
\x70\x3a\x2f\x2f\x6e\x73\x2e\x61\x64\x6f\x62\x65\x2e\x63\x6f\x6d\
\x2f\x78\x61\x70\x2f\x31\x2e\x30\x2f\x6d\x6d\x2f\x22\x20\x78\x6d\
\x6c\x6e\x73\x3a\x73\x74\x52\x65\x66\x3d\x22\x68\x74\x74\x70\x3a\
\x2f\x2f\x6e\x73\x2e\x61\x64\x6f\x62\x65\x2e\x63\x6f\x6d\x2f\x78\
\x61\x70\x2f\x31\x2e\x30\x2f\x73\x54\x79\x70\x65\x2f\x52\x65\x73\
\x6f\x75\x72\x63\x65\x52\x65\x66\x23\x22\x20\x78\x6d\x70\x3a\x43\
\x72\x65\x61\x74\x6f\x72\x54\x6f\x6f\x6c\x3d\x22\x41\x64\x6f\x62\
\x65\x20\x50\x68\x6f\x74\x6f\x73\x68\x6f\x70\x20\x43\x53\x36\x20\
\x28\x57\x69\x6e\x64\x6f\x77\x73\x29\x22\x20\x78\x6d\x70\x4d\x4d\
\x3a\x49\x6e\x73\x74\x61\x6e\x63\x65\x49\x44\x3d\x22\x78\x6d\x70\
\x2e\x69\x69\x64\x3a\x33\x41\x36\x34\x43\x30\x31\x45\x45\x38\x42\
\x44\x31\x31\x45\x38\x38\x32\x39\x37\x42\x37\x46\x42\x46\x42\x34\
\x45\x32\x41\x30\x31\x22\x20\x78\x6d\x70\x4d\x4d\x3a\x44\x6f\x63\
\x75\x6d\x65\x6e\x74\x49\x44\x3d\x22\x78\x6d\x70\x2e\x64\x69\x64\
\x3a\x33\x41\x36\x34\x43\x30\x31\x46\x45\x38\x42\x44\x31\x31\x45\
\x38\x38\x32\x39\x37\x42\x37\x46\x42\x46\x42\x34\x45\x32\x41\x30\
\x31\x22\x3e\x20\x3c\x78\x6d\x70\x4d\x4d\x3a\x44\x65\x72\x69\x76\
\x65\x64\x46\x72\x6f\x6d\x20\x73\x74\x52\x65\x66\x3a\x69\x6e\x73\
\x74\x61\x6e\x63\x65\x49\x44\x3d\x22\x78\x6d\x70\x2e\x69\x69\x64\
\x3a\x33\x41\x36\x34\x43\x30\x31\x43\x45\x38\x42\x44\x31\x31\x45\
\x38\x38\x32\x39\x37\x42\x37\x46\x42\x46\x42\x34\x45\x32\x41\x30\
\x31\x22\x20\x73\x74\x52\x65\x66\x3a\x64\x6f\x63\x75\x6d\x65\x6e\
\x74\x49\x44\x3d\x22\x78\x6d\x70\x2e\x64\x69\x64\x3a\x33\x41\x36\
\x34\x43\x30\x31\x44\x45\x38\x42\x44\x31\x31\x45\x38\x38\x32\x39\
\x37\x42\x37\x46\x42\x46\x42\x34\x45\x32\x41\x30\x31\x22\x2f\x3e\
\x20\x3c\x2f\x72\x64\x66\x3a\x44\x65\x73\x63\x72\x69\x70\x74\x69\
\x6f\x6e\x3e\x20\x3c\x2f\x72\x64\x66\x3a\x52\x44\x46\x3e\x20\x3c\
\x2f\x78\x3a\x78\x6d\x70\x6d\x65\x74\x61\x3e\x20\x3c\x3f\x78\x70\
\x61\x63\x6b\x65\x74\x20\x65\x6e\x64\x3d\x22\x72\x22\x3f\x3e\xa2\
\x0b\x58\x7d\x00\x00\x00\x2a\x49\x44\x41\x54\x78\xda\x62\xfc\xff\
\xff\x3f\x03\x21\xc0\xc4\x40\x04\x60\x81\x31\xce\x9e\x3d\x8b\x61\
\xa4\xb1\xb1\x31\x23\xd1\x26\xd1\x59\x11\x23\xd5\x82\x00\x20\xc0\
\x00\x99\x75\x0a\x0d\x07\x98\x52\x81\x00\x00\x00\x00\x49\x45\x4e\
\x44\xae\x42\x60\x82\
\x00\x00\x03\xcc\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x09\x00\x00\x00\x09\x08\x06\x00\x00\x00\xe0\x91\x06\x10\
\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\x77\x61\x72\x65\
\x00\x41\x64\x6f\x62\x65\x20\x49\x6d\x61\x67\x65\x52\x65\x61\x64\
\x79\x71\xc9\x65\x3c\x00\x00\x03\x22\x69\x54\x58\x74\x58\x4d\x4c\
\x3a\x63\x6f\x6d\x2e\x61\x64\x6f\x62\x65\x2e\x78\x6d\x70\x00\x00\
\x00\x00\x00\x3c\x3f\x78\x70\x61\x63\x6b\x65\x74\x20\x62\x65\x67\
\x69\x6e\x3d\x22\xef\xbb\xbf\x22\x20\x69\x64\x3d\x22\x57\x35\x4d\
\x30\x4d\x70\x43\x65\x68\x69\x48\x7a\x72\x65\x53\x7a\x4e\x54\x63\
\x7a\x6b\x63\x39\x64\x22\x3f\x3e\x20\x3c\x78\x3a\x78\x6d\x70\x6d\
\x65\x74\x61\x20\x78\x6d\x6c\x6e\x73\x3a\x78\x3d\x22\x61\x64\x6f\
\x62\x65\x3a\x6e\x73\x3a\x6d\x65\x74\x61\x2f\x22\x20\x78\x3a\x78\
\x6d\x70\x74\x6b\x3d\x22\x41\x64\x6f\x62\x65\x20\x58\x4d\x50\x20\
\x43\x6f\x72\x65\x20\x35\x2e\x33\x2d\x63\x30\x31\x31\x20\x36\x36\
\x2e\x31\x34\x35\x36\x36\x31\x2c\x20\x32\x30\x31\x32\x2f\x30\x32\
\x2f\x30\x36\x2d\x31\x34\x3a\x35\x36\x3a\x32\x37\x20\x20\x20\x20\
\x20\x20\x20\x20\x22\x3e\x20\x3c\x72\x64\x66\x3a\x52\x44\x46\x20\
\x78\x6d\x6c\x6e\x73\x3a\x72\x64\x66\x3d\x22\x68\x74\x74\x70\x3a\
\x2f\x2f\x77\x77\x77\x2e\x77\x33\x2e\x6f\x72\x67\x2f\x31\x39\x39\
\x39\x2f\x30\x32\x2f\x32\x32\x2d\x72\x64\x66\x2d\x73\x79\x6e\x74\
\x61\x78\x2d\x6e\x73\x23\x22\x3e\x20\x3c\x72\x64\x66\x3a\x44\x65\
\x73\x63\x72\x69\x70\x74\x69\x6f\x6e\x20\x72\x64\x66\x3a\x61\x62\
\x6f\x75\x74\x3d\x22\x22\x20\x78\x6d\x6c\x6e\x73\x3a\x78\x6d\x70\
\x3d\x22\x68\x74\x74\x70\x3a\x2f\x2f\x6e\x73\x2e\x61\x64\x6f\x62\
\x65\x2e\x63\x6f\x6d\x2f\x78\x61\x70\x2f\x31\x2e\x30\x2f\x22\x20\
\x78\x6d\x6c\x6e\x73\x3a\x78\x6d\x70\x4d\x4d\x3d\x22\x68\x74\x74\
\x70\x3a\x2f\x2f\x6e\x73\x2e\x61\x64\x6f\x62\x65\x2e\x63\x6f\x6d\
\x2f\x78\x61\x70\x2f\x31\x2e\x30\x2f\x6d\x6d\x2f\x22\x20\x78\x6d\
\x6c\x6e\x73\x3a\x73\x74\x52\x65\x66\x3d\x22\x68\x74\x74\x70\x3a\
\x2f\x2f\x6e\x73\x2e\x61\x64\x6f\x62\x65\x2e\x63\x6f\x6d\x2f\x78\
\x61\x70\x2f\x31\x2e\x30\x2f\x73\x54\x79\x70\x65\x2f\x52\x65\x73\
\x6f\x75\x72\x63\x65\x52\x65\x66\x23\x22\x20\x78\x6d\x70\x3a\x43\
\x72\x65\x61\x74\x6f\x72\x54\x6f\x6f\x6c\x3d\x22\x41\x64\x6f\x62\
\x65\x20\x50\x68\x6f\x74\x6f\x73\x68\x6f\x70\x20\x43\x53\x36\x20\
\x28\x57\x69\x6e\x64\x6f\x77\x73\x29\x22\x20\x78\x6d\x70\x4d\x4d\
\x3a\x49\x6e\x73\x74\x61\x6e\x63\x65\x49\x44\x3d\x22\x78\x6d\x70\
\x2e\x69\x69\x64\x3a\x30\x36\x37\x43\x46\x33\x34\x35\x45\x38\x42\
\x43\x31\x31\x45\x38\x41\x34\x34\x37\x46\x31\x46\x34\x42\x34\x36\
\x37\x45\x41\x45\x37\x22\x20\x78\x6d\x70\x4d\x4d\x3a\x44\x6f\x63\
\x75\x6d\x65\x6e\x74\x49\x44\x3d\x22\x78\x6d\x70\x2e\x64\x69\x64\
\x3a\x30\x36\x37\x43\x46\x33\x34\x36\x45\x38\x42\x43\x31\x31\x45\
\x38\x41\x34\x34\x37\x46\x31\x46\x34\x42\x34\x36\x37\x45\x41\x45\
\x37\x22\x3e\x20\x3c\x78\x6d\x70\x4d\x4d\x3a\x44\x65\x72\x69\x76\
\x65\x64\x46\x72\x6f\x6d\x20\x73\x74\x52\x65\x66\x3a\x69\x6e\x73\
\x74\x61\x6e\x63\x65\x49\x44\x3d\x22\x78\x6d\x70\x2e\x69\x69\x64\
\x3a\x30\x36\x37\x43\x46\x33\x34\x33\x45\x38\x42\x43\x31\x31\x45\
\x38\x41\x34\x34\x37\x46\x31\x46\x34\x42\x34\x36\x37\x45\x41\x45\
\x37\x22\x20\x73\x74\x52\x65\x66\x3a\x64\x6f\x63\x75\x6d\x65\x6e\
\x74\x49\x44\x3d\x22\x78\x6d\x70\x2e\x64\x69\x64\x3a\x30\x36\x37\
\x43\x46\x33\x34\x34\x45\x38\x42\x43\x31\x31\x45\x38\x41\x34\x34\
\x37\x46\x31\x46\x34\x42\x34\x36\x37\x45\x41\x45\x37\x22\x2f\x3e\
\x20\x3c\x2f\x72\x64\x66\x3a\x44\x65\x73\x63\x72\x69\x70\x74\x69\
\x6f\x6e\x3e\x20\x3c\x2f\x72\x64\x66\x3a\x52\x44\x46\x3e\x20\x3c\
\x2f\x78\x3a\x78\x6d\x70\x6d\x65\x74\x61\x3e\x20\x3c\x3f\x78\x70\
\x61\x63\x6b\x65\x74\x20\x65\x6e\x64\x3d\x22\x72\x22\x3f\x3e\x98\
\xe2\x1c\x8b\x00\x00\x00\x40\x49\x44\x41\x54\x78\xda\x62\xfc\xff\
\xff\x3f\x03\x21\xc0\x82\xcc\x39\x7b\xf6\x2c\x48\x07\x23\x8c\x6f\
\x6c\x6c\x0c\xa6\x99\xb0\x68\xc4\x30\x9a\x09\x87\x0d\xff\x89\x51\
\x84\xa2\x10\x9f\x22\x46\x42\x8a\x18\x09\x59\xc7\x88\xec\x33\xb0\
\x00\x31\xe1\x04\x10\x60\x00\x1f\x57\x0d\xa6\xcc\xae\xd6\xc8\x00\
\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x00\x6a\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x1f\x00\x00\x00\x1f\x08\x06\x00\x00\x00\x1f\xae\x16\x39\
\x00\x00\x00\x31\x49\x44\x41\x54\x48\x89\xed\xcd\xb1\x0d\x00\x30\
\x0c\xc3\xb0\x20\xc7\xfa\xff\x0f\xda\x39\xb3\x57\x6a\x17\x38\x53\
\x94\xe4\x35\xff\x36\x73\x1b\x1c\x0e\x87\xc3\xe1\x70\x38\x1c\x0e\
\x87\xc3\xe1\xa7\x0f\x79\x12\x02\x6e\xab\x28\xbe\xf1\x00\x00\x00\
\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x00\x92\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x1f\x00\x00\x00\x1f\x08\x06\x00\x00\x00\x1f\xae\x16\x39\
\x00\x00\x00\x59\x49\x44\x41\x54\x48\x89\xed\xd6\x31\x0a\x00\x21\
\x0c\x44\xd1\x64\xd9\xb3\x26\x67\xca\x69\xd7\xc6\xce\x46\x1c\x41\
\x58\xff\xf4\xc3\x43\x09\x24\x6e\x42\x22\xe2\x33\x33\xaf\xaa\xa5\
\xfe\xa3\xe0\x6a\xc0\xc1\xc1\xc1\xc1\xc1\xb7\xc6\x95\x72\x5f\xa9\
\xcb\x79\x95\x72\xcf\xb0\xcf\x33\x73\xaa\x78\xf4\xdb\x77\xbc\x7c\
\xc8\xec\x65\x73\xef\xb4\x83\x83\x83\x83\xff\x17\x6f\xcc\x54\x0b\
\x3b\xe0\x85\xdc\x63\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\
\x82\
\x00\x00\x00\x71\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x09\x00\x00\x00\x09\x08\x06\x00\x00\x00\xe0\x91\x06\x10\
\x00\x00\x00\x38\x49\x44\x41\x54\x18\x95\xbd\xcb\xc1\x0d\x00\x20\
\x0c\x42\xd1\xa7\x2b\x75\xff\x11\x3a\x93\x9e\x4c\xaa\x89\x37\x23\
\x17\xc2\x07\x78\xa5\x06\x11\x51\xd9\x58\x1c\x32\x53\x3f\x4e\xe3\
\x70\xd8\x46\x5b\x51\x73\xbf\x0c\x6e\xc7\x1f\x9a\x83\xe9\x0a\x02\
\xd2\xeb\x37\x71\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\
\x00\x00\x00\xbe\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x05\x00\x00\x00\x05\x08\x06\x00\x00\x00\x8d\x6f\x26\xe5\
\x00\x00\x00\x04\x67\x41\x4d\x41\x00\x00\xb1\x8f\x0b\xfc\x61\x05\
\x00\x00\x00\x20\x63\x48\x52\x4d\x00\x00\x7a\x26\x00\x00\x80\x84\
\x00\x00\xfa\x00\x00\x00\x80\xe8\x00\x00\x75\x30\x00\x00\xea\x60\
\x00\x00\x3a\x98\x00\x00\x17\x70\x9c\xba\x51\x3c\x00\x00\x00\x06\
\x62\x4b\x47\x44\x00\x00\x00\x00\x00\x00\xf9\x43\xbb\x7f\x00\x00\
\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x12\x00\x00\x0b\x12\x01\xd2\
\xdd\x7e\xfc\x00\x00\x00\x22\x49\x44\x41\x54\x08\xd7\x5d\xc8\x31\
\x01\x00\x30\x0c\x02\xb0\xa0\x7c\x15\x88\xa8\xfd\xe4\x0c\xa3\xed\
\x65\x03\x2f\x1b\x90\x0d\xc8\x06\x7c\x46\xd2\x10\x75\x18\x9e\xcf\
\xd6\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x00\x77\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x09\x00\x00\x00\x09\x08\x06\x00\x00\x00\xe0\x91\x06\x10\
\x00\x00\x00\x3e\x49\x44\x41\x54\x18\x95\x85\xcd\xc1\x11\x00\x30\
\x04\x44\xd1\x2f\x2d\xe9\xbf\x36\x39\xc9\x88\x10\x4e\x6b\xe7\x0d\
\xa2\xaa\x0c\x63\x6b\x02\x00\x3f\x64\x1e\x3a\x64\x21\xcb\x4a\xc5\
\x03\xe2\x25\xeb\x40\x7e\x57\x02\x47\x57\x51\xec\xe7\x92\x74\x00\
\x60\x03\x7a\x63\x09\xaa\xdb\xad\xe8\x0b\x00\x00\x00\x00\x49\x45\
\x4e\x44\xae\x42\x60\x82\
\x00\x00\x00\xa2\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x09\x00\x00\x00\x09\x08\x06\x00\x00\x00\xe0\x91\x06\x10\
\x00\x00\x00\x69\x49\x44\x41\x54\x18\x95\x8d\xcf\x31\x0a\xc3\x30\
\x10\x44\xd1\x17\x13\xf0\x85\xd4\x0a\xd2\xe5\x0a\x2e\x7d\xac\x94\
\x2a\x7d\x0f\xb5\xba\x90\xbb\x34\x6b\x23\x27\x08\x3c\xd5\xec\xf0\
\x61\x67\xb8\xa1\x07\xb4\xd6\x60\xc5\x82\x8c\x8a\x0d\x25\xa5\xe4\
\x19\xf0\x8a\x0f\xe6\xb8\xdf\x78\x85\x2f\x53\x98\xa5\x03\x0e\xcd\
\x91\x3b\xa0\x3c\xa8\x93\x7b\xa8\x0e\xa0\xda\x43\x1b\xf6\x1f\x60\
\x8f\xfc\x2c\x5e\xba\x6e\x97\x75\x83\x0f\xff\xfa\x02\x09\x67\x11\
\x78\xb7\x1a\x2c\xbd\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\
\x82\
\x00\x00\x00\xb8\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x09\x00\x00\x00\x09\x08\x06\x00\x00\x00\xe0\x91\x06\x10\
\x00\x00\x00\x7f\x49\x44\x41\x54\x18\x95\x8d\xcd\xab\x11\xc2\x40\
\x18\x04\xe0\x0f\xe8\x22\x6d\xa4\x81\xb3\x51\x91\xb1\x48\x0a\x60\
\x28\x02\x98\xc1\x22\xb1\x91\xa7\x30\x88\x2b\x80\x74\x80\xbf\x36\
\x30\x7f\x98\x0c\x2a\x6b\x76\xf6\x31\xbb\xac\xc0\x06\xda\xb6\xbd\
\x23\xe3\xb9\xc8\x3a\xf4\xd3\x34\x1d\xb6\x61\x64\x3c\x90\x42\xa7\
\xd0\x19\x76\xd0\x34\xcd\x07\x6f\x8c\xb1\x7e\xc3\x80\x57\xad\xd5\
\xbc\x04\x05\x57\x9c\x83\xcb\x1c\x2c\x4b\x09\x47\x9c\x82\xd3\x7f\
\xa9\x8b\xab\x01\x97\xe0\x31\xfc\x5f\xa9\xc7\x7e\x71\x51\x42\xf7\
\xd6\xe2\x0b\x67\xa1\x1b\x23\x0e\x49\xd4\x13\x00\x00\x00\x00\x49\
\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x00\xab\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x09\x00\x00\x00\x09\x08\x06\x00\x00\x00\xe0\x91\x06\x10\
\x00\x00\x00\x72\x49\x44\x41\x54\x18\x95\x8d\xcd\xa1\x11\x02\x31\
\x14\x84\xe1\x0f\x6a\x4a\x01\xb1\x51\x27\xcf\x22\x29\x00\x28\x02\
\xae\x00\x24\x16\x19\x75\x36\x05\xa4\x28\xcc\xcb\x4c\x06\x75\x6b\
\x76\xf6\xed\x3f\x6f\x39\xa0\x13\xf4\xde\xdf\xa8\xd8\xa7\xae\x60\
\x49\x29\x5d\xcf\x71\xa8\xf8\x20\x47\xce\x91\x2b\x0c\x68\xc7\x8a\
\x2f\xee\xe1\xeb\xf8\x3c\x20\x68\xd8\xf0\x0c\x6f\xa3\x98\xa1\x8c\
\x1b\x1e\xe1\xf9\x1f\x2a\xd3\xc4\x6b\x9a\x2e\x33\xb4\xe0\x32\x4d\
\xb4\xc8\x8b\xa3\xfa\x01\x47\x23\x18\x0e\xd6\x51\xab\xd9\x00\x00\
\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x03\xc7\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x09\x00\x00\x00\x09\x08\x06\x00\x00\x00\xe0\x91\x06\x10\
\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\x77\x61\x72\x65\
\x00\x41\x64\x6f\x62\x65\x20\x49\x6d\x61\x67\x65\x52\x65\x61\x64\
\x79\x71\xc9\x65\x3c\x00\x00\x03\x22\x69\x54\x58\x74\x58\x4d\x4c\
\x3a\x63\x6f\x6d\x2e\x61\x64\x6f\x62\x65\x2e\x78\x6d\x70\x00\x00\
\x00\x00\x00\x3c\x3f\x78\x70\x61\x63\x6b\x65\x74\x20\x62\x65\x67\
\x69\x6e\x3d\x22\xef\xbb\xbf\x22\x20\x69\x64\x3d\x22\x57\x35\x4d\
\x30\x4d\x70\x43\x65\x68\x69\x48\x7a\x72\x65\x53\x7a\x4e\x54\x63\
\x7a\x6b\x63\x39\x64\x22\x3f\x3e\x20\x3c\x78\x3a\x78\x6d\x70\x6d\
\x65\x74\x61\x20\x78\x6d\x6c\x6e\x73\x3a\x78\x3d\x22\x61\x64\x6f\
\x62\x65\x3a\x6e\x73\x3a\x6d\x65\x74\x61\x2f\x22\x20\x78\x3a\x78\
\x6d\x70\x74\x6b\x3d\x22\x41\x64\x6f\x62\x65\x20\x58\x4d\x50\x20\
\x43\x6f\x72\x65\x20\x35\x2e\x33\x2d\x63\x30\x31\x31\x20\x36\x36\
\x2e\x31\x34\x35\x36\x36\x31\x2c\x20\x32\x30\x31\x32\x2f\x30\x32\
\x2f\x30\x36\x2d\x31\x34\x3a\x35\x36\x3a\x32\x37\x20\x20\x20\x20\
\x20\x20\x20\x20\x22\x3e\x20\x3c\x72\x64\x66\x3a\x52\x44\x46\x20\
\x78\x6d\x6c\x6e\x73\x3a\x72\x64\x66\x3d\x22\x68\x74\x74\x70\x3a\
\x2f\x2f\x77\x77\x77\x2e\x77\x33\x2e\x6f\x72\x67\x2f\x31\x39\x39\
\x39\x2f\x30\x32\x2f\x32\x32\x2d\x72\x64\x66\x2d\x73\x79\x6e\x74\
\x61\x78\x2d\x6e\x73\x23\x22\x3e\x20\x3c\x72\x64\x66\x3a\x44\x65\
\x73\x63\x72\x69\x70\x74\x69\x6f\x6e\x20\x72\x64\x66\x3a\x61\x62\
\x6f\x75\x74\x3d\x22\x22\x20\x78\x6d\x6c\x6e\x73\x3a\x78\x6d\x70\
\x3d\x22\x68\x74\x74\x70\x3a\x2f\x2f\x6e\x73\x2e\x61\x64\x6f\x62\
\x65\x2e\x63\x6f\x6d\x2f\x78\x61\x70\x2f\x31\x2e\x30\x2f\x22\x20\
\x78\x6d\x6c\x6e\x73\x3a\x78\x6d\x70\x4d\x4d\x3d\x22\x68\x74\x74\
\x70\x3a\x2f\x2f\x6e\x73\x2e\x61\x64\x6f\x62\x65\x2e\x63\x6f\x6d\
\x2f\x78\x61\x70\x2f\x31\x2e\x30\x2f\x6d\x6d\x2f\x22\x20\x78\x6d\
\x6c\x6e\x73\x3a\x73\x74\x52\x65\x66\x3d\x22\x68\x74\x74\x70\x3a\
\x2f\x2f\x6e\x73\x2e\x61\x64\x6f\x62\x65\x2e\x63\x6f\x6d\x2f\x78\
\x61\x70\x2f\x31\x2e\x30\x2f\x73\x54\x79\x70\x65\x2f\x52\x65\x73\
\x6f\x75\x72\x63\x65\x52\x65\x66\x23\x22\x20\x78\x6d\x70\x3a\x43\
\x72\x65\x61\x74\x6f\x72\x54\x6f\x6f\x6c\x3d\x22\x41\x64\x6f\x62\
\x65\x20\x50\x68\x6f\x74\x6f\x73\x68\x6f\x70\x20\x43\x53\x36\x20\
\x28\x57\x69\x6e\x64\x6f\x77\x73\x29\x22\x20\x78\x6d\x70\x4d\x4d\
\x3a\x49\x6e\x73\x74\x61\x6e\x63\x65\x49\x44\x3d\x22\x78\x6d\x70\
\x2e\x69\x69\x64\x3a\x36\x43\x36\x42\x35\x32\x43\x45\x45\x38\x42\
\x44\x31\x31\x45\x38\x38\x34\x31\x37\x44\x45\x44\x43\x45\x38\x39\
\x39\x35\x30\x37\x37\x22\x20\x78\x6d\x70\x4d\x4d\x3a\x44\x6f\x63\
\x75\x6d\x65\x6e\x74\x49\x44\x3d\x22\x78\x6d\x70\x2e\x64\x69\x64\
\x3a\x36\x43\x36\x42\x35\x32\x43\x46\x45\x38\x42\x44\x31\x31\x45\
\x38\x38\x34\x31\x37\x44\x45\x44\x43\x45\x38\x39\x39\x35\x30\x37\
\x37\x22\x3e\x20\x3c\x78\x6d\x70\x4d\x4d\x3a\x44\x65\x72\x69\x76\
\x65\x64\x46\x72\x6f\x6d\x20\x73\x74\x52\x65\x66\x3a\x69\x6e\x73\
\x74\x61\x6e\x63\x65\x49\x44\x3d\x22\x78\x6d\x70\x2e\x69\x69\x64\
\x3a\x36\x43\x36\x42\x35\x32\x43\x43\x45\x38\x42\x44\x31\x31\x45\
\x38\x38\x34\x31\x37\x44\x45\x44\x43\x45\x38\x39\x39\x35\x30\x37\
\x37\x22\x20\x73\x74\x52\x65\x66\x3a\x64\x6f\x63\x75\x6d\x65\x6e\
\x74\x49\x44\x3d\x22\x78\x6d\x70\x2e\x64\x69\x64\x3a\x36\x43\x36\
\x42\x35\x32\x43\x44\x45\x38\x42\x44\x31\x31\x45\x38\x38\x34\x31\
\x37\x44\x45\x44\x43\x45\x38\x39\x39\x35\x30\x37\x37\x22\x2f\x3e\
\x20\x3c\x2f\x72\x64\x66\x3a\x44\x65\x73\x63\x72\x69\x70\x74\x69\
\x6f\x6e\x3e\x20\x3c\x2f\x72\x64\x66\x3a\x52\x44\x46\x3e\x20\x3c\
\x2f\x78\x3a\x78\x6d\x70\x6d\x65\x74\x61\x3e\x20\x3c\x3f\x78\x70\
\x61\x63\x6b\x65\x74\x20\x65\x6e\x64\x3d\x22\x72\x22\x3f\x3e\x03\
\x3c\x14\x29\x00\x00\x00\x3b\x49\x44\x41\x54\x78\xda\x62\x3c\x73\
\xe6\xcc\x7f\x06\x02\x80\x05\x4a\x33\x1a\x1b\x1b\xc3\x05\xcf\x9e\
\x3d\x8b\xac\xe6\x3f\x13\x03\x11\x80\x28\x45\x2c\x48\x56\x60\x73\
\x1b\x23\x8a\x22\x7c\xee\xa2\xbe\x9b\xfe\x63\xf1\x3a\x1c\x00\x04\
\x18\x00\xce\x62\x11\x4b\x10\x70\x41\xf5\x00\x00\x00\x00\x49\x45\
\x4e\x44\xae\x42\x60\x82\
\x00\x00\x03\xcb\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x09\x00\x00\x00\x09\x08\x06\x00\x00\x00\xe0\x91\x06\x10\
\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\x77\x61\x72\x65\
\x00\x41\x64\x6f\x62\x65\x20\x49\x6d\x61\x67\x65\x52\x65\x61\x64\
\x79\x71\xc9\x65\x3c\x00\x00\x03\x22\x69\x54\x58\x74\x58\x4d\x4c\
\x3a\x63\x6f\x6d\x2e\x61\x64\x6f\x62\x65\x2e\x78\x6d\x70\x00\x00\
\x00\x00\x00\x3c\x3f\x78\x70\x61\x63\x6b\x65\x74\x20\x62\x65\x67\
\x69\x6e\x3d\x22\xef\xbb\xbf\x22\x20\x69\x64\x3d\x22\x57\x35\x4d\
\x30\x4d\x70\x43\x65\x68\x69\x48\x7a\x72\x65\x53\x7a\x4e\x54\x63\
\x7a\x6b\x63\x39\x64\x22\x3f\x3e\x20\x3c\x78\x3a\x78\x6d\x70\x6d\
\x65\x74\x61\x20\x78\x6d\x6c\x6e\x73\x3a\x78\x3d\x22\x61\x64\x6f\
\x62\x65\x3a\x6e\x73\x3a\x6d\x65\x74\x61\x2f\x22\x20\x78\x3a\x78\
\x6d\x70\x74\x6b\x3d\x22\x41\x64\x6f\x62\x65\x20\x58\x4d\x50\x20\
\x43\x6f\x72\x65\x20\x35\x2e\x33\x2d\x63\x30\x31\x31\x20\x36\x36\
\x2e\x31\x34\x35\x36\x36\x31\x2c\x20\x32\x30\x31\x32\x2f\x30\x32\
\x2f\x30\x36\x2d\x31\x34\x3a\x35\x36\x3a\x32\x37\x20\x20\x20\x20\
\x20\x20\x20\x20\x22\x3e\x20\x3c\x72\x64\x66\x3a\x52\x44\x46\x20\
\x78\x6d\x6c\x6e\x73\x3a\x72\x64\x66\x3d\x22\x68\x74\x74\x70\x3a\
\x2f\x2f\x77\x77\x77\x2e\x77\x33\x2e\x6f\x72\x67\x2f\x31\x39\x39\
\x39\x2f\x30\x32\x2f\x32\x32\x2d\x72\x64\x66\x2d\x73\x79\x6e\x74\
\x61\x78\x2d\x6e\x73\x23\x22\x3e\x20\x3c\x72\x64\x66\x3a\x44\x65\
\x73\x63\x72\x69\x70\x74\x69\x6f\x6e\x20\x72\x64\x66\x3a\x61\x62\
\x6f\x75\x74\x3d\x22\x22\x20\x78\x6d\x6c\x6e\x73\x3a\x78\x6d\x70\
\x3d\x22\x68\x74\x74\x70\x3a\x2f\x2f\x6e\x73\x2e\x61\x64\x6f\x62\
\x65\x2e\x63\x6f\x6d\x2f\x78\x61\x70\x2f\x31\x2e\x30\x2f\x22\x20\
\x78\x6d\x6c\x6e\x73\x3a\x78\x6d\x70\x4d\x4d\x3d\x22\x68\x74\x74\
\x70\x3a\x2f\x2f\x6e\x73\x2e\x61\x64\x6f\x62\x65\x2e\x63\x6f\x6d\
\x2f\x78\x61\x70\x2f\x31\x2e\x30\x2f\x6d\x6d\x2f\x22\x20\x78\x6d\
\x6c\x6e\x73\x3a\x73\x74\x52\x65\x66\x3d\x22\x68\x74\x74\x70\x3a\
\x2f\x2f\x6e\x73\x2e\x61\x64\x6f\x62\x65\x2e\x63\x6f\x6d\x2f\x78\
\x61\x70\x2f\x31\x2e\x30\x2f\x73\x54\x79\x70\x65\x2f\x52\x65\x73\
\x6f\x75\x72\x63\x65\x52\x65\x66\x23\x22\x20\x78\x6d\x70\x3a\x43\
\x72\x65\x61\x74\x6f\x72\x54\x6f\x6f\x6c\x3d\x22\x41\x64\x6f\x62\
\x65\x20\x50\x68\x6f\x74\x6f\x73\x68\x6f\x70\x20\x43\x53\x36\x20\
\x28\x57\x69\x6e\x64\x6f\x77\x73\x29\x22\x20\x78\x6d\x70\x4d\x4d\
\x3a\x49\x6e\x73\x74\x61\x6e\x63\x65\x49\x44\x3d\x22\x78\x6d\x70\
\x2e\x69\x69\x64\x3a\x45\x42\x37\x44\x39\x35\x35\x36\x45\x38\x42\
\x42\x31\x31\x45\x38\x39\x39\x33\x37\x38\x39\x36\x37\x39\x33\x32\
\x33\x34\x42\x44\x44\x22\x20\x78\x6d\x70\x4d\x4d\x3a\x44\x6f\x63\
\x75\x6d\x65\x6e\x74\x49\x44\x3d\x22\x78\x6d\x70\x2e\x64\x69\x64\
\x3a\x45\x42\x37\x44\x39\x35\x35\x37\x45\x38\x42\x42\x31\x31\x45\
\x38\x39\x39\x33\x37\x38\x39\x36\x37\x39\x33\x32\x33\x34\x42\x44\
\x44\x22\x3e\x20\x3c\x78\x6d\x70\x4d\x4d\x3a\x44\x65\x72\x69\x76\
\x65\x64\x46\x72\x6f\x6d\x20\x73\x74\x52\x65\x66\x3a\x69\x6e\x73\
\x74\x61\x6e\x63\x65\x49\x44\x3d\x22\x78\x6d\x70\x2e\x69\x69\x64\
\x3a\x45\x42\x37\x44\x39\x35\x35\x34\x45\x38\x42\x42\x31\x31\x45\
\x38\x39\x39\x33\x37\x38\x39\x36\x37\x39\x33\x32\x33\x34\x42\x44\
\x44\x22\x20\x73\x74\x52\x65\x66\x3a\x64\x6f\x63\x75\x6d\x65\x6e\
\x74\x49\x44\x3d\x22\x78\x6d\x70\x2e\x64\x69\x64\x3a\x45\x42\x37\
\x44\x39\x35\x35\x35\x45\x38\x42\x42\x31\x31\x45\x38\x39\x39\x33\
\x37\x38\x39\x36\x37\x39\x33\x32\x33\x34\x42\x44\x44\x22\x2f\x3e\
\x20\x3c\x2f\x72\x64\x66\x3a\x44\x65\x73\x63\x72\x69\x70\x74\x69\
\x6f\x6e\x3e\x20\x3c\x2f\x72\x64\x66\x3a\x52\x44\x46\x3e\x20\x3c\
\x2f\x78\x3a\x78\x6d\x70\x6d\x65\x74\x61\x3e\x20\x3c\x3f\x78\x70\
\x61\x63\x6b\x65\x74\x20\x65\x6e\x64\x3d\x22\x72\x22\x3f\x3e\x67\
\x8b\x61\xe5\x00\x00\x00\x3f\x49\x44\x41\x54\x78\xda\x62\xfc\xff\
\xff\x3f\x03\x21\xc0\x82\xcc\x39\x7b\xf6\x2c\x8c\xf9\xdf\xd8\xd8\
\x98\x11\xc6\x61\xc2\xa6\x00\xdd\x24\x26\x34\x3e\x56\xbb\x99\x08\
\x29\x40\x57\xc4\x48\x8c\x22\x9c\x0a\x99\xb0\x88\x61\x28\x64\x24\
\x26\x9c\x00\x02\x0c\x00\x6b\x58\x0f\x74\x3a\x78\xe7\x18\x00\x00\
\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x00\xa2\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x09\x00\x00\x00\x09\x08\x06\x00\x00\x00\xe0\x91\x06\x10\
\x00\x00\x00\x69\x49\x44\x41\x54\x18\x95\x8d\xcf\x31\x0a\xc3\x30\
\x10\x44\xd1\x17\x13\xf0\x81\x54\x0b\xd2\xe5\x0a\x2e\x7d\xac\x94\
\x2a\x7d\x0f\xd5\x3a\x90\xbb\x34\x6b\x23\x27\x08\x3c\xd5\xec\xf0\
\x61\x67\xb8\xa1\x07\xa4\x94\x60\xc5\x82\x8c\x8a\x0d\xa5\xb5\xe6\
\x19\xf0\x8a\x0f\xe6\xb8\xdf\x78\x85\x2f\x53\x98\xa5\x03\x0e\xcd\
\x91\x3b\xa0\x3c\xa8\x93\x7b\xa8\x0e\xa0\xda\x43\x1b\xf6\x1f\x60\
\x8f\xfc\x2c\x5e\xba\x6e\x97\x75\x83\x0f\xff\xfa\x02\xd7\x24\x11\
\x78\x18\xc5\x9a\x1e\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\
\x82\
\x00\x00\x00\x70\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x09\x00\x00\x00\x09\x08\x06\x00\x00\x00\xe0\x91\x06\x10\
\x00\x00\x00\x37\x49\x44\x41\x54\x18\x95\x63\x60\x20\x02\x30\xc2\
\x18\xc6\xc6\xc6\xc8\xe2\xff\xcf\x9e\x3d\x0b\x97\x63\xc2\xa2\xf1\
\x3f\xba\x00\xba\x22\x0c\x05\xe8\x8a\xb0\x2a\x40\x57\xc4\x48\x8c\
\x22\x9c\x0a\xb1\x39\x1c\xa7\x89\x78\x01\x00\xb6\xdc\x07\x0e\xb6\
\x96\x48\x81\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x03\xc8\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x09\x00\x00\x00\x09\x08\x06\x00\x00\x00\xe0\x91\x06\x10\
\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\x77\x61\x72\x65\
\x00\x41\x64\x6f\x62\x65\x20\x49\x6d\x61\x67\x65\x52\x65\x61\x64\
\x79\x71\xc9\x65\x3c\x00\x00\x03\x22\x69\x54\x58\x74\x58\x4d\x4c\
\x3a\x63\x6f\x6d\x2e\x61\x64\x6f\x62\x65\x2e\x78\x6d\x70\x00\x00\
\x00\x00\x00\x3c\x3f\x78\x70\x61\x63\x6b\x65\x74\x20\x62\x65\x67\
\x69\x6e\x3d\x22\xef\xbb\xbf\x22\x20\x69\x64\x3d\x22\x57\x35\x4d\
\x30\x4d\x70\x43\x65\x68\x69\x48\x7a\x72\x65\x53\x7a\x4e\x54\x63\
\x7a\x6b\x63\x39\x64\x22\x3f\x3e\x20\x3c\x78\x3a\x78\x6d\x70\x6d\
\x65\x74\x61\x20\x78\x6d\x6c\x6e\x73\x3a\x78\x3d\x22\x61\x64\x6f\
\x62\x65\x3a\x6e\x73\x3a\x6d\x65\x74\x61\x2f\x22\x20\x78\x3a\x78\
\x6d\x70\x74\x6b\x3d\x22\x41\x64\x6f\x62\x65\x20\x58\x4d\x50\x20\
\x43\x6f\x72\x65\x20\x35\x2e\x33\x2d\x63\x30\x31\x31\x20\x36\x36\
\x2e\x31\x34\x35\x36\x36\x31\x2c\x20\x32\x30\x31\x32\x2f\x30\x32\
\x2f\x30\x36\x2d\x31\x34\x3a\x35\x36\x3a\x32\x37\x20\x20\x20\x20\
\x20\x20\x20\x20\x22\x3e\x20\x3c\x72\x64\x66\x3a\x52\x44\x46\x20\
\x78\x6d\x6c\x6e\x73\x3a\x72\x64\x66\x3d\x22\x68\x74\x74\x70\x3a\
\x2f\x2f\x77\x77\x77\x2e\x77\x33\x2e\x6f\x72\x67\x2f\x31\x39\x39\
\x39\x2f\x30\x32\x2f\x32\x32\x2d\x72\x64\x66\x2d\x73\x79\x6e\x74\
\x61\x78\x2d\x6e\x73\x23\x22\x3e\x20\x3c\x72\x64\x66\x3a\x44\x65\
\x73\x63\x72\x69\x70\x74\x69\x6f\x6e\x20\x72\x64\x66\x3a\x61\x62\
\x6f\x75\x74\x3d\x22\x22\x20\x78\x6d\x6c\x6e\x73\x3a\x78\x6d\x70\
\x3d\x22\x68\x74\x74\x70\x3a\x2f\x2f\x6e\x73\x2e\x61\x64\x6f\x62\
\x65\x2e\x63\x6f\x6d\x2f\x78\x61\x70\x2f\x31\x2e\x30\x2f\x22\x20\
\x78\x6d\x6c\x6e\x73\x3a\x78\x6d\x70\x4d\x4d\x3d\x22\x68\x74\x74\
\x70\x3a\x2f\x2f\x6e\x73\x2e\x61\x64\x6f\x62\x65\x2e\x63\x6f\x6d\
\x2f\x78\x61\x70\x2f\x31\x2e\x30\x2f\x6d\x6d\x2f\x22\x20\x78\x6d\
\x6c\x6e\x73\x3a\x73\x74\x52\x65\x66\x3d\x22\x68\x74\x74\x70\x3a\
\x2f\x2f\x6e\x73\x2e\x61\x64\x6f\x62\x65\x2e\x63\x6f\x6d\x2f\x78\
\x61\x70\x2f\x31\x2e\x30\x2f\x73\x54\x79\x70\x65\x2f\x52\x65\x73\
\x6f\x75\x72\x63\x65\x52\x65\x66\x23\x22\x20\x78\x6d\x70\x3a\x43\
\x72\x65\x61\x74\x6f\x72\x54\x6f\x6f\x6c\x3d\x22\x41\x64\x6f\x62\
\x65\x20\x50\x68\x6f\x74\x6f\x73\x68\x6f\x70\x20\x43\x53\x36\x20\
\x28\x57\x69\x6e\x64\x6f\x77\x73\x29\x22\x20\x78\x6d\x70\x4d\x4d\
\x3a\x49\x6e\x73\x74\x61\x6e\x63\x65\x49\x44\x3d\x22\x78\x6d\x70\
\x2e\x69\x69\x64\x3a\x43\x46\x42\x41\x36\x35\x36\x39\x45\x38\x42\
\x42\x31\x31\x45\x38\x39\x36\x41\x30\x38\x42\x38\x37\x32\x43\x42\
\x36\x30\x43\x44\x35\x22\x20\x78\x6d\x70\x4d\x4d\x3a\x44\x6f\x63\
\x75\x6d\x65\x6e\x74\x49\x44\x3d\x22\x78\x6d\x70\x2e\x64\x69\x64\
\x3a\x43\x46\x42\x41\x36\x35\x36\x41\x45\x38\x42\x42\x31\x31\x45\
\x38\x39\x36\x41\x30\x38\x42\x38\x37\x32\x43\x42\x36\x30\x43\x44\
\x35\x22\x3e\x20\x3c\x78\x6d\x70\x4d\x4d\x3a\x44\x65\x72\x69\x76\
\x65\x64\x46\x72\x6f\x6d\x20\x73\x74\x52\x65\x66\x3a\x69\x6e\x73\
\x74\x61\x6e\x63\x65\x49\x44\x3d\x22\x78\x6d\x70\x2e\x69\x69\x64\
\x3a\x43\x46\x42\x41\x36\x35\x36\x37\x45\x38\x42\x42\x31\x31\x45\
\x38\x39\x36\x41\x30\x38\x42\x38\x37\x32\x43\x42\x36\x30\x43\x44\
\x35\x22\x20\x73\x74\x52\x65\x66\x3a\x64\x6f\x63\x75\x6d\x65\x6e\
\x74\x49\x44\x3d\x22\x78\x6d\x70\x2e\x64\x69\x64\x3a\x43\x46\x42\
\x41\x36\x35\x36\x38\x45\x38\x42\x42\x31\x31\x45\x38\x39\x36\x41\
\x30\x38\x42\x38\x37\x32\x43\x42\x36\x30\x43\x44\x35\x22\x2f\x3e\
\x20\x3c\x2f\x72\x64\x66\x3a\x44\x65\x73\x63\x72\x69\x70\x74\x69\
\x6f\x6e\x3e\x20\x3c\x2f\x72\x64\x66\x3a\x52\x44\x46\x3e\x20\x3c\
\x2f\x78\x3a\x78\x6d\x70\x6d\x65\x74\x61\x3e\x20\x3c\x3f\x78\x70\
\x61\x63\x6b\x65\x74\x20\x65\x6e\x64\x3d\x22\x72\x22\x3f\x3e\x50\
\x12\xed\xaf\x00\x00\x00\x3c\x49\x44\x41\x54\x78\xda\x62\xfc\xff\
\xff\x3f\x03\x21\xc0\xc4\x40\x04\x20\x4a\x11\x0b\x88\x38\x7b\xf6\
\x2c\x4e\x3b\x8d\x8d\x8d\x19\x61\x26\x31\xe2\x50\xc3\x88\x6e\x1d\
\x23\x36\x05\x70\x45\x40\x23\xd1\x25\x50\x34\x30\x52\x2d\x08\x00\
\x02\x0c\x00\x52\x1c\x0a\xac\x63\x42\x07\x75\x00\x00\x00\x00\x49\
\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x00\x6a\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x09\x00\x00\x00\x09\x08\x06\x00\x00\x00\xe0\x91\x06\x10\
\x00\x00\x00\x31\x49\x44\x41\x54\x18\x95\x63\x34\x36\x36\xfe\xcf\
\x40\x00\xb0\x40\x69\x46\x3c\x6a\xfe\x33\x11\x32\x85\x81\x81\x81\
\x81\x28\x45\x2c\x48\x6c\x6c\x6e\x63\x44\x57\x84\xd3\x5d\xd4\x77\
\x13\xde\xb0\x02\x00\xee\x60\x04\xb2\xdd\x37\x6a\x98\x00\x00\x00\
\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x00\x5e\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x09\x00\x00\x00\x09\x08\x06\x00\x00\x00\xe0\x91\x06\x10\
\x00\x00\x00\x25\x49\x44\x41\x54\x18\x95\x63\x60\xa0\x16\x60\x84\
\x31\x8c\x8d\x8d\xff\xa3\x4b\x9e\x3d\x7b\x96\x91\x81\x81\x81\x81\
\x89\x18\x93\xe8\xac\x88\x7a\x00\x00\xb9\x91\x04\x0a\xd2\x01\x5c\
\xd3\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x03\xc6\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x09\x00\x00\x00\x09\x08\x06\x00\x00\x00\xe0\x91\x06\x10\
\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\x77\x61\x72\x65\
\x00\x41\x64\x6f\x62\x65\x20\x49\x6d\x61\x67\x65\x52\x65\x61\x64\
\x79\x71\xc9\x65\x3c\x00\x00\x03\x22\x69\x54\x58\x74\x58\x4d\x4c\
\x3a\x63\x6f\x6d\x2e\x61\x64\x6f\x62\x65\x2e\x78\x6d\x70\x00\x00\
\x00\x00\x00\x3c\x3f\x78\x70\x61\x63\x6b\x65\x74\x20\x62\x65\x67\
\x69\x6e\x3d\x22\xef\xbb\xbf\x22\x20\x69\x64\x3d\x22\x57\x35\x4d\
\x30\x4d\x70\x43\x65\x68\x69\x48\x7a\x72\x65\x53\x7a\x4e\x54\x63\
\x7a\x6b\x63\x39\x64\x22\x3f\x3e\x20\x3c\x78\x3a\x78\x6d\x70\x6d\
\x65\x74\x61\x20\x78\x6d\x6c\x6e\x73\x3a\x78\x3d\x22\x61\x64\x6f\
\x62\x65\x3a\x6e\x73\x3a\x6d\x65\x74\x61\x2f\x22\x20\x78\x3a\x78\
\x6d\x70\x74\x6b\x3d\x22\x41\x64\x6f\x62\x65\x20\x58\x4d\x50\x20\
\x43\x6f\x72\x65\x20\x35\x2e\x33\x2d\x63\x30\x31\x31\x20\x36\x36\
\x2e\x31\x34\x35\x36\x36\x31\x2c\x20\x32\x30\x31\x32\x2f\x30\x32\
\x2f\x30\x36\x2d\x31\x34\x3a\x35\x36\x3a\x32\x37\x20\x20\x20\x20\
\x20\x20\x20\x20\x22\x3e\x20\x3c\x72\x64\x66\x3a\x52\x44\x46\x20\
\x78\x6d\x6c\x6e\x73\x3a\x72\x64\x66\x3d\x22\x68\x74\x74\x70\x3a\
\x2f\x2f\x77\x77\x77\x2e\x77\x33\x2e\x6f\x72\x67\x2f\x31\x39\x39\
\x39\x2f\x30\x32\x2f\x32\x32\x2d\x72\x64\x66\x2d\x73\x79\x6e\x74\
\x61\x78\x2d\x6e\x73\x23\x22\x3e\x20\x3c\x72\x64\x66\x3a\x44\x65\
\x73\x63\x72\x69\x70\x74\x69\x6f\x6e\x20\x72\x64\x66\x3a\x61\x62\
\x6f\x75\x74\x3d\x22\x22\x20\x78\x6d\x6c\x6e\x73\x3a\x78\x6d\x70\
\x3d\x22\x68\x74\x74\x70\x3a\x2f\x2f\x6e\x73\x2e\x61\x64\x6f\x62\
\x65\x2e\x63\x6f\x6d\x2f\x78\x61\x70\x2f\x31\x2e\x30\x2f\x22\x20\
\x78\x6d\x6c\x6e\x73\x3a\x78\x6d\x70\x4d\x4d\x3d\x22\x68\x74\x74\
\x70\x3a\x2f\x2f\x6e\x73\x2e\x61\x64\x6f\x62\x65\x2e\x63\x6f\x6d\
\x2f\x78\x61\x70\x2f\x31\x2e\x30\x2f\x6d\x6d\x2f\x22\x20\x78\x6d\
\x6c\x6e\x73\x3a\x73\x74\x52\x65\x66\x3d\x22\x68\x74\x74\x70\x3a\
\x2f\x2f\x6e\x73\x2e\x61\x64\x6f\x62\x65\x2e\x63\x6f\x6d\x2f\x78\
\x61\x70\x2f\x31\x2e\x30\x2f\x73\x54\x79\x70\x65\x2f\x52\x65\x73\
\x6f\x75\x72\x63\x65\x52\x65\x66\x23\x22\x20\x78\x6d\x70\x3a\x43\
\x72\x65\x61\x74\x6f\x72\x54\x6f\x6f\x6c\x3d\x22\x41\x64\x6f\x62\
\x65\x20\x50\x68\x6f\x74\x6f\x73\x68\x6f\x70\x20\x43\x53\x36\x20\
\x28\x57\x69\x6e\x64\x6f\x77\x73\x29\x22\x20\x78\x6d\x70\x4d\x4d\
\x3a\x49\x6e\x73\x74\x61\x6e\x63\x65\x49\x44\x3d\x22\x78\x6d\x70\
\x2e\x69\x69\x64\x3a\x37\x33\x42\x31\x41\x34\x46\x45\x45\x38\x42\
\x44\x31\x31\x45\x38\x41\x42\x41\x38\x46\x36\x45\x34\x30\x35\x36\
\x30\x43\x42\x39\x32\x22\x20\x78\x6d\x70\x4d\x4d\x3a\x44\x6f\x63\
\x75\x6d\x65\x6e\x74\x49\x44\x3d\x22\x78\x6d\x70\x2e\x64\x69\x64\
\x3a\x37\x33\x42\x31\x41\x34\x46\x46\x45\x38\x42\x44\x31\x31\x45\
\x38\x41\x42\x41\x38\x46\x36\x45\x34\x30\x35\x36\x30\x43\x42\x39\
\x32\x22\x3e\x20\x3c\x78\x6d\x70\x4d\x4d\x3a\x44\x65\x72\x69\x76\
\x65\x64\x46\x72\x6f\x6d\x20\x73\x74\x52\x65\x66\x3a\x69\x6e\x73\
\x74\x61\x6e\x63\x65\x49\x44\x3d\x22\x78\x6d\x70\x2e\x69\x69\x64\
\x3a\x37\x33\x42\x31\x41\x34\x46\x43\x45\x38\x42\x44\x31\x31\x45\
\x38\x41\x42\x41\x38\x46\x36\x45\x34\x30\x35\x36\x30\x43\x42\x39\
\x32\x22\x20\x73\x74\x52\x65\x66\x3a\x64\x6f\x63\x75\x6d\x65\x6e\
\x74\x49\x44\x3d\x22\x78\x6d\x70\x2e\x64\x69\x64\x3a\x37\x33\x42\
\x31\x41\x34\x46\x44\x45\x38\x42\x44\x31\x31\x45\x38\x41\x42\x41\
\x38\x46\x36\x45\x34\x30\x35\x36\x30\x43\x42\x39\x32\x22\x2f\x3e\
\x20\x3c\x2f\x72\x64\x66\x3a\x44\x65\x73\x63\x72\x69\x70\x74\x69\
\x6f\x6e\x3e\x20\x3c\x2f\x72\x64\x66\x3a\x52\x44\x46\x3e\x20\x3c\
\x2f\x78\x3a\x78\x6d\x70\x6d\x65\x74\x61\x3e\x20\x3c\x3f\x78\x70\
\x61\x63\x6b\x65\x74\x20\x65\x6e\x64\x3d\x22\x72\x22\x3f\x3e\x87\
\x36\x7f\x88\x00\x00\x00\x3a\x49\x44\x41\x54\x78\xda\x62\x3c\x73\
\xe6\xcc\x7f\x06\x02\x80\x05\x4a\x33\xe2\x51\xf3\x9f\x09\x9b\x20\
\xba\x00\x13\x03\x11\x80\x05\x87\x09\xff\x91\x9d\x81\xac\x88\x11\
\x49\x01\x23\xc9\xd6\x61\x53\xc4\x88\xcb\x4d\x78\xc3\x0a\x20\xc0\
\x00\x4e\xc6\x09\x7c\x09\x4e\xec\x4b\x00\x00\x00\x00\x49\x45\x4e\
\x44\xae\x42\x60\x82\
\x00\x00\x00\x5e\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x05\x00\x00\x00\x05\x08\x06\x00\x00\x00\x8d\x6f\x26\xe5\
\x00\x00\x00\x25\x49\x44\x41\x54\x08\x99\x5d\xc8\x31\x01\x00\x20\
\x0c\x03\xb0\x30\x4b\x15\x08\xd2\xb9\x38\x68\xce\xac\x24\xca\x99\
\x0e\xec\xe9\x80\xe9\x78\xf9\x05\x5c\x0b\xfa\x05\x1f\x6e\x0e\xad\
\x79\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x03\xc9\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x09\x00\x00\x00\x09\x08\x06\x00\x00\x00\xe0\x91\x06\x10\
\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\x77\x61\x72\x65\
\x00\x41\x64\x6f\x62\x65\x20\x49\x6d\x61\x67\x65\x52\x65\x61\x64\
\x79\x71\xc9\x65\x3c\x00\x00\x03\x22\x69\x54\x58\x74\x58\x4d\x4c\
\x3a\x63\x6f\x6d\x2e\x61\x64\x6f\x62\x65\x2e\x78\x6d\x70\x00\x00\
\x00\x00\x00\x3c\x3f\x78\x70\x61\x63\x6b\x65\x74\x20\x62\x65\x67\
\x69\x6e\x3d\x22\xef\xbb\xbf\x22\x20\x69\x64\x3d\x22\x57\x35\x4d\
\x30\x4d\x70\x43\x65\x68\x69\x48\x7a\x72\x65\x53\x7a\x4e\x54\x63\
\x7a\x6b\x63\x39\x64\x22\x3f\x3e\x20\x3c\x78\x3a\x78\x6d\x70\x6d\
\x65\x74\x61\x20\x78\x6d\x6c\x6e\x73\x3a\x78\x3d\x22\x61\x64\x6f\
\x62\x65\x3a\x6e\x73\x3a\x6d\x65\x74\x61\x2f\x22\x20\x78\x3a\x78\
\x6d\x70\x74\x6b\x3d\x22\x41\x64\x6f\x62\x65\x20\x58\x4d\x50\x20\
\x43\x6f\x72\x65\x20\x35\x2e\x33\x2d\x63\x30\x31\x31\x20\x36\x36\
\x2e\x31\x34\x35\x36\x36\x31\x2c\x20\x32\x30\x31\x32\x2f\x30\x32\
\x2f\x30\x36\x2d\x31\x34\x3a\x35\x36\x3a\x32\x37\x20\x20\x20\x20\
\x20\x20\x20\x20\x22\x3e\x20\x3c\x72\x64\x66\x3a\x52\x44\x46\x20\
\x78\x6d\x6c\x6e\x73\x3a\x72\x64\x66\x3d\x22\x68\x74\x74\x70\x3a\
\x2f\x2f\x77\x77\x77\x2e\x77\x33\x2e\x6f\x72\x67\x2f\x31\x39\x39\
\x39\x2f\x30\x32\x2f\x32\x32\x2d\x72\x64\x66\x2d\x73\x79\x6e\x74\
\x61\x78\x2d\x6e\x73\x23\x22\x3e\x20\x3c\x72\x64\x66\x3a\x44\x65\
\x73\x63\x72\x69\x70\x74\x69\x6f\x6e\x20\x72\x64\x66\x3a\x61\x62\
\x6f\x75\x74\x3d\x22\x22\x20\x78\x6d\x6c\x6e\x73\x3a\x78\x6d\x70\
\x3d\x22\x68\x74\x74\x70\x3a\x2f\x2f\x6e\x73\x2e\x61\x64\x6f\x62\
\x65\x2e\x63\x6f\x6d\x2f\x78\x61\x70\x2f\x31\x2e\x30\x2f\x22\x20\
\x78\x6d\x6c\x6e\x73\x3a\x78\x6d\x70\x4d\x4d\x3d\x22\x68\x74\x74\
\x70\x3a\x2f\x2f\x6e\x73\x2e\x61\x64\x6f\x62\x65\x2e\x63\x6f\x6d\
\x2f\x78\x61\x70\x2f\x31\x2e\x30\x2f\x6d\x6d\x2f\x22\x20\x78\x6d\
\x6c\x6e\x73\x3a\x73\x74\x52\x65\x66\x3d\x22\x68\x74\x74\x70\x3a\
\x2f\x2f\x6e\x73\x2e\x61\x64\x6f\x62\x65\x2e\x63\x6f\x6d\x2f\x78\
\x61\x70\x2f\x31\x2e\x30\x2f\x73\x54\x79\x70\x65\x2f\x52\x65\x73\
\x6f\x75\x72\x63\x65\x52\x65\x66\x23\x22\x20\x78\x6d\x70\x3a\x43\
\x72\x65\x61\x74\x6f\x72\x54\x6f\x6f\x6c\x3d\x22\x41\x64\x6f\x62\
\x65\x20\x50\x68\x6f\x74\x6f\x73\x68\x6f\x70\x20\x43\x53\x36\x20\
\x28\x57\x69\x6e\x64\x6f\x77\x73\x29\x22\x20\x78\x6d\x70\x4d\x4d\
\x3a\x49\x6e\x73\x74\x61\x6e\x63\x65\x49\x44\x3d\x22\x78\x6d\x70\
\x2e\x69\x69\x64\x3a\x46\x36\x42\x35\x31\x43\x38\x46\x45\x38\x42\
\x42\x31\x31\x45\x38\x42\x46\x44\x46\x41\x41\x31\x43\x30\x33\x32\
\x39\x34\x41\x43\x32\x22\x20\x78\x6d\x70\x4d\x4d\x3a\x44\x6f\x63\
\x75\x6d\x65\x6e\x74\x49\x44\x3d\x22\x78\x6d\x70\x2e\x64\x69\x64\
\x3a\x46\x36\x42\x35\x31\x43\x39\x30\x45\x38\x42\x42\x31\x31\x45\
\x38\x42\x46\x44\x46\x41\x41\x31\x43\x30\x33\x32\x39\x34\x41\x43\
\x32\x22\x3e\x20\x3c\x78\x6d\x70\x4d\x4d\x3a\x44\x65\x72\x69\x76\
\x65\x64\x46\x72\x6f\x6d\x20\x73\x74\x52\x65\x66\x3a\x69\x6e\x73\
\x74\x61\x6e\x63\x65\x49\x44\x3d\x22\x78\x6d\x70\x2e\x69\x69\x64\
\x3a\x46\x36\x42\x35\x31\x43\x38\x44\x45\x38\x42\x42\x31\x31\x45\
\x38\x42\x46\x44\x46\x41\x41\x31\x43\x30\x33\x32\x39\x34\x41\x43\
\x32\x22\x20\x73\x74\x52\x65\x66\x3a\x64\x6f\x63\x75\x6d\x65\x6e\
\x74\x49\x44\x3d\x22\x78\x6d\x70\x2e\x64\x69\x64\x3a\x46\x36\x42\
\x35\x31\x43\x38\x45\x45\x38\x42\x42\x31\x31\x45\x38\x42\x46\x44\
\x46\x41\x41\x31\x43\x30\x33\x32\x39\x34\x41\x43\x32\x22\x2f\x3e\
\x20\x3c\x2f\x72\x64\x66\x3a\x44\x65\x73\x63\x72\x69\x70\x74\x69\
\x6f\x6e\x3e\x20\x3c\x2f\x72\x64\x66\x3a\x52\x44\x46\x3e\x20\x3c\
\x2f\x78\x3a\x78\x6d\x70\x6d\x65\x74\x61\x3e\x20\x3c\x3f\x78\x70\
\x61\x63\x6b\x65\x74\x20\x65\x6e\x64\x3d\x22\x72\x22\x3f\x3e\xbd\
\x1a\x2c\x7a\x00\x00\x00\x3d\x49\x44\x41\x54\x78\xda\x62\xfc\xff\
\xff\x3f\x03\x21\xc0\xc4\x40\x04\x60\x01\x11\x67\xcf\x9e\x45\x16\
\x03\x19\xcd\x08\x62\x18\x1b\x1b\x63\x35\xe9\x3f\x32\x0d\xd3\xcc\
\x84\x45\x01\x06\x9f\x09\x87\x02\x14\x85\x8c\x54\xf3\x1d\x51\x8a\
\x00\x02\x0c\x00\xb1\x49\x12\x6e\xce\x39\x93\xd8\x00\x00\x00\x00\
\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x00\x74\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x09\x00\x00\x00\x09\x08\x06\x00\x00\x00\xe0\x91\x06\x10\
\x00\x00\x00\x3b\x49\x44\x41\x54\x18\x95\x63\x60\x20\x02\x30\x22\
\x73\x8c\x8d\x8d\xff\x23\x8b\x9d\x3d\x7b\x96\x81\x81\x81\x81\x81\
\x09\x8b\xc6\xff\xe8\x02\xd8\x14\x61\x28\xc4\xa5\x08\x45\x21\x3e\
\x45\x8c\x84\x14\xa1\x78\x08\x9b\x22\x46\x2c\x62\x84\x01\x00\x9a\
\xdc\x07\x0e\x55\xa8\x58\x10\x00\x00\x00\x00\x49\x45\x4e\x44\xae\
\x42\x60\x82\
\x00\x00\x03\xca\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x09\x00\x00\x00\x09\x08\x06\x00\x00\x00\xe0\x91\x06\x10\
\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\x77\x61\x72\x65\
\x00\x41\x64\x6f\x62\x65\x20\x49\x6d\x61\x67\x65\x52\x65\x61\x64\
\x79\x71\xc9\x65\x3c\x00\x00\x03\x22\x69\x54\x58\x74\x58\x4d\x4c\
\x3a\x63\x6f\x6d\x2e\x61\x64\x6f\x62\x65\x2e\x78\x6d\x70\x00\x00\
\x00\x00\x00\x3c\x3f\x78\x70\x61\x63\x6b\x65\x74\x20\x62\x65\x67\
\x69\x6e\x3d\x22\xef\xbb\xbf\x22\x20\x69\x64\x3d\x22\x57\x35\x4d\
\x30\x4d\x70\x43\x65\x68\x69\x48\x7a\x72\x65\x53\x7a\x4e\x54\x63\
\x7a\x6b\x63\x39\x64\x22\x3f\x3e\x20\x3c\x78\x3a\x78\x6d\x70\x6d\
\x65\x74\x61\x20\x78\x6d\x6c\x6e\x73\x3a\x78\x3d\x22\x61\x64\x6f\
\x62\x65\x3a\x6e\x73\x3a\x6d\x65\x74\x61\x2f\x22\x20\x78\x3a\x78\
\x6d\x70\x74\x6b\x3d\x22\x41\x64\x6f\x62\x65\x20\x58\x4d\x50\x20\
\x43\x6f\x72\x65\x20\x35\x2e\x33\x2d\x63\x30\x31\x31\x20\x36\x36\
\x2e\x31\x34\x35\x36\x36\x31\x2c\x20\x32\x30\x31\x32\x2f\x30\x32\
\x2f\x30\x36\x2d\x31\x34\x3a\x35\x36\x3a\x32\x37\x20\x20\x20\x20\
\x20\x20\x20\x20\x22\x3e\x20\x3c\x72\x64\x66\x3a\x52\x44\x46\x20\
\x78\x6d\x6c\x6e\x73\x3a\x72\x64\x66\x3d\x22\x68\x74\x74\x70\x3a\
\x2f\x2f\x77\x77\x77\x2e\x77\x33\x2e\x6f\x72\x67\x2f\x31\x39\x39\
\x39\x2f\x30\x32\x2f\x32\x32\x2d\x72\x64\x66\x2d\x73\x79\x6e\x74\
\x61\x78\x2d\x6e\x73\x23\x22\x3e\x20\x3c\x72\x64\x66\x3a\x44\x65\
\x73\x63\x72\x69\x70\x74\x69\x6f\x6e\x20\x72\x64\x66\x3a\x61\x62\
\x6f\x75\x74\x3d\x22\x22\x20\x78\x6d\x6c\x6e\x73\x3a\x78\x6d\x70\
\x3d\x22\x68\x74\x74\x70\x3a\x2f\x2f\x6e\x73\x2e\x61\x64\x6f\x62\
\x65\x2e\x63\x6f\x6d\x2f\x78\x61\x70\x2f\x31\x2e\x30\x2f\x22\x20\
\x78\x6d\x6c\x6e\x73\x3a\x78\x6d\x70\x4d\x4d\x3d\x22\x68\x74\x74\
\x70\x3a\x2f\x2f\x6e\x73\x2e\x61\x64\x6f\x62\x65\x2e\x63\x6f\x6d\
\x2f\x78\x61\x70\x2f\x31\x2e\x30\x2f\x6d\x6d\x2f\x22\x20\x78\x6d\
\x6c\x6e\x73\x3a\x73\x74\x52\x65\x66\x3d\x22\x68\x74\x74\x70\x3a\
\x2f\x2f\x6e\x73\x2e\x61\x64\x6f\x62\x65\x2e\x63\x6f\x6d\x2f\x78\
\x61\x70\x2f\x31\x2e\x30\x2f\x73\x54\x79\x70\x65\x2f\x52\x65\x73\
\x6f\x75\x72\x63\x65\x52\x65\x66\x23\x22\x20\x78\x6d\x70\x3a\x43\
\x72\x65\x61\x74\x6f\x72\x54\x6f\x6f\x6c\x3d\x22\x41\x64\x6f\x62\
\x65\x20\x50\x68\x6f\x74\x6f\x73\x68\x6f\x70\x20\x43\x53\x36\x20\
\x28\x57\x69\x6e\x64\x6f\x77\x73\x29\x22\x20\x78\x6d\x70\x4d\x4d\
\x3a\x49\x6e\x73\x74\x61\x6e\x63\x65\x49\x44\x3d\x22\x78\x6d\x70\
\x2e\x69\x69\x64\x3a\x31\x35\x39\x32\x45\x41\x41\x41\x45\x38\x42\
\x44\x31\x31\x45\x38\x39\x33\x38\x36\x44\x34\x38\x42\x34\x34\x35\
\x35\x32\x42\x45\x41\x22\x20\x78\x6d\x70\x4d\x4d\x3a\x44\x6f\x63\
\x75\x6d\x65\x6e\x74\x49\x44\x3d\x22\x78\x6d\x70\x2e\x64\x69\x64\
\x3a\x31\x35\x39\x32\x45\x41\x41\x42\x45\x38\x42\x44\x31\x31\x45\
\x38\x39\x33\x38\x36\x44\x34\x38\x42\x34\x34\x35\x35\x32\x42\x45\
\x41\x22\x3e\x20\x3c\x78\x6d\x70\x4d\x4d\x3a\x44\x65\x72\x69\x76\
\x65\x64\x46\x72\x6f\x6d\x20\x73\x74\x52\x65\x66\x3a\x69\x6e\x73\
\x74\x61\x6e\x63\x65\x49\x44\x3d\x22\x78\x6d\x70\x2e\x69\x69\x64\
\x3a\x31\x35\x39\x32\x45\x41\x41\x38\x45\x38\x42\x44\x31\x31\x45\
\x38\x39\x33\x38\x36\x44\x34\x38\x42\x34\x34\x35\x35\x32\x42\x45\
\x41\x22\x20\x73\x74\x52\x65\x66\x3a\x64\x6f\x63\x75\x6d\x65\x6e\
\x74\x49\x44\x3d\x22\x78\x6d\x70\x2e\x64\x69\x64\x3a\x31\x35\x39\
\x32\x45\x41\x41\x39\x45\x38\x42\x44\x31\x31\x45\x38\x39\x33\x38\
\x36\x44\x34\x38\x42\x34\x34\x35\x35\x32\x42\x45\x41\x22\x2f\x3e\
\x20\x3c\x2f\x72\x64\x66\x3a\x44\x65\x73\x63\x72\x69\x70\x74\x69\
\x6f\x6e\x3e\x20\x3c\x2f\x72\x64\x66\x3a\x52\x44\x46\x3e\x20\x3c\
\x2f\x78\x3a\x78\x6d\x70\x6d\x65\x74\x61\x3e\x20\x3c\x3f\x78\x70\
\x61\x63\x6b\x65\x74\x20\x65\x6e\x64\x3d\x22\x72\x22\x3f\x3e\x25\
\x1e\x21\xc9\x00\x00\x00\x3e\x49\x44\x41\x54\x78\xda\x62\x3c\x73\
\xe6\x0c\x03\x01\xf0\x9f\x89\x90\x02\x10\xc1\x44\x48\x01\x3e\x45\
\xff\x91\xd8\x8c\x4c\x68\x02\x18\x0a\x90\x4d\xfa\x8f\x4b\x01\xba\
\x75\x58\x15\xc0\x14\x31\xa2\x59\x87\xce\x87\x9b\xc4\x88\x4b\x01\
\x08\x00\x04\x18\x00\xcb\xfa\x0b\x75\x42\x7e\x04\xf0\x00\x00\x00\
\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x00\x72\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x09\x00\x00\x00\x09\x08\x06\x00\x00\x00\xe0\x91\x06\x10\
\x00\x00\x00\x39\x49\x44\x41\x54\x18\x95\x63\x34\x36\x36\xfe\xcf\
\x40\x00\xb0\x40\x69\x46\x3c\x6a\xfe\x33\x61\x13\x44\x17\xc0\xa6\
\x08\xa7\x75\xe8\x26\xc0\xd8\x8c\xe8\x8a\x18\x91\x14\xa0\xb8\x91\
\x28\xeb\xb0\x29\xc2\xf0\x29\xcc\x3a\xbc\x61\x05\x00\xfd\x20\x07\
\xb1\x0e\xa6\xbf\xc7\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\
\x82\
"
qt_resource_name = b"\
\x00\x06\
\x07\x03\x7d\xc3\
\x00\x69\
\x00\x6d\x00\x61\x00\x67\x00\x65\x00\x73\
\x00\x0e\
\x0d\x4e\x7b\x47\
\x00\x62\
\x00\x72\x00\x61\x00\x6e\x00\x63\x00\x68\x00\x2d\x00\x65\x00\x6e\x00\x64\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x13\
\x0c\x50\x6a\x27\
\x00\x61\
\x00\x72\x00\x72\x00\x6f\x00\x77\x00\x5f\x00\x64\x00\x6f\x00\x77\x00\x6e\x00\x5f\x00\x64\x00\x61\x00\x72\x00\x6b\x00\x2e\x00\x70\
\x00\x6e\x00\x67\
\x00\x20\
\x0e\xe6\xe0\xe7\
\x00\x63\
\x00\x68\x00\x65\x00\x63\x00\x6b\x00\x62\x00\x6f\x00\x78\x00\x5f\x00\x69\x00\x6e\x00\x64\x00\x65\x00\x74\x00\x65\x00\x72\x00\x6d\
\x00\x69\x00\x6e\x00\x61\x00\x74\x00\x65\x00\x5f\x00\x6c\x00\x69\x00\x67\x00\x68\x00\x74\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x15\
\x03\xbd\xf5\x87\
\x00\x61\
\x00\x72\x00\x72\x00\x6f\x00\x77\x00\x5f\x00\x72\x00\x69\x00\x67\x00\x68\x00\x74\x00\x5f\x00\x6c\x00\x69\x00\x67\x00\x68\x00\x74\
\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x09\
\x00\x48\xad\x27\
\x00\x76\
\x00\x6c\x00\x69\x00\x6e\x00\x65\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0f\
\x06\x16\x91\xe7\
\x00\x62\
\x00\x72\x00\x61\x00\x6e\x00\x63\x00\x68\x00\x2d\x00\x6d\x00\x6f\x00\x72\x00\x65\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x11\
\x03\xf9\x79\xa7\
\x00\x61\
\x00\x72\x00\x72\x00\x6f\x00\x77\x00\x5f\x00\x75\x00\x70\x00\x5f\x00\x64\x00\x61\x00\x72\x00\x6b\x00\x2e\x00\x70\x00\x6e\x00\x67\
\
\x00\x19\
\x01\x2a\xc1\x07\
\x00\x70\
\x00\x6f\x00\x70\x00\x75\x00\x70\x00\x5f\x00\x69\x00\x6e\x00\x64\x00\x69\x00\x63\x00\x61\x00\x74\x00\x6f\x00\x72\x00\x5f\x00\x6c\
\x00\x69\x00\x67\x00\x68\x00\x74\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x19\
\x00\x72\xec\xc7\
\x00\x63\
\x00\x68\x00\x65\x00\x63\x00\x6b\x00\x62\x00\x6f\x00\x78\x00\x5f\x00\x63\x00\x68\x00\x65\x00\x63\x00\x6b\x00\x65\x00\x64\x00\x5f\
\x00\x64\x00\x61\x00\x72\x00\x6b\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x1d\
\x04\x8e\xd7\x67\
\x00\x72\
\x00\x61\x00\x64\x00\x69\x00\x6f\x00\x62\x00\x75\x00\x74\x00\x74\x00\x6f\x00\x6e\x00\x5f\x00\x63\x00\x68\x00\x65\x00\x63\x00\x6b\
\x00\x65\x00\x64\x00\x5f\x00\x6c\x00\x69\x00\x67\x00\x68\x00\x74\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0e\
\x0a\x5b\x66\xa7\
\x00\x63\
\x00\x72\x00\x6f\x00\x73\x00\x73\x00\x5f\x00\x64\x00\x61\x00\x72\x00\x6b\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0f\
\x00\xa4\x38\xe7\
\x00\x63\
\x00\x72\x00\x6f\x00\x73\x00\x73\x00\x5f\x00\x6c\x00\x69\x00\x67\x00\x68\x00\x74\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x18\
\x0e\x47\x66\xa7\
\x00\x69\
\x00\x6e\x00\x64\x00\x69\x00\x63\x00\x61\x00\x74\x00\x6f\x00\x72\x00\x5f\x00\x6c\x00\x65\x00\x73\x00\x73\x00\x5f\x00\x6c\x00\x69\
\x00\x67\x00\x68\x00\x74\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x14\
\x0c\x07\x7e\x47\
\x00\x61\
\x00\x72\x00\x72\x00\x6f\x00\x77\x00\x5f\x00\x6c\x00\x65\x00\x66\x00\x74\x00\x5f\x00\x6c\x00\x69\x00\x67\x00\x68\x00\x74\x00\x2e\
\x00\x70\x00\x6e\x00\x67\
\x00\x1c\
\x06\x1d\xc0\x47\
\x00\x72\
\x00\x61\x00\x64\x00\x69\x00\x6f\x00\x62\x00\x75\x00\x74\x00\x74\x00\x6f\x00\x6e\x00\x5f\x00\x63\x00\x68\x00\x65\x00\x63\x00\x6b\
\x00\x65\x00\x64\x00\x5f\x00\x64\x00\x61\x00\x72\x00\x6b\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x13\
\x0f\x94\xaa\xc7\
\x00\x61\
\x00\x72\x00\x72\x00\x6f\x00\x77\x00\x5f\x00\x6c\x00\x65\x00\x66\x00\x74\x00\x5f\x00\x64\x00\x61\x00\x72\x00\x6b\x00\x2e\x00\x70\
\x00\x6e\x00\x67\
\x00\x14\
\x01\xd3\x70\x27\
\x00\x61\
\x00\x72\x00\x72\x00\x6f\x00\x77\x00\x5f\x00\x64\x00\x6f\x00\x77\x00\x6e\x00\x5f\x00\x6c\x00\x69\x00\x67\x00\x68\x00\x74\x00\x2e\
\x00\x70\x00\x6e\x00\x67\
\x00\x17\
\x08\xb8\xab\x47\
\x00\x69\
\x00\x6e\x00\x64\x00\x69\x00\x63\x00\x61\x00\x74\x00\x6f\x00\x72\x00\x5f\x00\x6c\x00\x65\x00\x73\x00\x73\x00\x5f\x00\x64\x00\x61\
\x00\x72\x00\x6b\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x1f\
\x0a\xbf\x43\x27\
\x00\x63\
\x00\x68\x00\x65\x00\x63\x00\x6b\x00\x62\x00\x6f\x00\x78\x00\x5f\x00\x69\x00\x6e\x00\x64\x00\x65\x00\x74\x00\x65\x00\x72\x00\x6d\
\x00\x69\x00\x6e\x00\x61\x00\x74\x00\x65\x00\x5f\x00\x64\x00\x61\x00\x72\x00\x6b\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x18\
\x0a\x1b\x66\x07\
\x00\x69\
\x00\x6e\x00\x64\x00\x69\x00\x63\x00\x61\x00\x74\x00\x6f\x00\x72\x00\x5f\x00\x6d\x00\x6f\x00\x72\x00\x65\x00\x5f\x00\x6c\x00\x69\
\x00\x67\x00\x68\x00\x74\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x18\
\x05\x43\x81\x27\
\x00\x70\
\x00\x6f\x00\x70\x00\x75\x00\x70\x00\x5f\x00\x69\x00\x6e\x00\x64\x00\x69\x00\x63\x00\x61\x00\x74\x00\x6f\x00\x72\x00\x5f\x00\x64\
\x00\x61\x00\x72\x00\x6b\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x12\
\x0a\x44\x49\xc7\
\x00\x61\
\x00\x72\x00\x72\x00\x6f\x00\x77\x00\x5f\x00\x75\x00\x70\x00\x5f\x00\x6c\x00\x69\x00\x67\x00\x68\x00\x74\x00\x2e\x00\x70\x00\x6e\
\x00\x67\
\x00\x14\
\x01\x6a\xf2\x67\
\x00\x61\
\x00\x72\x00\x72\x00\x6f\x00\x77\x00\x5f\x00\x72\x00\x69\x00\x67\x00\x68\x00\x74\x00\x5f\x00\x64\x00\x61\x00\x72\x00\x6b\x00\x2e\
\x00\x70\x00\x6e\x00\x67\
\x00\x1a\
\x02\x3c\x9f\xa7\
\x00\x63\
\x00\x68\x00\x65\x00\x63\x00\x6b\x00\x62\x00\x6f\x00\x78\x00\x5f\x00\x63\x00\x68\x00\x65\x00\x63\x00\x6b\x00\x65\x00\x64\x00\x5f\
\x00\x6c\x00\x69\x00\x67\x00\x68\x00\x74\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x17\
\x0d\xf4\xeb\x47\
\x00\x69\
\x00\x6e\x00\x64\x00\x69\x00\x63\x00\x61\x00\x74\x00\x6f\x00\x72\x00\x5f\x00\x6d\x00\x6f\x00\x72\x00\x65\x00\x5f\x00\x64\x00\x61\
\x00\x72\x00\x6b\x00\x2e\x00\x70\x00\x6e\x00\x67\
"
qt_resource_struct_v1 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x19\x00\x00\x00\x02\
\x00\x00\x00\xd6\x00\x00\x00\x00\x00\x01\x00\x00\x08\x6c\
\x00\x00\x01\x72\x00\x00\x00\x00\x00\x01\x00\x00\x0a\xa7\
\x00\x00\x02\x0c\x00\x00\x00\x00\x00\x01\x00\x00\x0c\x84\
\x00\x00\x01\x3a\x00\x00\x00\x00\x00\x01\x00\x00\x09\xe5\
\x00\x00\x04\x3a\x00\x00\x00\x00\x00\x01\x00\x00\x22\x7c\
\x00\x00\x02\xfe\x00\x00\x00\x00\x00\x01\x00\x00\x15\xe7\
\x00\x00\x04\x68\x00\x00\x00\x00\x00\x01\x00\x00\x22\xf4\
\x00\x00\x00\xa6\x00\x00\x00\x00\x00\x01\x00\x00\x04\x9c\
\x00\x00\x01\x12\x00\x00\x00\x00\x00\x01\x00\x00\x09\x70\
\x00\x00\x01\xaa\x00\x00\x00\x00\x00\x01\x00\x00\x0b\x22\
\x00\x00\x03\xda\x00\x00\x00\x00\x00\x01\x00\x00\x1e\x4d\
\x00\x00\x00\xee\x00\x00\x00\x00\x00\x01\x00\x00\x08\xda\
\x00\x00\x02\x94\x00\x00\x00\x00\x00\x01\x00\x00\x14\xcd\
\x00\x00\x03\x2c\x00\x00\x00\x00\x00\x01\x00\x00\x19\xb3\
\x00\x00\x03\xa4\x00\x00\x00\x00\x00\x01\x00\x00\x1a\x83\
\x00\x00\x04\x10\x00\x00\x00\x00\x00\x01\x00\x00\x1e\xaf\
\x00\x00\x01\xea\x00\x00\x00\x00\x00\x01\x00\x00\x0b\xc8\
\x00\x00\x03\x60\x00\x00\x00\x00\x00\x01\x00\x00\x1a\x21\
\x00\x00\x02\x66\x00\x00\x00\x00\x00\x01\x00\x00\x10\xfe\
\x00\x00\x00\x34\x00\x00\x00\x00\x00\x01\x00\x00\x00\x78\
\x00\x00\x00\x12\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x04\xa2\x00\x00\x00\x00\x00\x01\x00\x00\x26\xc2\
\x00\x00\x02\x30\x00\x00\x00\x00\x00\x01\x00\x00\x0d\x33\
\x00\x00\x00\x60\x00\x00\x00\x00\x00\x01\x00\x00\x00\xe2\
\x00\x00\x02\xd2\x00\x00\x00\x00\x00\x01\x00\x00\x15\x73\
"
qt_resource_struct_v2 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x19\x00\x00\x00\x02\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\xd6\x00\x00\x00\x00\x00\x01\x00\x00\x08\x6c\
\x00\x00\x01\x67\xc8\xb0\x92\xb0\
\x00\x00\x01\x72\x00\x00\x00\x00\x00\x01\x00\x00\x0a\xa7\
\x00\x00\x01\x67\xc8\xaf\x6d\xb6\
\x00\x00\x02\x0c\x00\x00\x00\x00\x00\x01\x00\x00\x0c\x84\
\x00\x00\x01\x67\xc8\xb1\x39\x22\
\x00\x00\x01\x3a\x00\x00\x00\x00\x00\x01\x00\x00\x09\xe5\
\x00\x00\x01\x66\xf5\x0d\xf0\xb6\
\x00\x00\x04\x3a\x00\x00\x00\x00\x00\x01\x00\x00\x22\x7c\
\x00\x00\x01\x67\xc8\xae\xfb\x6a\
\x00\x00\x02\xfe\x00\x00\x00\x00\x00\x01\x00\x00\x15\xe7\
\x00\x00\x01\x67\x42\x41\x95\x69\
\x00\x00\x04\x68\x00\x00\x00\x00\x00\x01\x00\x00\x22\xf4\
\x00\x00\x01\x67\x42\x41\x96\xdf\
\x00\x00\x00\xa6\x00\x00\x00\x00\x00\x01\x00\x00\x04\x9c\
\x00\x00\x01\x67\x42\x41\x96\x31\
\x00\x00\x01\x12\x00\x00\x00\x00\x00\x01\x00\x00\x09\x70\
\x00\x00\x01\x67\xc8\xaf\x38\x3f\
\x00\x00\x01\xaa\x00\x00\x00\x00\x00\x01\x00\x00\x0b\x22\
\x00\x00\x01\x67\xc8\xb1\xf9\x90\
\x00\x00\x03\xda\x00\x00\x00\x00\x00\x01\x00\x00\x1e\x4d\
\x00\x00\x01\x67\xc8\xb1\x1a\xb8\
\x00\x00\x00\xee\x00\x00\x00\x00\x00\x01\x00\x00\x08\xda\
\x00\x00\x01\x67\xc8\xb0\xfc\x2f\
\x00\x00\x02\x94\x00\x00\x00\x00\x00\x01\x00\x00\x14\xcd\
\x00\x00\x01\x67\xc8\xae\xb3\x0a\
\x00\x00\x03\x2c\x00\x00\x00\x00\x00\x01\x00\x00\x19\xb3\
\x00\x00\x01\x67\xc8\xb0\x46\x24\
\x00\x00\x03\xa4\x00\x00\x00\x00\x00\x01\x00\x00\x1a\x83\
\x00\x00\x01\x67\x42\x41\x97\xf3\
\x00\x00\x04\x10\x00\x00\x00\x00\x00\x01\x00\x00\x1e\xaf\
\x00\x00\x01\x67\x42\x41\x96\x96\
\x00\x00\x01\xea\x00\x00\x00\x00\x00\x01\x00\x00\x0b\xc8\
\x00\x00\x01\x67\xc8\xae\x8c\xe9\
\x00\x00\x03\x60\x00\x00\x00\x00\x00\x01\x00\x00\x1a\x21\
\x00\x00\x01\x67\xc8\xb1\x5c\x5f\
\x00\x00\x02\x66\x00\x00\x00\x00\x00\x01\x00\x00\x10\xfe\
\x00\x00\x01\x67\x42\x41\x95\xcf\
\x00\x00\x00\x34\x00\x00\x00\x00\x00\x01\x00\x00\x00\x78\
\x00\x00\x01\x67\xc8\xae\x0e\x0f\
\x00\x00\x00\x12\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x01\x67\xc8\xb0\xba\xc5\
\x00\x00\x04\xa2\x00\x00\x00\x00\x00\x01\x00\x00\x26\xc2\
\x00\x00\x01\x67\xc8\xaf\xa6\xe2\
\x00\x00\x02\x30\x00\x00\x00\x00\x00\x01\x00\x00\x0d\x33\
\x00\x00\x01\x67\x42\x41\x97\x8f\
\x00\x00\x00\x60\x00\x00\x00\x00\x00\x01\x00\x00\x00\xe2\
\x00\x00\x01\x67\x42\x41\x97\x3b\
\x00\x00\x02\xd2\x00\x00\x00\x00\x00\x01\x00\x00\x15\x73\
\x00\x00\x01\x67\xc8\xaf\xef\xf7\
"
qt_version = QtCore.qVersion().split('.')
if qt_version < ['5', '8', '0']:
rcc_version = 1
qt_resource_struct = qt_resource_struct_v1
else:
rcc_version = 2
qt_resource_struct = qt_resource_struct_v2
def qInitResources():
QtCore.qRegisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
QtCore.qUnregisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources()
password-gen.py | import argparse
import random
parser = argparse.ArgumentParser(prog='xkcdpwgen', description='Generate a secure, memorable password using the XKCD method')
parser.add_argument("-w", "--words", type=int, default=4, help='include WORDS words in the password (default=4)')
parser.add_argument("-c", "--caps", type=int, default=0, help='capitalize the first letter of CAPS random words (default=0)') |
def choose_word(file_name):
with open(file_name) as word_file:
return list(set(word.strip() for word in word_file))
def random_words(n, words):
return "".join(random.choice(words) for _ in range(n))
def main(file_name):
words = choose_word(file_name)
n = int(args.words)
print(random_words(n, words))
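    # The --caps/--numbers/--symbols flags are parsed but not applied yet;
    # a minimal sketch of honouring --caps (hypothetical, not original code):
    #   chosen = [random.choice(words) for _ in range(n)]
    #   for i in random.sample(range(n), min(args.caps, n)):
    #       chosen[i] = chosen[i].capitalize()
    #   print("".join(chosen))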
# Here is where you can add whatever dictionary you choose. I used corncob_lowercase.txt
if __name__ == "__main__":
file_name = "corncob_lowercase.txt"
    main(file_name)
common.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
'''
helper classes and functions
'''
import os, sys, string, hashlib
import re, textwrap
from unicodedata import normalize
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
class DummyStream:
    ''' DummyStream behaves like a stream but does nothing. '''
def __init__(self): pass
def write(self,data): pass
def read(self,data): pass
def flush(self): pass
def close(self): pass
def getAppPath():
'''Get the path to this script no matter how it's run.'''
#Determine if the application is a py/pyw or a frozen exe.
if hasattr(sys, 'frozen'):
# If run from exe
#dir_path = os.path.dirname(unicode(sys.executable, sys.getfilesystemencoding()))
dir_path = os.path.dirname(sys.executable)
    elif '__file__' in globals():
        # If run from py (note: __file__ lives in the module globals, not in
        # this function's locals)
dir_path = os.path.dirname(unicode(__file__, sys.getfilesystemencoding()))
else:
# If run from command line
#dir_path = sys.path[0]
dir_path = os.getcwdu()
return dir_path
def getHomeDir():
if sys.platform == 'win32':
import winpaths
        homedir = winpaths.get_common_appdata()  # e.g. 'C:\ProgramData'
else:
homedir = os.path.expanduser("~")
return homedir
def makeDir(d):
if not os.path.exists(d):
os.makedirs(d)
return d
def ensureDir(f):
d = os.path.dirname(f)
if not os.path.exists(d):
os.makedirs(d)
return f
def _xorData(data):
"""Xor Method, Take a data Xor all bytes and return"""
data = [chr(ord(c) ^ 10) for c in data]
return string.join(data, '')
def readFile(path, offset=0, size=-1, xor_data=False):
"""Read specified block from file, using the given size and offset"""
fd = open(path, 'rb')
fd.seek(offset)
data = fd.read(size)
fd.close()
return _xorData(data) if xor_data else data
def writeFile(path, buf, offset=0, xor_data=False):
"""Write specified block on file at the given offset"""
if xor_data:
buf = _xorData(buf)
fd = open(path, 'wb')
fd.seek(offset)
fd.write(buf)
fd.close()
return len(buf)
def md5_for_file(f, block_size=2**20):
md5 = hashlib.md5()
while True:
data = f.read(block_size)
if not data:
break
md5.update(data)
return md5.hexdigest()
def smart_strip(s, max_length=0):
s = s.strip()
if max_length == 0 or len(s) <= max_length:
return s
if max_length > 3:
return s[:-(len(s) - max_length + 3)].strip() + '...'
else:
return s[:-(len(s) - max_length)].strip()
def strip_by_word(the_string, width):
if width <= 0:
return the_string.strip()
s = the_string
if len(the_string) > width:
s = textwrap.wrap(s, width)[0]
if s[-1:] in [u'.', u',', u'?', u'!', u';', u'-', u':']:
s = s[:-1].strip()
if len(s) < len(the_string):
s += '...'
return s | makeDir |
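readFile and writeFile above optionally route data through _xorData, which XORs every byte with 10, so applying it twice restores the original. A round-trip sketch (Python 2, like the module; single-byte XOR is obfuscation, not encryption):
payload = "hello world"
writeFile("/tmp/obfuscated.bin", payload, xor_data=True)          # stored XORed
assert readFile("/tmp/obfuscated.bin") != payload                 # raw bytes differ
assert readFile("/tmp/obfuscated.bin", xor_data=True) == payload  # XOR twice restores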
get_service_catalog_terraform_diagram_responses.go | // Code generated by go-swagger; DO NOT EDIT.
package service_catalogs
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"fmt"
"io"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
strfmt "github.com/go-openapi/strfmt"
models "github.com/cycloidio/cycloid-cli/client/models"
)
// GetServiceCatalogTerraformDiagramReader is a Reader for the GetServiceCatalogTerraformDiagram structure.
type GetServiceCatalogTerraformDiagramReader struct {
formats strfmt.Registry
}
// ReadResponse reads a server response into the received o.
func (o *GetServiceCatalogTerraformDiagramReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
switch response.Code() {
case 200:
result := NewGetServiceCatalogTerraformDiagramOK()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return result, nil
case 403:
result := NewGetServiceCatalogTerraformDiagramForbidden()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return nil, result
case 404:
result := NewGetServiceCatalogTerraformDiagramNotFound()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return nil, result
default:
result := NewGetServiceCatalogTerraformDiagramDefault(response.Code())
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
if response.Code()/100 == 2 {
return result, nil
}
return nil, result
}
}
// NewGetServiceCatalogTerraformDiagramOK creates a GetServiceCatalogTerraformDiagramOK with default headers values
func NewGetServiceCatalogTerraformDiagramOK() *GetServiceCatalogTerraformDiagramOK {
return &GetServiceCatalogTerraformDiagramOK{}
}
/*GetServiceCatalogTerraformDiagramOK handles this case with default header values.
The information of Terraform Diagram
*/
type GetServiceCatalogTerraformDiagramOK struct {
Payload *GetServiceCatalogTerraformDiagramOKBody
}
func (o *GetServiceCatalogTerraformDiagramOK) Error() string {
return fmt.Sprintf("[GET /organizations/{organization_canonical}/service_catalogs/{service_catalog_ref}/terraform/diagram][%d] getServiceCatalogTerraformDiagramOK %+v", 200, o.Payload)
}
func (o *GetServiceCatalogTerraformDiagramOK) GetPayload() *GetServiceCatalogTerraformDiagramOKBody {
return o.Payload
}
func (o *GetServiceCatalogTerraformDiagramOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
o.Payload = new(GetServiceCatalogTerraformDiagramOKBody)
// response payload
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
// NewGetServiceCatalogTerraformDiagramForbidden creates a GetServiceCatalogTerraformDiagramForbidden with default headers values
func NewGetServiceCatalogTerraformDiagramForbidden() *GetServiceCatalogTerraformDiagramForbidden |
/*GetServiceCatalogTerraformDiagramForbidden handles this case with default header values.
The authenticated user cannot perform the operation because, it doesn't have permissions for such operation.
*/
type GetServiceCatalogTerraformDiagramForbidden struct {
/*The length of the response body in octets (8-bit bytes).
*/
ContentLength uint64
Payload *models.ErrorPayload
}
func (o *GetServiceCatalogTerraformDiagramForbidden) Error() string {
return fmt.Sprintf("[GET /organizations/{organization_canonical}/service_catalogs/{service_catalog_ref}/terraform/diagram][%d] getServiceCatalogTerraformDiagramForbidden %+v", 403, o.Payload)
}
func (o *GetServiceCatalogTerraformDiagramForbidden) GetPayload() *models.ErrorPayload {
return o.Payload
}
func (o *GetServiceCatalogTerraformDiagramForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
// response header Content-Length
contentLength, err := swag.ConvertUint64(response.GetHeader("Content-Length"))
if err != nil {
return errors.InvalidType("Content-Length", "header", "uint64", response.GetHeader("Content-Length"))
}
o.ContentLength = contentLength
o.Payload = new(models.ErrorPayload)
// response payload
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
// NewGetServiceCatalogTerraformDiagramNotFound creates a GetServiceCatalogTerraformDiagramNotFound with default headers values
func NewGetServiceCatalogTerraformDiagramNotFound() *GetServiceCatalogTerraformDiagramNotFound {
return &GetServiceCatalogTerraformDiagramNotFound{}
}
/*GetServiceCatalogTerraformDiagramNotFound handles this case with default header values.
The response sent when any of the entities present in the path is not found.
*/
type GetServiceCatalogTerraformDiagramNotFound struct {
/*The length of the response body in octets (8-bit bytes).
*/
ContentLength uint64
Payload *models.ErrorPayload
}
func (o *GetServiceCatalogTerraformDiagramNotFound) Error() string {
return fmt.Sprintf("[GET /organizations/{organization_canonical}/service_catalogs/{service_catalog_ref}/terraform/diagram][%d] getServiceCatalogTerraformDiagramNotFound %+v", 404, o.Payload)
}
func (o *GetServiceCatalogTerraformDiagramNotFound) GetPayload() *models.ErrorPayload {
return o.Payload
}
func (o *GetServiceCatalogTerraformDiagramNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
// response header Content-Length
contentLength, err := swag.ConvertUint64(response.GetHeader("Content-Length"))
if err != nil {
return errors.InvalidType("Content-Length", "header", "uint64", response.GetHeader("Content-Length"))
}
o.ContentLength = contentLength
o.Payload = new(models.ErrorPayload)
// response payload
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
// NewGetServiceCatalogTerraformDiagramDefault creates a GetServiceCatalogTerraformDiagramDefault with default headers values
func NewGetServiceCatalogTerraformDiagramDefault(code int) *GetServiceCatalogTerraformDiagramDefault {
return &GetServiceCatalogTerraformDiagramDefault{
_statusCode: code,
}
}
/*GetServiceCatalogTerraformDiagramDefault handles this case with default header values.
The response sent when an unexpected error happened, as known as an internal server error.
*/
type GetServiceCatalogTerraformDiagramDefault struct {
_statusCode int
/*The length of the response body in octets (8-bit bytes).
*/
ContentLength uint64
Payload *models.ErrorPayload
}
// Code gets the status code for the get service catalog terraform diagram default response
func (o *GetServiceCatalogTerraformDiagramDefault) Code() int {
return o._statusCode
}
func (o *GetServiceCatalogTerraformDiagramDefault) Error() string {
return fmt.Sprintf("[GET /organizations/{organization_canonical}/service_catalogs/{service_catalog_ref}/terraform/diagram][%d] getServiceCatalogTerraformDiagram default %+v", o._statusCode, o.Payload)
}
func (o *GetServiceCatalogTerraformDiagramDefault) GetPayload() *models.ErrorPayload {
return o.Payload
}
func (o *GetServiceCatalogTerraformDiagramDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
// response header Content-Length
contentLength, err := swag.ConvertUint64(response.GetHeader("Content-Length"))
if err != nil {
return errors.InvalidType("Content-Length", "header", "uint64", response.GetHeader("Content-Length"))
}
o.ContentLength = contentLength
o.Payload = new(models.ErrorPayload)
// response payload
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
/*GetServiceCatalogTerraformDiagramOKBody get service catalog terraform diagram o k body
swagger:model GetServiceCatalogTerraformDiagramOKBody
*/
type GetServiceCatalogTerraformDiagramOKBody struct {
// data
// Required: true
Data models.TerraformJSONDiagram `json:"data"`
}
// Validate validates this get service catalog terraform diagram o k body
func (o *GetServiceCatalogTerraformDiagramOKBody) Validate(formats strfmt.Registry) error {
var res []error
if err := o.validateData(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (o *GetServiceCatalogTerraformDiagramOKBody) validateData(formats strfmt.Registry) error {
if err := validate.Required("getServiceCatalogTerraformDiagramOK"+"."+"data", "body", o.Data); err != nil {
return err
}
return nil
}
// MarshalBinary interface implementation
func (o *GetServiceCatalogTerraformDiagramOKBody) MarshalBinary() ([]byte, error) {
if o == nil {
return nil, nil
}
return swag.WriteJSON(o)
}
// UnmarshalBinary interface implementation
func (o *GetServiceCatalogTerraformDiagramOKBody) UnmarshalBinary(b []byte) error {
var res GetServiceCatalogTerraformDiagramOKBody
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*o = res
return nil
}
| {
return &GetServiceCatalogTerraformDiagramForbidden{}
} |
test_routes.py | # -*- coding: utf-8 -*-
# Copyright (c) 2020 Nekokatt
# Copyright (c) 2021-present davfsa
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import mock
import pytest
from hikari import files
from hikari.internal import routes
from tests.hikari import hikari_test_helpers
class TestCompiledRoute:
@pytest.fixture()
def compiled_route(self):
return routes.CompiledRoute(
major_param_hash="abc123", route=mock.Mock(method="GET"), compiled_path="/some/endpoint"
)
def test_method(self, compiled_route):
assert compiled_route.method == "GET"
def test_create_url(self, compiled_route):
assert compiled_route.create_url("https://some.url/api") == "https://some.url/api/some/endpoint"
def test_create_real_bucket_hash(self, compiled_route):
assert compiled_route.create_real_bucket_hash("UNKNOWN") == "UNKNOWN;abc123"
def test__str__(self, compiled_route):
assert str(compiled_route) == "GET /some/endpoint"
class TestRoute:
@pytest.mark.parametrize(
("route", "params"),
[
(routes.DELETE_CHANNEL, frozenset(("channel",))),
(routes.PATCH_GUILD, frozenset(("guild",))),
(routes.POST_WEBHOOK_WITH_TOKEN, frozenset(("webhook", "token"))),
(routes.GET_INVITE, None),
],
)
def test_major_params(self, route, params):
assert route.major_params == params
def test_compile_with_no_major_params(self):
route = routes.Route(method="GET", path_template="/some/endpoint/{baguette}")
expected = routes.CompiledRoute(route=route, compiled_path="/some/endpoint/1234", major_param_hash="-")
assert route.compile(baguette=1234) == expected
def test_compile_with_channel_major_params(self):
route = routes.Route(method="GET", path_template="/channels/{channel}")
expected = routes.CompiledRoute(route=route, compiled_path="/channels/4325", major_param_hash="4325")
assert route.compile(channel=4325) == expected
def test_compile_with_guild_major_params(self):
route = routes.Route(method="GET", path_template="/guilds/{guild}")
expected = routes.CompiledRoute(route=route, compiled_path="/guilds/5555", major_param_hash="5555")
assert route.compile(guild=5555) == expected
def test_compile_with_webhook_major_params(self):
route = routes.Route(method="GET", path_template="/webhooks/{webhook}/{token}")
expected = routes.CompiledRoute(
route=route, compiled_path="/webhooks/123/okfdkdfkdf", major_param_hash="123:okfdkdfkdf"
)
assert route.compile(webhook=123, token="okfdkdfkdf") == expected
def test__str__(self):
assert str(routes.Route(method="GET", path_template="/some/endpoint/{channel}")) == "/some/endpoint/{channel}"
class TestCDNRoute:
def test_zero_formats_results_in_error(self):
with pytest.raises(ValueError, match="/foo/bar must have at least one valid format set"):
routes.CDNRoute("/foo/bar", set())
def test_any_formats_results_in_no_error(self):
routes.CDNRoute("/foo/bar", {"do", "ray", "me"})
def test_formats_converted_to_frozenset(self):
route = routes.CDNRoute("/foo/bar", {"i", "really", "like", "cats"})
assert isinstance(route.valid_formats, frozenset)
assert route.valid_formats == {"i", "really", "like", "cats"}
def test_formats_converted_to_lower(self):
route = routes.CDNRoute("/foo/bar", {"FOO", "BaR", "bAz", "bork"})
assert route.valid_formats == {"foo", "bar", "baz", "bork"}
def test_eq_operator__considers_path_template_only(self):
route1 = routes.CDNRoute("/foo/bar", {"hello", "world"}, sizable=False)
route2 = routes.CDNRoute("/foo/bar", {"i", "said", "meow"}, sizable=True)
route3 = routes.CDNRoute("/foo/bar", {"i", "said", "meow"}, sizable=False)
route4 = routes.CDNRoute("/foo/bar/baz", {"i", "said", "meow"}, sizable=True)
assert route1 == route2
assert route1 == route3
assert route1 != route4
assert route2 == route3
assert route2 != route4
assert route3 != route4
def test_hash_operator_considers_path_template_only(self):
route1 = routes.CDNRoute("/foo/bar", {"hello", "world"}, sizable=False)
route2 = routes.CDNRoute("/foo/bar", {"i", "said", "meow"}, sizable=True)
route3 = routes.CDNRoute("/foo/bar", {"i", "said", "meow"}, sizable=False)
route4 = routes.CDNRoute("/foo/bar/baz", {"i", "said", "meow"}, sizable=True)
assert hash(route1) == hash(route2)
assert hash(route1) == hash(route3)
assert hash(route1) != hash(route4)
assert hash(route2) == hash(route3)
assert hash(route2) != hash(route4)
assert hash(route3) != hash(route4)
@pytest.mark.parametrize(
("input_file_format", "expected_file_format"),
[
("jpg", "jpg"),
("JPG", "jpg"),
("png", "png"),
("PNG", "png"),
],
)
def test_compile_uses_lowercase_file_format_always(self, input_file_format, expected_file_format):
route = routes.CDNRoute("/foo/bar", {"png", "jpg"}, sizable=False)
compiled_url = route.compile("http://example.com", file_format=input_file_format)
assert compiled_url.endswith(f".{expected_file_format}"), f"compiled_url={compiled_url}"
def test_disallowed_file_format_raises_TypeError(self):
route = routes.CDNRoute("/foo/bar", {"png", "jpg"}, sizable=False)
with pytest.raises(TypeError):
route.compile("http://example.com", file_format="gif")
def test_allowed_file_format_does_not_raise_TypeError(self):
route = routes.CDNRoute("/foo/bar", {"png", "jpg"}, sizable=False)
route.compile("http://example.com", file_format="png")
def test_requesting_gif_on_non_animated_hash_raises_TypeError(self):
route = routes.CDNRoute("/foo/bar", {"png", "jpg", "gif"}, sizable=False)
with pytest.raises(TypeError):
route.compile("http://example.com", file_format="gif", hash="boooob")
@pytest.mark.parametrize("format", ["png", "jpg", "webp"])
def test_requesting_non_gif_on_non_animated_hash_does_not_raise_TypeError(self, format):
route = routes.CDNRoute("/foo/bar", {"png", "jpg", "webp", "gif"}, sizable=False)
route.compile("http://example.com", file_format=format, hash="boooob")
@pytest.mark.parametrize("format", ["png", "jpg", "webp"])
def test_requesting_non_gif_on_animated_hash_does_not_raise_TypeError(self, format):
route = routes.CDNRoute("/foo/bar", {"png", "jpg", "webp", "gif"}, sizable=False)
route.compile("http://example.com", file_format=format, hash="a_boooob")
def test_requesting_gif_on_animated_hash_does_not_raise_TypeError(self):
route = routes.CDNRoute("/foo/bar", {"png", "jpg", "gif"}, sizable=False)
route.compile("http://example.com", file_format="gif", hash="a_boooob")
def test_requesting_gif_without_passing_hash_does_not_raise_TypeError(self):
route = routes.CDNRoute("/foo/bar", {"png", "jpg", "gif"}, sizable=False)
route.compile("http://example.com", file_format="gif")
def test_passing_size_on_non_sizable_raises_TypeError(self):
route = routes.CDNRoute("/foo/bar", {"png", "jpg", "gif"}, sizable=False)
with pytest.raises(TypeError):
route.compile("http://example.com", file_format="png", hash="boooob", size=128)
def test_passing_size_on_sizable_does_not_raise_TypeError(self):
route = routes.CDNRoute("/foo/bar", {"png", "jpg", "gif"}, sizable=True)
route.compile("http://example.com", file_format="png", hash="boooob", size=128)
def test_passing_no_size_on_non_sizable_does_not_raise_TypeError(self):
route = routes.CDNRoute("/foo/bar", {"png", "jpg", "gif"}, sizable=False)
route.compile("http://example.com", file_format="png", hash="boooob")
def test_passing_no_size_on_sizable_does_not_raise_TypeError(self):
route = routes.CDNRoute("/foo/bar", {"png", "jpg", "gif"}, sizable=True)
route.compile("http://example.com", file_format="png", hash="boooob")
@pytest.mark.parametrize("size", [*range(17, 32)])
def test_passing_non_power_of_2_sizes_to_sizable_raises_ValueError(self, size):
route = routes.CDNRoute("/foo/bar", {"png", "jpg", "gif"}, sizable=True)
with pytest.raises(ValueError, match="size must be an integer power of 2 between 16 and 4096 inclusive"):
route.compile("http://example.com", file_format="png", hash="boooob", size=size)
@pytest.mark.parametrize("size", [int(2 ** size) for size in [1, *range(17, 25)]])
def test_passing_invalid_magnitude_sizes_to_sizable_raises_ValueError(self, size): |
@pytest.mark.parametrize("size", [*range(-10, 0)])
def test_passing_negative_sizes_to_sizable_raises_ValueError(self, size):
route = routes.CDNRoute("/foo/bar", {"png", "jpg", "gif"}, sizable=True)
with pytest.raises(ValueError, match="size must be positive"):
route.compile("http://example.com", file_format="png", hash="boooob", size=size)
@pytest.mark.parametrize("size", [int(2 ** size) for size in range(4, 13)])
def test_passing_valid_sizes_to_sizable_does_not_raise_ValueError(self, size):
route = routes.CDNRoute("/foo/bar", {"png", "jpg", "gif"}, sizable=True)
route.compile("http://example.com", file_format="png", hash="boooob", size=size)
def test_passing_size_adds_query_string(self):
route = routes.CDNRoute("/foo/bar", {"png", "jpg", "gif"}, sizable=True)
compiled_url = route.compile("http://example.com", file_format="png", hash="boooob", size=128)
assert compiled_url.endswith(".png?size=128"), f"compiled_url={compiled_url}"
def test_passing_None_size_does_not_add_query_string(self):
route = routes.CDNRoute("/foo/bar", {"png", "jpg", "gif"}, sizable=True)
compiled_url = route.compile("http://example.com", file_format="png", hash="boooob", size=None)
assert "?size=" not in compiled_url, f"compiled_url={compiled_url}"
def test_passing_no_size_does_not_add_query_string(self):
route = routes.CDNRoute("/foo/bar", {"png", "jpg", "gif"}, sizable=True)
compiled_url = route.compile("http://example.com", file_format="png", hash="boooob")
assert "?size=" not in compiled_url, f"compiled_url={compiled_url}"
@pytest.mark.parametrize(
("base_url", "template", "format", "size_kwds", "foo", "bar", "expected_url"),
[
(
"http://example.com",
"/{foo}/{bar}",
"PNG",
{"size": 128},
"baz",
"bork qux",
"http://example.com/baz/bork%20qux.png?size=128",
),
(
"http://example.com",
"/{foo}/bar",
"jpg",
{"size": 128},
"baz",
"bork qux",
"http://example.com/baz/bar.jpg?size=128",
),
(
"http://example.com",
"/{foo}/{bar}",
"WEBP",
{"size": None},
"baz",
123456,
"http://example.com/baz/123456.webp",
),
(
"http://example.com",
"/{foo}/bar",
"GIF",
{"size": None},
"baz",
"bork qux",
"http://example.com/baz/bar.gif",
),
(
"http://example.com",
"/{foo}/{bar}",
"WEBP",
{},
"baz",
"bork qux",
"http://example.com/baz/bork%20qux.webp",
),
(
"http://example.com",
"/{foo}/bar",
"GIF",
{},
"baz",
"bork qux",
"http://example.com/baz/bar.gif",
),
],
)
def test_compile_generates_expected_url(self, base_url, template, format, size_kwds, foo, bar, expected_url):
route = routes.CDNRoute(template, {"png", "gif", "jpg", "webp"}, sizable=True)
actual_url = route.compile(base_url=base_url, file_format=format, foo=foo, bar=bar, **size_kwds)
assert actual_url == expected_url
@pytest.mark.parametrize("format", ["png", "jpg"])
@pytest.mark.parametrize("size", [64, 256, 2048])
def test_compile_to_file_calls_compile(self, format, size):
with mock.patch.object(files, "URL", autospec=files.URL):
route = hikari_test_helpers.mock_class_namespace(routes.CDNRoute, slots_=False)(
"/hello/world", {"png", "jpg"}, sizable=True
)
route.compile = mock.Mock(spec_set=route.compile)
route.compile_to_file("https://blep.com", file_format=format, size=size, boop="oyy lumo", nya="weeb")
route.compile.assert_called_once_with(
"https://blep.com", file_format=format, size=size, boop="oyy lumo", nya="weeb"
)
def test_compile_to_file_passes_compile_result_to_URL_and_returns_constructed_url(self):
resultant_url_str = "http://blep.com/hello/world/weeb/oyy%20lumo"
resultant_url = files.URL("http://blep.com/hello/world/weeb/oyy%20lumo")
with mock.patch.object(files, "URL", autospec=files.URL, return_value=resultant_url) as URL:
route = hikari_test_helpers.mock_class_namespace(routes.CDNRoute, slots_=False)(
"/hello/world/{nya}/{boop}", {"png", "jpg"}, sizable=True
)
route.compile = mock.Mock(spec_set=route.compile, return_value=resultant_url_str)
result = route.compile_to_file("https://blep.com", file_format="png", size=64, boop="oyy lumo", nya="weeb")
URL.assert_called_once_with(resultant_url_str)
assert result is resultant_url | route = routes.CDNRoute("/foo/bar", {"png", "jpg", "gif"}, sizable=True)
with pytest.raises(ValueError, match="size must be an integer power of 2 between 16 and 4096 inclusive"):
route.compile("http://example.com", file_format="png", hash="boooob", size=size) |
lib.rs | #[derive(Debug)]
pub struct Rectangle {
length: u32,
width: u32,
} | pub fn can_hold(&self, other: &Rectangle) -> bool {
self.length > other.length && self.width > other.width
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn larger_can_hold_smaller() {
let larger = Rectangle {
length: 8,
width: 7,
};
let smaller = Rectangle {
length: 5,
width: 1,
};
assert!(larger.can_hold(&smaller));
}
#[test]
fn smaller_cannot_hold_larger() {
let larger = Rectangle {
length: 8,
width: 7,
};
let smaller = Rectangle {
length: 5,
width: 1,
};
assert!(!smaller.can_hold(&larger));
}
} |
impl Rectangle { |
phone_call_logic.go | /*
* Copyright (c) 2018, https://github.com/airwide-code
* All rights reserved.
*
*
*
*/
package phone_call
import (
"github.com/airwide-code/airwide.datacenter/mtproto"
"github.com/airwide-code/airwide.datacenter/biz/base"
"math/rand"
"time"
"github.com/airwide-code/airwide.datacenter/biz/dal/dataobject"
"github.com/airwide-code/airwide.datacenter/biz/dal/dao"
base2 "github.com/airwide-code/airwide.datacenter/baselib/base"
"encoding/hex"
"fmt"
)
// TODO(@benqi): Use redis to store phone_call_sessions
type phoneCallLogic PhoneCallSession
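// NewPhoneCallLogic builds a new call session with a snowflake id and freshly
// generated random access hashes for both peers, then persists it to the master DB.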
func NewPhoneCallLogic(adminId, participantId int32, ga []byte, protocol *mtproto.TLPhoneCallProtocol) *phoneCallLogic {
session := &phoneCallLogic{
Id: base.NextSnowflakeId(),
AdminId: adminId,
AdminAccessHash: rand.Int63(),
ParticipantId: participantId,
ParticipantAccessHash: rand.Int63(),
UdpP2P: protocol.GetUdpP2P(),
UdpReflector: protocol.GetUdpReflector(),
MinLayer: protocol.GetMinLayer(),
MaxLayer: protocol.GetMaxLayer(),
GA: ga,
State: 0,
Date: time.Now().Unix(),
}
do := &dataobject.PhoneCallSessionsDO{
CallSessionId: session.Id,
AdminId: session.AdminId,
AdminAccessHash: session.AdminAccessHash,
ParticipantId: session.ParticipantId,
ParticipantAccessHash: session.ParticipantAccessHash,
UdpP2p: base2.BoolToInt8(session.UdpP2P),
UdpReflector: base2.BoolToInt8(session.UdpReflector),
MinLayer: session.MinLayer,
MaxLayer: session.MaxLayer,
GA: hex.EncodeToString(session.GA),
Date: int32(session.Date),
}
dao.GetPhoneCallSessionsDAO(dao.DB_MASTER).Insert(do)
return session
}
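// MakePhoneCallLogcByLoad restores a previously persisted call session from the slave DB by id.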
func MakePhoneCallLogcByLoad(id int64) (*phoneCallLogic, error) {
do := dao.GetPhoneCallSessionsDAO(dao.DB_SLAVE).Select(id)
if do == nil {
err := fmt.Errorf("not found call session: %d", id)
return nil, err
}
session := &phoneCallLogic{
Id: do.CallSessionId,
AdminId: do.AdminId,
AdminAccessHash: do.AdminAccessHash,
ParticipantId: do.ParticipantId,
ParticipantAccessHash: do.ParticipantAccessHash,
UdpP2P: do.UdpP2p == 1,
UdpReflector: do.UdpReflector == 1,
MinLayer: do.MinLayer,
MaxLayer: do.MaxLayer,
// GA: do.GA,
State: 0,
Date: int64(do.Date),
}
session.GA, _ = hex.DecodeString(do.GA)
return session, nil
}
func (p *phoneCallLogic) SetGB(gb []byte) {
p.GB = gb
dao.GetPhoneCallSessionsDAO(dao.DB_MASTER).UpdateGB(hex.EncodeToString(gb), p.Id)
}
func (p *phoneCallLogic) SetAdminDebugData(dataJson string) {
dao.GetPhoneCallSessionsDAO(dao.DB_MASTER).UpdateAdminDebugData(dataJson, p.Id)
}
func (p *phoneCallLogic) SetParticipantDebugData(dataJson string) {
dao.GetPhoneCallSessionsDAO(dao.DB_MASTER).UpdateParticipantDebugData(dataJson, p.Id)
}
func (p *phoneCallLogic) toPhoneCallProtocol() *mtproto.PhoneCallProtocol {
return &mtproto.PhoneCallProtocol{
Constructor: mtproto.TLConstructor_CRC32_phoneCallProtocol,
Data2: &mtproto.PhoneCallProtocol_Data{
UdpP2P: p.UdpP2P,
UdpReflector: p.UdpReflector,
MinLayer: p.MinLayer,
MaxLayer: p.MaxLayer,
},
}
}
func (p *phoneCallLogic) ToPhoneCallProtocol() *mtproto.PhoneCallProtocol {
return p.toPhoneCallProtocol()
}
// phoneCallRequested#83761ce4 id:long access_hash:long date:int admin_id:int participant_id:int g_a_hash:bytes protocol:PhoneCallProtocol = PhoneCall;
func (p *phoneCallLogic) ToPhoneCallRequested() *mtproto.TLPhoneCallRequested {
return &mtproto.TLPhoneCallRequested{Data2: &mtproto.PhoneCall_Data{
Id: p.Id,
AccessHash: p.ParticipantAccessHash,
Date: int32(p.Date),
AdminId: p.AdminId,
ParticipantId: p.ParticipantId,
GAHash: p.GA,
Protocol: p.toPhoneCallProtocol(),
}}
}
// phoneCallWaiting#1b8f4ad1 flags:# id:long access_hash:long date:int admin_id:int participant_id:int protocol:PhoneCallProtocol receive_date:flags.0?int = PhoneCall;
func (p *phoneCallLogic) ToPhoneCallWaiting(selfId int32, receiveDate int32) *mtproto.TLPhoneCallWaiting {
var (
accessHash int64
)
if selfId == p.AdminId {
accessHash = p.AdminAccessHash
} else {
accessHash = p.ParticipantAccessHash
}
return &mtproto.TLPhoneCallWaiting{Data2: &mtproto.PhoneCall_Data{
Id: p.Id,
AccessHash: accessHash,
Date: int32(p.Date),
AdminId: p.AdminId,
ParticipantId: p.ParticipantId,
GAHash: p.GA,
Protocol: p.toPhoneCallProtocol(),
ReceiveDate: receiveDate,
}}
}
// phoneCallAccepted#6d003d3f id:long access_hash:long date:int admin_id:int participant_id:int g_b:bytes protocol:PhoneCallProtocol = PhoneCall;
func (p *phoneCallLogic) ToPhoneCallAccepted() *mtproto.TLPhoneCallAccepted {
return &mtproto.TLPhoneCallAccepted{Data2: &mtproto.PhoneCall_Data{
Id: p.Id,
AccessHash: p.AdminAccessHash,
Date: int32(p.Date),
AdminId: p.AdminId,
ParticipantId: p.ParticipantId,
GB: p.GB,
Protocol: p.toPhoneCallProtocol(),
}}
}
// phoneConnection#9d4c17c0 id:long ip:string ipv6:string port:int peer_tag:bytes = PhoneConnection;
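// NOTE: the connection id, address, port and peer tag below are hardcoded.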
func makeConnection() *mtproto.PhoneConnection {
return &mtproto.PhoneConnection{
Constructor: mtproto.TLConstructor_CRC32_phoneConnection,
Data2: &mtproto.PhoneConnection_Data{
Id: 50003,
// Ip: "192.168.4.32",
Ip: "192.168.1.104",
Ipv6: "",
Port: 50001,
PeerTag: []byte("24ffcbeb7980d28b"),
},
}
}
// phoneCall#ffe6ab67 id:long access_hash:long date:int admin_id:int participant_id:int g_a_or_b:bytes key_fingerprint:long protocol:PhoneCallProtocol connection:PhoneConnection alternative_connections:Vector<PhoneConnection> start_date:int = PhoneCall;
func (p *phoneCallLogic) ToPhoneCall(selfId int32, keyFingerprint int64) *mtproto.TLPhoneCall {
var (
accessHash int64
gaOrGb []byte
)
if selfId == p.AdminId {
accessHash = p.AdminAccessHash
gaOrGb = p.GB
} else {
accessHash = p.ParticipantAccessHash
gaOrGb = p.GA
}
return &mtproto.TLPhoneCall{Data2: &mtproto.PhoneCall_Data{
Id: p.Id,
AccessHash: accessHash,
Date: int32(p.Date), | AdminId: p.AdminId,
ParticipantId: p.ParticipantId,
GAOrB: gaOrGb,
KeyFingerprint: keyFingerprint,
Protocol: p.toPhoneCallProtocol(),
Connection: makeConnection(),
AlternativeConnections: []*mtproto.PhoneConnection{}, // TODO(@benqi):
StartDate: 0,
}}
} | |
provisioner_test.go | // Copyright 2013 tsuru authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package docker
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/http/httptest"
"net/url"
"runtime"
"sort"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/docker/docker/pkg/stdcopy"
docker "github.com/fsouza/go-dockerclient"
"github.com/fsouza/go-dockerclient/testing"
"github.com/globalsign/mgo/bson"
"github.com/tsuru/config"
"github.com/tsuru/docker-cluster/cluster"
"github.com/tsuru/tsuru/app"
"github.com/tsuru/tsuru/app/bind"
"github.com/tsuru/tsuru/errors"
"github.com/tsuru/tsuru/event"
"github.com/tsuru/tsuru/net"
"github.com/tsuru/tsuru/permission"
"github.com/tsuru/tsuru/provision"
"github.com/tsuru/tsuru/provision/docker/container"
internalNodeContainer "github.com/tsuru/tsuru/provision/docker/nodecontainer"
"github.com/tsuru/tsuru/provision/docker/types"
"github.com/tsuru/tsuru/provision/dockercommon"
"github.com/tsuru/tsuru/provision/nodecontainer"
"github.com/tsuru/tsuru/provision/pool"
"github.com/tsuru/tsuru/provision/provisiontest"
"github.com/tsuru/tsuru/queue"
"github.com/tsuru/tsuru/router/routertest"
"github.com/tsuru/tsuru/safe"
"github.com/tsuru/tsuru/servicemanager"
provTypes "github.com/tsuru/tsuru/types/provision"
"github.com/tsuru/tsuru/types/quota"
check "gopkg.in/check.v1"
)
func newFakeServer() *httptest.Server |
func (s *S) TestShouldBeRegistered(c *check.C) {
p, err := provision.Get("docker")
c.Assert(err, check.IsNil)
c.Assert(p, check.FitsTypeOf, &dockerProvisioner{})
}
func (s *S) TestProvisionerProvision(c *check.C) {
app := provisiontest.NewFakeApp("myapp", "python", 1)
err := s.p.Provision(app)
c.Assert(err, check.IsNil)
c.Assert(routertest.FakeRouter.HasBackend("myapp"), check.Equals, true)
}
func (s *S) TestProvisionerRestart(c *check.C) {
app := provisiontest.NewFakeApp("almah", "static", 1)
customData := map[string]interface{}{
"processes": map[string]interface{}{
"web": "python web.py",
"worker": "python worker.py",
},
}
cont1, err := s.newContainer(&newContainerOpts{
AppName: app.GetName(),
ProcessName: "web",
ImageCustomData: customData,
}, nil)
c.Assert(err, check.IsNil)
defer s.removeTestContainer(cont1)
cont2, err := s.newContainer(&newContainerOpts{
AppName: app.GetName(),
ProcessName: "worker",
ImageCustomData: customData,
}, nil)
c.Assert(err, check.IsNil)
defer s.removeTestContainer(cont2)
err = s.p.Start(app, "")
c.Assert(err, check.IsNil)
dockerContainer, err := s.p.Cluster().InspectContainer(cont1.ID)
c.Assert(err, check.IsNil)
c.Assert(dockerContainer.State.Running, check.Equals, true)
dockerContainer, err = s.p.Cluster().InspectContainer(cont2.ID)
c.Assert(err, check.IsNil)
c.Assert(dockerContainer.State.Running, check.Equals, true)
err = s.p.Restart(app, "", nil)
c.Assert(err, check.IsNil)
dbConts, err := s.p.listAllContainers()
c.Assert(err, check.IsNil)
c.Assert(dbConts, check.HasLen, 2)
c.Assert(dbConts[0].ID, check.Not(check.Equals), cont1.ID)
c.Assert(dbConts[0].AppName, check.Equals, app.GetName())
c.Assert(dbConts[0].Status, check.Equals, provision.StatusStarting.String())
c.Assert(dbConts[1].ID, check.Not(check.Equals), cont2.ID)
c.Assert(dbConts[1].AppName, check.Equals, app.GetName())
c.Assert(dbConts[1].Status, check.Equals, provision.StatusStarting.String())
dockerContainer, err = s.p.Cluster().InspectContainer(dbConts[0].ID)
c.Assert(err, check.IsNil)
c.Assert(dockerContainer.State.Running, check.Equals, true)
expectedIP := dockerContainer.NetworkSettings.IPAddress
expectedPort := dockerContainer.NetworkSettings.Ports["8888/tcp"][0].HostPort
c.Assert(dbConts[0].IP, check.Equals, expectedIP)
c.Assert(dbConts[0].HostPort, check.Equals, expectedPort)
}
func (s *S) TestProvisionerRestartStoppedContainer(c *check.C) {
app := provisiontest.NewFakeApp("almah", "static", 1)
customData := map[string]interface{}{
"processes": map[string]interface{}{
"web": "python web.py",
"worker": "python worker.py",
},
}
cont1, err := s.newContainer(&newContainerOpts{
AppName: app.GetName(),
ProcessName: "web",
ImageCustomData: customData,
}, nil)
c.Assert(err, check.IsNil)
defer s.removeTestContainer(cont1)
cont2, err := s.newContainer(&newContainerOpts{
AppName: app.GetName(),
ProcessName: "worker",
ImageCustomData: customData,
}, nil)
c.Assert(err, check.IsNil)
defer s.removeTestContainer(cont2)
err = s.p.Stop(app, "")
c.Assert(err, check.IsNil)
dockerContainer, err := s.p.Cluster().InspectContainer(cont1.ID)
c.Assert(err, check.IsNil)
c.Assert(dockerContainer.State.Running, check.Equals, false)
dockerContainer, err = s.p.Cluster().InspectContainer(cont2.ID)
c.Assert(err, check.IsNil)
c.Assert(dockerContainer.State.Running, check.Equals, false)
err = s.p.Restart(app, "", nil)
c.Assert(err, check.IsNil)
dbConts, err := s.p.listAllContainers()
c.Assert(err, check.IsNil)
c.Assert(dbConts, check.HasLen, 2)
c.Assert(dbConts[0].ID, check.Not(check.Equals), cont1.ID)
c.Assert(dbConts[0].AppName, check.Equals, app.GetName())
c.Assert(dbConts[0].Status, check.Equals, provision.StatusStarting.String())
c.Assert(dbConts[1].ID, check.Not(check.Equals), cont1.ID)
c.Assert(dbConts[1].AppName, check.Equals, app.GetName())
c.Assert(dbConts[1].Status, check.Equals, provision.StatusStarting.String())
dockerContainer, err = s.p.Cluster().InspectContainer(dbConts[0].ID)
c.Assert(err, check.IsNil)
c.Assert(dockerContainer.State.Running, check.Equals, true)
expectedIP := dockerContainer.NetworkSettings.IPAddress
expectedPort := dockerContainer.NetworkSettings.Ports["8888/tcp"][0].HostPort
c.Assert(dbConts[0].IP, check.Equals, expectedIP)
c.Assert(dbConts[0].HostPort, check.Equals, expectedPort)
}
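// containerByProcessList implements sort.Interface, ordering containers by process name.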
type containerByProcessList []container.Container
func (l containerByProcessList) Len() int { return len(l) }
func (l containerByProcessList) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
func (l containerByProcessList) Less(i, j int) bool { return l[i].ProcessName < l[j].ProcessName }
func (s *S) TestProvisionerRestartProcess(c *check.C) {
app := provisiontest.NewFakeApp("almah", "static", 1)
customData := map[string]interface{}{
"processes": map[string]interface{}{
"web": "python web.py",
"worker": "python worker.py",
},
}
cont1, err := s.newContainer(&newContainerOpts{
AppName: app.GetName(),
ProcessName: "web",
ImageCustomData: customData,
}, nil)
c.Assert(err, check.IsNil)
defer s.removeTestContainer(cont1)
cont2, err := s.newContainer(&newContainerOpts{
AppName: app.GetName(),
ProcessName: "worker",
ImageCustomData: customData,
}, nil)
c.Assert(err, check.IsNil)
defer s.removeTestContainer(cont2)
err = s.p.Start(app, "")
c.Assert(err, check.IsNil)
dockerContainer, err := s.p.Cluster().InspectContainer(cont1.ID)
c.Assert(err, check.IsNil)
c.Assert(dockerContainer.State.Running, check.Equals, true)
dockerContainer, err = s.p.Cluster().InspectContainer(cont2.ID)
c.Assert(err, check.IsNil)
c.Assert(dockerContainer.State.Running, check.Equals, true)
err = s.p.Restart(app, "web", nil)
c.Assert(err, check.IsNil)
dbConts, err := s.p.listAllContainers()
c.Assert(err, check.IsNil)
c.Assert(dbConts, check.HasLen, 2)
sort.Sort(containerByProcessList(dbConts))
c.Assert(dbConts[1].ID, check.Equals, cont2.ID)
c.Assert(dbConts[0].ID, check.Not(check.Equals), cont1.ID)
c.Assert(dbConts[0].AppName, check.Equals, app.GetName())
c.Assert(dbConts[0].Status, check.Equals, provision.StatusStarting.String())
dockerContainer, err = s.p.Cluster().InspectContainer(dbConts[0].ID)
c.Assert(err, check.IsNil)
c.Assert(dockerContainer.State.Running, check.Equals, true)
expectedIP := dockerContainer.NetworkSettings.IPAddress
expectedPort := dockerContainer.NetworkSettings.Ports["8888/tcp"][0].HostPort
c.Assert(dbConts[0].IP, check.Equals, expectedIP)
c.Assert(dbConts[0].HostPort, check.Equals, expectedPort)
}
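// stopContainers stops up to n running containers at the given Docker endpoint in a
// background goroutine, polling every 500ms; the returned channel is closed once done.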
func (s *S) stopContainers(endpoint string, n uint) <-chan bool {
ch := make(chan bool)
go func() {
defer close(ch)
client, err := docker.NewClient(endpoint)
if err != nil {
return
}
for n > 0 {
opts := docker.ListContainersOptions{All: false}
containers, err := client.ListContainers(opts)
if err != nil {
return
}
if len(containers) > 0 {
for _, cont := range containers {
if cont.ID != "" {
client.StopContainer(cont.ID, 1)
n--
}
}
}
time.Sleep(500 * time.Millisecond)
}
}()
return ch
}
func (s *S) TestDeploy(c *check.C) {
config.Unset("docker:repository-namespace")
defer config.Set("docker:repository-namespace", "tsuru")
stopCh := s.stopContainers(s.server.URL(), 1)
defer func() { <-stopCh }()
a := s.newApp("myapp")
err := app.CreateApp(&a, s.user)
s.mockService.AppQuota.OnSet = func(appName string, inUse int) error {
c.Assert(appName, check.Equals, "myapp")
c.Assert(inUse, check.Equals, 1)
a.Quota.InUse = 1
return nil
}
c.Assert(err, check.IsNil)
var serviceBodies []string
rollback := s.addServiceInstance(c, a.Name, nil, func(w http.ResponseWriter, r *http.Request) {
data, _ := ioutil.ReadAll(r.Body)
serviceBodies = append(serviceBodies, string(data))
w.WriteHeader(http.StatusOK)
})
defer rollback()
customData := map[string]interface{}{
"processes": map[string]interface{}{
"web": "python myapp.py",
},
}
version, err := newVersionForApp(s.p, &a, customData)
c.Assert(err, check.IsNil)
evt, err := event.New(&event.Opts{
Target: event.Target{Type: "app", Value: a.Name},
Kind: permission.PermAppDeploy,
Owner: s.token,
Allowed: event.Allowed(permission.PermApp),
})
c.Assert(err, check.IsNil)
imgID, err := s.p.Deploy(&a, version, evt)
c.Assert(err, check.IsNil)
c.Assert(imgID, check.Equals, "tsuru/app-"+a.Name+":v1")
units, err := a.Units()
c.Assert(err, check.IsNil)
c.Assert(units, check.HasLen, 1)
c.Assert(serviceBodies, check.HasLen, 1)
c.Assert(serviceBodies[0], check.Matches, ".*unit-host="+units[0].IP)
c.Assert(a.Quota, check.DeepEquals, quota.Quota{Limit: -1, InUse: 1})
cont, err := s.p.Cluster().InspectContainer(units[0].GetID())
c.Assert(err, check.IsNil)
c.Assert(cont.Config.Cmd, check.DeepEquals, []string{
"/bin/sh",
"-lc",
"[ -d /home/application/current ] && cd /home/application/current; exec python myapp.py",
})
}
func (s *S) TestDeployWithLimiterActive(c *check.C) {
config.Set("docker:limit:actions-per-host", 1)
defer config.Unset("docker:limit:actions-per-host")
var p dockerProvisioner
p.storage = &cluster.MapStorage{}
err := p.Initialize()
c.Assert(err, check.IsNil)
mainDockerProvisioner = &p
p.cluster, err = cluster.New(nil, p.storage, "",
cluster.Node{Address: s.server.URL(), Metadata: map[string]string{"pool": "test-default"}},
)
c.Assert(err, check.IsNil)
stopCh := s.stopContainers(s.server.URL(), 1)
defer func() { <-stopCh }()
a := s.newApp("otherapp")
err = app.CreateApp(&a, s.user)
c.Assert(err, check.IsNil)
customData := map[string]interface{}{
"processes": map[string]interface{}{
"web": "python myapp.py",
},
}
version, err := newVersionForApp(s.p, &a, customData)
c.Assert(err, check.IsNil)
evt, err := event.New(&event.Opts{
Target: event.Target{Type: "app", Value: a.Name},
Kind: permission.PermAppDeploy,
Owner: s.token,
Allowed: event.Allowed(permission.PermApp),
})
c.Assert(err, check.IsNil)
fakeServer := newFakeServer()
defer fakeServer.Close()
_, err = s.p.Deploy(&a, version, evt)
c.Assert(err, check.IsNil)
units, err := a.Units()
c.Assert(err, check.IsNil)
c.Assert(units, check.HasLen, 1)
hostAddr := net.URLToHost(s.server.URL())
c.Assert(p.ActionLimiter().Len(hostAddr), check.Equals, 0)
err = p.Destroy(&a)
c.Assert(err, check.IsNil)
c.Assert(p.ActionLimiter().Len(hostAddr), check.Equals, 0)
}
func (s *S) TestDeployWithLimiterGlobalActive(c *check.C) {
config.Set("docker:limit:mode", "global")
config.Set("docker:limit:actions-per-host", 1)
defer config.Unset("docker:limit:mode")
defer config.Unset("docker:limit:actions-per-host")
var p dockerProvisioner
p.storage = &cluster.MapStorage{}
err := p.Initialize()
c.Assert(err, check.IsNil)
mainDockerProvisioner = &p
p.cluster, err = cluster.New(nil, p.storage, "",
cluster.Node{Address: s.server.URL(), Metadata: map[string]string{"pool": "test-default"}},
)
c.Assert(err, check.IsNil)
stopCh := s.stopContainers(s.server.URL(), 1)
defer func() { <-stopCh }()
a := s.newApp("otherapp")
err = app.CreateApp(&a, s.user)
c.Assert(err, check.IsNil)
customData := map[string]interface{}{
"processes": map[string]interface{}{
"web": "python myapp.py",
},
}
version, err := newVersionForApp(s.p, &a, customData)
c.Assert(err, check.IsNil)
evt, err := event.New(&event.Opts{
Target: event.Target{Type: "app", Value: a.Name},
Kind: permission.PermAppDeploy,
Owner: s.token,
Allowed: event.Allowed(permission.PermApp),
})
c.Assert(err, check.IsNil)
fakeServer := newFakeServer()
defer fakeServer.Close()
imgID, err := s.p.Deploy(&a, version, evt)
c.Assert(err, check.IsNil)
c.Assert(imgID, check.Equals, "tsuru/app-"+a.Name+":v1")
units, err := a.Units()
c.Assert(err, check.IsNil)
c.Assert(units, check.HasLen, 1)
hostAddr := net.URLToHost(s.server.URL())
c.Assert(p.ActionLimiter().Len(hostAddr), check.Equals, 0)
err = p.Destroy(&a)
c.Assert(err, check.IsNil)
c.Assert(p.ActionLimiter().Len(hostAddr), check.Equals, 0)
}
func (s *S) TestDeployQuotaExceeded(c *check.C) {
stopCh := s.stopContainers(s.server.URL(), 1)
defer func() { <-stopCh }()
a := s.newApp("otherapp")
err := app.CreateApp(&a, s.user)
s.mockService.AppQuota.OnSetLimit = func(appName string, limit int) error {
c.Assert(appName, check.Equals, "otherapp")
c.Assert(limit, check.Equals, 1)
a.Quota.Limit = 1
return nil
}
s.mockService.AppQuota.OnSet = func(appName string, quantity int) error {
c.Assert(appName, check.Equals, "otherapp")
c.Assert(quantity, check.Equals, 2)
return "a.QuotaExceededError{Available: 1, Requested: 2}
}
c.Assert(err, check.IsNil)
err = a.SetQuotaLimit(1)
c.Assert(err, check.IsNil)
var serviceBodies []string
rollback := s.addServiceInstance(c, a.Name, nil, func(w http.ResponseWriter, r *http.Request) {
data, _ := ioutil.ReadAll(r.Body)
serviceBodies = append(serviceBodies, string(data))
w.WriteHeader(http.StatusOK)
})
defer rollback()
customData := map[string]interface{}{
"processes": map[string]interface{}{
"web": "python myapp.py",
"worker": "python myworker.py",
},
}
version, err := newVersionForApp(s.p, &a, customData)
c.Assert(err, check.IsNil)
evt, err := event.New(&event.Opts{
Target: event.Target{Type: "app", Value: a.Name},
Kind: permission.PermAppDeploy,
Owner: s.token,
Allowed: event.Allowed(permission.PermApp),
})
c.Assert(err, check.IsNil)
fakeServer := newFakeServer()
defer fakeServer.Close()
_, err = s.p.Deploy(&a, version, evt)
c.Assert(err, check.NotNil)
compErr, ok := err.(*errors.CompositeError)
c.Assert(ok, check.Equals, true)
c.Assert(compErr.Message, check.Equals, "Cannot start application units")
e, ok := compErr.Base.(*quota.QuotaExceededError)
c.Assert(ok, check.Equals, true)
c.Assert(e.Available, check.Equals, uint(1))
c.Assert(e.Requested, check.Equals, uint(2))
}
func (s *S) TestDeployCanceledEvent(c *check.C) {
app := provisiontest.NewFakeApp("myapp", "python", 1)
routertest.FakeRouter.AddBackend(app)
defer routertest.FakeRouter.RemoveBackend(app.GetName())
evt, err := event.New(&event.Opts{
Target: event.Target{Type: "app", Value: "myapp"},
Kind: permission.PermAppDeploy,
Owner: s.token,
Cancelable: true,
Allowed: event.Allowed(permission.PermApp),
AllowedCancel: event.Allowed(permission.PermApp),
})
c.Assert(err, check.IsNil)
fakeServer := newFakeServer()
defer fakeServer.Close()
version, err := newVersionForApp(s.p, app, nil)
c.Assert(err, check.IsNil)
done := make(chan bool)
go func() {
defer close(done)
img, depErr := s.p.Deploy(app, version, evt)
c.Assert(depErr, check.ErrorMatches, "unit creation canceled by user action")
c.Assert(img, check.Equals, "")
}()
time.Sleep(100 * time.Millisecond)
evtDB, err := event.GetByID(evt.UniqueID)
c.Assert(err, check.IsNil)
err = evtDB.TryCancel("because yes", "[email protected]")
c.Assert(err, check.IsNil)
<-done
}
func (s *S) TestDeployRegisterRace(c *check.C) {
originalMaxProcs := runtime.GOMAXPROCS(10)
defer runtime.GOMAXPROCS(originalMaxProcs)
var p dockerProvisioner
var registerCount int64
server, err := testing.NewServer("127.0.0.1:0", nil, func(r *http.Request) {
go func(path string) {
parts := strings.Split(path, "/")
if len(parts) == 4 && parts[3] == "start" {
registerErr := p.RegisterUnit(nil, parts[2], nil)
if registerErr == nil {
atomic.AddInt64(&registerCount, 1)
} else {
c.Fatal(registerErr)
}
}
}(r.URL.Path)
})
c.Assert(err, check.IsNil)
defer server.Stop()
config.Set("docker:registry", "localhost:3030")
defer config.Unset("docker:registry")
err = p.Initialize()
c.Assert(err, check.IsNil)
p.cluster, err = cluster.New(nil, &cluster.MapStorage{}, "",
cluster.Node{Address: server.URL()})
c.Assert(err, check.IsNil)
nTests := 100
stopCh := s.stopContainers(server.URL(), uint(nTests))
defer func() { <-stopCh }()
wg := sync.WaitGroup{}
for i := 0; i < nTests; i++ {
wg.Add(1)
go func(i int) {
defer wg.Done()
name := fmt.Sprintf("myapp-%d", i)
app := provisiontest.NewFakeApp(name, "python", 1)
version, err := newVersionForApp(&p, app, nil)
c.Assert(err, check.IsNil)
routertest.FakeRouter.AddBackend(app)
defer routertest.FakeRouter.RemoveBackend(app.GetName())
img, err := p.deployPipeline(app, version, []string{"/bin/test"}, nil)
c.Assert(err, check.IsNil)
c.Assert(img, check.Equals, "localhost:3030/tsuru/app-"+name+":v1")
}(i)
}
wg.Wait()
c.Assert(registerCount, check.Equals, int64(nTests))
}
func (s *S) TestRollbackDeploy(c *check.C) {
a := s.newApp("otherapp")
a.Quota = quota.UnlimitedQuota
err := app.CreateApp(&a, s.user)
c.Assert(err, check.IsNil)
version, err := newSuccessfulVersionForApp(s.p, &a, nil)
c.Assert(err, check.IsNil)
w := safe.NewBuffer(make([]byte, 2048))
evt, err := event.New(&event.Opts{
Target: event.Target{Type: "app", Value: a.Name},
Kind: permission.PermAppDeploy,
Owner: s.token,
Allowed: event.Allowed(permission.PermApp),
})
c.Assert(err, check.IsNil)
_, err = app.Deploy(app.DeployOptions{
App: &a,
OutputStream: w,
Image: version.BaseImageName(),
Rollback: true,
Event: evt,
})
c.Assert(err, check.IsNil)
units, err := a.Units()
c.Assert(err, check.IsNil)
c.Assert(units, check.HasLen, 1)
}
func (s *S) TestDeployErasesOldImagesIfFailed(c *check.C) {
config.Set("docker:image-history-size", 1)
defer config.Unset("docker:image-history-size")
a := s.newApp("appdeployimagetest")
err := app.CreateApp(&a, s.user)
c.Assert(err, check.IsNil)
stopCh := s.stopContainers(s.server.URL(), 1)
defer func() { <-stopCh }()
baseImgName := "tsuru/app-" + a.Name + ":v1"
customData := map[string]interface{}{
"processes": map[string]interface{}{
"web": "python myapp.py",
},
}
version, err := newVersionForApp(s.p, &a, customData)
c.Assert(err, check.IsNil)
s.server.CustomHandler("/containers/create", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
data, _ := ioutil.ReadAll(r.Body)
r.Body = ioutil.NopCloser(bytes.NewBuffer(data))
var result docker.Config
jsonErr := json.Unmarshal(data, &result)
if jsonErr == nil {
if result.Image == baseImgName {
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte("my awesome error"))
return
}
}
s.server.DefaultHandler().ServeHTTP(w, r)
}))
evt, err := event.New(&event.Opts{
Target: event.Target{Type: "app", Value: a.Name},
Kind: permission.PermAppDeploy,
Owner: s.token,
Allowed: event.Allowed(permission.PermApp),
})
c.Assert(err, check.IsNil)
imgs, err := s.p.Cluster().ListImages(docker.ListImagesOptions{All: true})
c.Assert(err, check.IsNil)
c.Assert(imgs, check.HasLen, 1)
_, err = s.p.Deploy(&a, version, evt)
c.Assert(err, check.ErrorMatches, ".*my awesome error.*")
imgs, err = s.p.Cluster().ListImages(docker.ListImagesOptions{All: true})
c.Assert(err, check.IsNil)
c.Assert(imgs, check.HasLen, 0)
}
func (s *S) TestRollbackDeployFailureDoesntEraseImage(c *check.C) {
a := s.newApp("otherapp")
err := s.conn.Apps().Insert(a)
c.Assert(err, check.IsNil)
s.p.Provision(&a)
s.server.CustomHandler("/containers/create", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
data, _ := ioutil.ReadAll(r.Body)
r.Body = ioutil.NopCloser(bytes.NewBuffer(data))
var result docker.Config
jsonErr := json.Unmarshal(data, &result)
if jsonErr == nil {
if result.Image == "tsuru/app-otherapp:v1" {
w.WriteHeader(http.StatusInternalServerError)
return
}
}
s.server.DefaultHandler().ServeHTTP(w, r)
}))
w := safe.NewBuffer(make([]byte, 2048))
evt, err := event.New(&event.Opts{
Target: event.Target{Type: "app", Value: a.Name},
Kind: permission.PermAppDeploy,
Owner: s.token,
Allowed: event.Allowed(permission.PermApp),
})
c.Assert(err, check.IsNil)
version, err := newSuccessfulVersionForApp(s.p, &a, nil)
c.Assert(err, check.IsNil)
_, err = app.Deploy(app.DeployOptions{
App: &a,
OutputStream: w,
Image: version.BaseImageName(),
Rollback: true,
Event: evt,
})
c.Assert(err, check.NotNil)
units, err := a.Units()
c.Assert(err, check.IsNil)
c.Assert(units, check.HasLen, 0)
imgs, err := s.p.Cluster().ListImages(docker.ListImagesOptions{All: true})
c.Assert(err, check.IsNil)
c.Assert(imgs, check.HasLen, 2)
c.Assert(imgs[0].RepoTags, check.HasLen, 1)
c.Assert(imgs[1].RepoTags, check.HasLen, 1)
sort.Slice(imgs, func(i, j int) bool {
return imgs[i].RepoTags[0] < imgs[j].RepoTags[0]
})
c.Assert("tsuru/app-otherapp:v1", check.Equals, imgs[0].RepoTags[0])
c.Assert("tsuru/app-otherapp:v1-builder", check.Equals, imgs[1].RepoTags[0])
}
func (s *S) TestDeployImageID(c *check.C) {
stopCh := s.stopContainers(s.server.URL(), 1)
defer func() { <-stopCh }()
a := s.newApp("myapp")
err := app.CreateApp(&a, s.user)
c.Assert(err, check.IsNil)
var serviceBodies []string
rollback := s.addServiceInstance(c, a.Name, nil, func(w http.ResponseWriter, r *http.Request) {
data, _ := ioutil.ReadAll(r.Body)
serviceBodies = append(serviceBodies, string(data))
w.WriteHeader(http.StatusOK)
})
defer rollback()
customData := map[string]interface{}{
"processes": map[string]interface{}{
"web": []string{"/bin/sh", "-c", "python test.py"},
},
}
version, err := newVersionForApp(s.p, &a, customData)
c.Assert(err, check.IsNil)
evt, err := event.New(&event.Opts{
Target: event.Target{Type: "app", Value: a.Name},
Kind: permission.PermAppDeploy,
Owner: s.token,
Allowed: event.Allowed(permission.PermApp),
})
c.Assert(err, check.IsNil)
pullOpts := docker.PullImageOptions{
Repository: "tsuru/app-" + a.Name,
Tag: "v1",
}
err = s.p.Cluster().PullImage(pullOpts, dockercommon.RegistryAuthConfig(pullOpts.Repository))
c.Assert(err, check.IsNil)
_, err = s.p.Deploy(&a, version, evt)
c.Assert(err, check.IsNil)
units, err := a.Units()
c.Assert(err, check.IsNil)
c.Assert(units, check.HasLen, 1)
latestVersion, err := servicemanager.AppVersion.LatestSuccessfulVersion(&a)
c.Assert(err, check.IsNil)
expectedProcesses := map[string][]string{"web": {"/bin/sh", "-c", "python test.py"}}
c.Assert(latestVersion.VersionInfo().Processes, check.DeepEquals, expectedProcesses)
dcli, err := docker.NewClient(s.server.URL())
c.Assert(err, check.IsNil)
dockerContainer, err := dcli.InspectContainer(units[0].GetID())
c.Assert(err, check.IsNil)
expectedPortBindings := map[docker.Port][]docker.PortBinding{
"8888/tcp": {{HostIP: "", HostPort: ""}},
}
c.Assert(dockerContainer.HostConfig.PortBindings, check.DeepEquals, expectedPortBindings)
}
func (s *S) TestProvisionerDestroy(c *check.C) {
cont, err := s.newContainer(nil, nil)
c.Assert(err, check.IsNil)
a := provisiontest.NewFakeApp(cont.AppName, "python", 1)
unit := cont.AsUnit(a)
a.BindUnit(&unit)
s.p.Provision(a)
err = s.p.Destroy(a)
c.Assert(err, check.IsNil)
coll := s.p.Collection()
defer coll.Close()
count, err := coll.Find(bson.M{"appname": cont.AppName}).Count()
c.Assert(err, check.IsNil)
c.Assert(count, check.Equals, 0)
c.Assert(routertest.FakeRouter.HasBackend("myapp"), check.Equals, false)
c.Assert(a.HasBind(&unit), check.Equals, false)
}
func (s *S) TestProvisionerDestroyEmptyUnit(c *check.C) {
a := provisiontest.NewFakeApp("myapp", "python", 0)
s.p.Provision(a)
err := s.p.Destroy(a)
c.Assert(err, check.IsNil)
}
func (s *S) TestProvisionerAddUnits(c *check.C) {
a := provisiontest.NewFakeApp("myapp", "python", 0)
a.Deploys = 1
s.p.Provision(a)
_, err := s.newContainer(&newContainerOpts{AppName: a.GetName()}, nil)
c.Assert(err, check.IsNil)
err = s.p.AddUnits(a, 3, "web", nil)
c.Assert(err, check.IsNil)
units, err := s.p.Units(a)
c.Assert(err, check.IsNil)
c.Assert(units, check.HasLen, 4)
coll := s.p.Collection()
defer coll.Close()
count, err := coll.Find(bson.M{"appname": a.GetName()}).Count()
c.Assert(err, check.IsNil)
c.Assert(count, check.Equals, 4)
}
func (s *S) TestProvisionerAddUnitsInvalidProcess(c *check.C) {
a := provisiontest.NewFakeApp("myapp", "python", 0)
a.Deploys = 1
s.p.Provision(a)
_, err := s.newContainer(&newContainerOpts{AppName: a.GetName()}, nil)
c.Assert(err, check.IsNil)
err = s.p.AddUnits(a, 3, "bogus", nil)
c.Assert(err, check.FitsTypeOf, provision.InvalidProcessError{})
c.Assert(err, check.ErrorMatches, `process error: no command declared in Procfile for process "bogus"`)
}
func (s *S) TestProvisionerAddUnitsWithErrorDoesntLeaveLostUnits(c *check.C) {
var callCount int32
s.server.CustomHandler("/containers/create", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if atomic.AddInt32(&callCount, 1) == 2 {
w.WriteHeader(http.StatusInternalServerError)
return
}
s.server.DefaultHandler().ServeHTTP(w, r)
}))
defer s.server.CustomHandler("/containers/create", s.server.DefaultHandler())
a := provisiontest.NewFakeApp("myapp", "python", 0)
a.Deploys = 1
s.p.Provision(a)
coll := s.p.Collection()
defer coll.Close()
coll.Insert(container.Container{Container: types.Container{ID: "c-89320", AppName: a.GetName(), Version: "a345fe", Image: "tsuru/python:latest"}})
_, err := newSuccessfulVersionForApp(s.p, a, nil)
c.Assert(err, check.IsNil)
err = s.p.AddUnits(a, 3, "web", nil)
c.Assert(err, check.ErrorMatches, `.*API error \(500\).*`)
count, err := coll.Find(bson.M{"appname": a.GetName()}).Count()
c.Assert(err, check.IsNil)
c.Assert(count, check.Equals, 1)
}
func (s *S) TestProvisionerAddZeroUnits(c *check.C) {
a := provisiontest.NewFakeApp("myapp", "python", 0)
a.Deploys = 1
s.p.Provision(a)
_, err := newSuccessfulVersionForApp(s.p, a, nil)
c.Assert(err, check.IsNil)
coll := s.p.Collection()
defer coll.Close()
coll.Insert(container.Container{Container: types.Container{ID: "c-89320", AppName: a.GetName(), Version: "a345fe", Image: "tsuru/python:latest"}})
err = s.p.AddUnits(a, 0, "web", nil)
c.Assert(err, check.NotNil)
c.Assert(err.Error(), check.Equals, "Cannot add 0 units")
}
func (s *S) TestProvisionerAddUnitsWithNoDeploys(c *check.C) {
a := provisiontest.NewFakeApp("myapp", "python", 1)
s.p.Provision(a)
err := s.p.AddUnits(a, 1, "web", nil)
c.Assert(err, check.NotNil)
c.Assert(err.Error(), check.Equals, "New units can only be added after the first deployment")
}
func (s *S) TestProvisionerAddUnitsWithHost(c *check.C) {
p, err := s.startMultipleServersCluster()
c.Assert(err, check.IsNil)
a := provisiontest.NewFakeApp("myapp", "python", 0)
p.Provision(a)
coll := p.Collection()
defer coll.Close()
coll.Insert(container.Container{Container: types.Container{ID: "xxxfoo", AppName: a.GetName(), Version: "123987", Image: "tsuru/python:latest"}})
version, err := newSuccessfulVersionForApp(s.p, a, nil)
c.Assert(err, check.IsNil)
units, err := addContainersWithHost(&changeUnitsPipelineArgs{
toHost: "localhost",
toAdd: map[string]*containersToAdd{"web": {Quantity: 1}},
app: a,
version: version,
provisioner: p,
})
c.Assert(err, check.IsNil)
c.Assert(units, check.HasLen, 1)
c.Assert(units[0].HostAddr, check.Equals, "localhost")
count, err := coll.Find(bson.M{"appname": a.GetName()}).Count()
c.Assert(err, check.IsNil)
c.Assert(count, check.Equals, 2)
}
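// TestProvisionerAddUnitsWithHostPartialRollback fails the second container
// start, which must trigger a rollback that removes every unit created by
// the pipeline.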
func (s *S) TestProvisionerAddUnitsWithHostPartialRollback(c *check.C) {
a := provisiontest.NewFakeApp("myapp", "python", 0)
s.p.Provision(a)
version, err := newSuccessfulVersionForApp(s.p, a, nil)
c.Assert(err, check.IsNil)
var callCount int32
s.server.CustomHandler("/containers/.*/start", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if atomic.AddInt32(&callCount, 1) == 2 {
w.WriteHeader(http.StatusInternalServerError)
return
}
s.server.DefaultHandler().ServeHTTP(w, r)
}))
units, err := addContainersWithHost(&changeUnitsPipelineArgs{
toAdd: map[string]*containersToAdd{"web": {Quantity: 2}},
app: a,
version: version,
provisioner: s.p,
})
c.Assert(err, check.ErrorMatches, "(?s).*error in docker node.*")
c.Assert(units, check.HasLen, 0)
coll := s.p.Collection()
defer coll.Close()
count, err := coll.Find(bson.M{"appname": a.GetName()}).Count()
c.Assert(err, check.IsNil)
c.Assert(count, check.Equals, 0)
}
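// TestProvisionerRemoveUnits wires a segregatedScheduler backed by an
// in-memory cluster storage so RemoveUnits picks victim containers through
// the scheduler; routes and binds of removed units must be cleaned up too.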
func (s *S) TestProvisionerRemoveUnits(c *check.C) {
a1 := app.App{Name: "impius", Teams: []string{"tsuruteam", "nodockerforme"}, Pool: "pool1"}
cont1 := container.Container{Container: types.Container{ID: "1", Name: "impius1", AppName: a1.Name, ProcessName: "web", HostAddr: "url0", HostPort: "1"}}
cont2 := container.Container{Container: types.Container{ID: "2", Name: "mirror1", AppName: a1.Name, ProcessName: "worker", HostAddr: "url0", HostPort: "2"}}
cont3 := container.Container{Container: types.Container{ID: "3", Name: "dedication1", AppName: a1.Name, ProcessName: "web", HostAddr: "url0", HostPort: "3"}}
err := s.conn.Apps().Insert(a1)
c.Assert(err, check.IsNil)
p := pool.Pool{Name: "pool1"}
o := pool.AddPoolOptions{Name: p.Name}
err = pool.AddPool(o)
c.Assert(err, check.IsNil)
err = pool.AddTeamsToPool(p.Name, []string{
"tsuruteam",
"nodockerforme",
})
c.Assert(err, check.IsNil)
contColl := s.p.Collection()
defer contColl.Close()
err = contColl.Insert(
cont1, cont2, cont3,
)
c.Assert(err, check.IsNil)
scheduler := segregatedScheduler{provisioner: s.p}
s.p.storage = &cluster.MapStorage{}
clusterInstance, err := cluster.New(&scheduler, s.p.storage, "")
c.Assert(err, check.IsNil)
s.p.cluster = clusterInstance
s.p.scheduler = &scheduler
err = clusterInstance.Register(cluster.Node{
Address: "http://url0:1234",
Metadata: map[string]string{"pool": "pool1"},
})
c.Assert(err, check.IsNil)
customData := map[string]interface{}{
"processes": map[string]interface{}{
"web": "python myapp.py",
},
}
papp := provisiontest.NewFakeApp(a1.Name, "python", 0)
s.p.Provision(papp)
_, err = newSuccessfulVersionForApp(s.p, papp, customData)
c.Assert(err, check.IsNil)
conts := []container.Container{cont1, cont2, cont3}
units := []provision.Unit{cont1.AsUnit(papp), cont2.AsUnit(papp), cont3.AsUnit(papp)}
for i := range conts {
err = routertest.FakeRouter.AddRoutes(a1.Name, []*url.URL{conts[i].Address()})
c.Assert(err, check.IsNil)
err = papp.BindUnit(&units[i])
c.Assert(err, check.IsNil)
}
err = s.p.RemoveUnits(papp, 2, "web", nil)
c.Assert(err, check.IsNil)
_, err = s.p.GetContainer(conts[0].ID)
c.Assert(err, check.NotNil)
_, err = s.p.GetContainer(conts[1].ID)
c.Assert(err, check.IsNil)
_, err = s.p.GetContainer(conts[2].ID)
c.Assert(err, check.NotNil)
c.Assert(s.p.scheduler.ignoredContainers, check.IsNil)
c.Assert(routertest.FakeRouter.HasRoute(a1.Name, conts[0].Address().String()), check.Equals, false)
c.Assert(routertest.FakeRouter.HasRoute(a1.Name, conts[1].Address().String()), check.Equals, true)
c.Assert(routertest.FakeRouter.HasRoute(a1.Name, conts[2].Address().String()), check.Equals, false)
c.Assert(papp.HasBind(&units[0]), check.Equals, false)
c.Assert(papp.HasBind(&units[1]), check.Equals, true)
c.Assert(papp.HasBind(&units[2]), check.Equals, false)
}
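// TestProvisionerRemoveUnitsFailRemoveOldRoute forces a route removal
// failure for one container: RemoveUnits must abort and leave all
// containers, routes, and binds untouched.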
func (s *S) TestProvisionerRemoveUnitsFailRemoveOldRoute(c *check.C) {
a1 := app.App{Name: "impius", Teams: []string{"tsuruteam", "nodockerforme"}, Pool: "pool1"}
cont1 := container.Container{Container: types.Container{ID: "1", Name: "impius1", AppName: a1.Name, ProcessName: "web", HostAddr: "url0", HostPort: "1"}}
cont2 := container.Container{Container: types.Container{ID: "2", Name: "mirror1", AppName: a1.Name, ProcessName: "worker", HostAddr: "url0", HostPort: "2"}}
cont3 := container.Container{Container: types.Container{ID: "3", Name: "dedication1", AppName: a1.Name, ProcessName: "web", HostAddr: "url0", HostPort: "3"}}
err := s.conn.Apps().Insert(a1)
c.Assert(err, check.IsNil)
p := pool.Pool{Name: "pool1"}
o := pool.AddPoolOptions{Name: p.Name}
err = pool.AddPool(o)
c.Assert(err, check.IsNil)
err = pool.AddTeamsToPool(p.Name, []string{
"tsuruteam",
"nodockerforme",
})
c.Assert(err, check.IsNil)
contColl := s.p.Collection()
defer contColl.Close()
err = contColl.Insert(
cont1, cont2, cont3,
)
c.Assert(err, check.IsNil)
scheduler := segregatedScheduler{provisioner: s.p}
s.p.storage = &cluster.MapStorage{}
clusterInstance, err := cluster.New(&scheduler, s.p.storage, "")
c.Assert(err, check.IsNil)
s.p.cluster = clusterInstance
s.p.scheduler = &scheduler
err = clusterInstance.Register(cluster.Node{
Address: "http://url0:1234",
Metadata: map[string]string{"pool": "pool1"},
})
c.Assert(err, check.IsNil)
customData := map[string]interface{}{
"processes": map[string]interface{}{
"web": "python myapp.py",
},
}
papp := provisiontest.NewFakeApp(a1.Name, "python", 0)
s.p.Provision(papp)
_, err = newSuccessfulVersionForApp(s.p, papp, customData)
c.Assert(err, check.IsNil)
conts := []container.Container{cont1, cont2, cont3}
units := []provision.Unit{cont1.AsUnit(papp), cont2.AsUnit(papp), cont3.AsUnit(papp)}
for i := range conts {
err = routertest.FakeRouter.AddRoutes(a1.Name, []*url.URL{conts[i].Address()})
c.Assert(err, check.IsNil)
err = papp.BindUnit(&units[i])
c.Assert(err, check.IsNil)
}
routertest.FakeRouter.FailForIp(conts[2].Address().String())
err = s.p.RemoveUnits(papp, 2, "web", nil)
c.Assert(err, check.ErrorMatches, "error removing routes, units weren't removed: Forced failure")
_, err = s.p.GetContainer(conts[0].ID)
c.Assert(err, check.IsNil)
_, err = s.p.GetContainer(conts[1].ID)
c.Assert(err, check.IsNil)
_, err = s.p.GetContainer(conts[2].ID)
c.Assert(err, check.IsNil)
c.Assert(s.p.scheduler.ignoredContainers, check.IsNil)
c.Assert(routertest.FakeRouter.HasRoute(a1.Name, conts[0].Address().String()), check.Equals, true)
c.Assert(routertest.FakeRouter.HasRoute(a1.Name, conts[1].Address().String()), check.Equals, true)
c.Assert(routertest.FakeRouter.HasRoute(a1.Name, conts[2].Address().String()), check.Equals, true)
c.Assert(papp.HasBind(&units[0]), check.Equals, true)
c.Assert(papp.HasBind(&units[1]), check.Equals, true)
c.Assert(papp.HasBind(&units[2]), check.Equals, true)
}
func (s *S) TestProvisionerRemoveUnitsEmptyProcess(c *check.C) {
a1 := app.App{Name: "impius", Teams: []string{"tsuruteam"}, Pool: "pool1"}
cont1 := container.Container{Container: types.Container{ID: "1", Name: "impius1", AppName: a1.Name}}
err := s.conn.Apps().Insert(a1)
c.Assert(err, check.IsNil)
p := pool.Pool{Name: "pool1"}
o := pool.AddPoolOptions{Name: p.Name}
err = pool.AddPool(o)
c.Assert(err, check.IsNil)
err = pool.AddTeamsToPool(p.Name, []string{"tsuruteam"})
c.Assert(err, check.IsNil)
contColl := s.p.Collection()
defer contColl.Close()
err = contColl.Insert(cont1)
c.Assert(err, check.IsNil)
scheduler := segregatedScheduler{provisioner: s.p}
s.p.storage = &cluster.MapStorage{}
clusterInstance, err := cluster.New(&scheduler, s.p.storage, "")
c.Assert(err, check.IsNil)
s.p.scheduler = &scheduler
s.p.cluster = clusterInstance
err = clusterInstance.Register(cluster.Node{
Address: s.server.URL(),
Metadata: map[string]string{"pool": "pool1"},
})
c.Assert(err, check.IsNil)
opts := docker.CreateContainerOptions{Name: cont1.Name}
_, err = scheduler.Schedule(clusterInstance, &opts, &container.SchedulerOpts{AppName: a1.Name, ProcessName: "web"})
c.Assert(err, check.IsNil)
papp := provisiontest.NewFakeApp(a1.Name, "python", 0)
	err = s.p.Provision(papp)
	c.Assert(err, check.IsNil)
err = s.p.RemoveUnits(papp, 1, "", nil)
c.Assert(err, check.IsNil)
_, err = s.p.GetContainer(cont1.ID)
c.Assert(err, check.NotNil)
}
func (s *S) TestProvisionerRemoveUnitsNotFound(c *check.C) {
err := s.p.RemoveUnits(nil, 1, "web", nil)
c.Assert(err, check.NotNil)
c.Assert(err.Error(), check.Equals, "remove units: app should not be nil")
}
func (s *S) TestProvisionerRemoveUnitsZeroUnits(c *check.C) {
err := s.p.RemoveUnits(provisiontest.NewFakeApp("something", "python", 0), 0, "web", nil)
c.Assert(err, check.NotNil)
c.Assert(err.Error(), check.Equals, "cannot remove zero units")
}
func (s *S) TestProvisionerRemoveUnitsTooManyUnits(c *check.C) {
a1 := app.App{Name: "impius", Teams: []string{"tsuruteam", "nodockerforme"}, Pool: "pool1"}
cont1 := container.Container{Container: types.Container{ID: "1", Name: "impius1", AppName: a1.Name, ProcessName: "web"}}
cont2 := container.Container{Container: types.Container{ID: "2", Name: "mirror1", AppName: a1.Name, ProcessName: "web"}}
cont3 := container.Container{Container: types.Container{ID: "3", Name: "dedication1", AppName: a1.Name, ProcessName: "web"}}
err := s.conn.Apps().Insert(a1)
c.Assert(err, check.IsNil)
p := pool.Pool{Name: "pool1"}
o := pool.AddPoolOptions{Name: p.Name}
err = pool.AddPool(o)
c.Assert(err, check.IsNil)
err = pool.AddTeamsToPool(p.Name, []string{
"tsuruteam",
"nodockerforme",
})
c.Assert(err, check.IsNil)
contColl := s.p.Collection()
defer contColl.Close()
err = contColl.Insert(
cont1, cont2, cont3,
)
c.Assert(err, check.IsNil)
scheduler := segregatedScheduler{provisioner: s.p}
s.p.storage = &cluster.MapStorage{}
clusterInstance, err := cluster.New(&scheduler, s.p.storage, "")
s.p.scheduler = &scheduler
s.p.cluster = clusterInstance
c.Assert(err, check.IsNil)
err = clusterInstance.Register(cluster.Node{
Address: "http://url0:1234",
Metadata: map[string]string{"pool": "pool1"},
})
c.Assert(err, check.IsNil)
customData := map[string]interface{}{
"processes": map[string]interface{}{
"web": "python myapp.py",
},
}
papp := provisiontest.NewFakeApp(a1.Name, "python", 0)
s.p.Provision(papp)
_, err = newSuccessfulVersionForApp(s.p, papp, customData)
c.Assert(err, check.IsNil)
err = s.p.RemoveUnits(papp, 4, "web", nil)
c.Assert(err, check.NotNil)
c.Assert(err.Error(), check.Equals, "cannot remove 4 units from process \"web\", only 3 available")
}
func (s *S) TestProvisionerRemoveUnitsInvalidProcess(c *check.C) {
a1 := app.App{Name: "impius", Teams: []string{"tsuruteam"}, Pool: "pool1"}
cont1 := container.Container{Container: types.Container{ID: "1", Name: "impius1", AppName: a1.Name}}
err := s.conn.Apps().Insert(a1)
c.Assert(err, check.IsNil)
p := pool.Pool{Name: "pool1"}
o := pool.AddPoolOptions{Name: p.Name}
err = pool.AddPool(o)
c.Assert(err, check.IsNil)
err = pool.AddTeamsToPool(p.Name, []string{
"tsuruteam",
})
c.Assert(err, check.IsNil)
contColl := s.p.Collection()
defer contColl.Close()
err = contColl.Insert(cont1)
c.Assert(err, check.IsNil)
scheduler := segregatedScheduler{provisioner: s.p}
s.p.storage = &cluster.MapStorage{}
clusterInstance, err := cluster.New(&scheduler, s.p.storage, "")
s.p.scheduler = &scheduler
s.p.cluster = clusterInstance
c.Assert(err, check.IsNil)
err = clusterInstance.Register(cluster.Node{
Address: s.server.URL(),
Metadata: map[string]string{"pool": "pool1"},
})
c.Assert(err, check.IsNil)
opts := docker.CreateContainerOptions{Name: cont1.Name}
_, err = scheduler.Schedule(clusterInstance, &opts, &container.SchedulerOpts{AppName: a1.Name, ProcessName: "web"})
c.Assert(err, check.IsNil)
customData := map[string]interface{}{
"processes": map[string]interface{}{
"web": "python myapp.py",
},
}
papp := provisiontest.NewFakeApp(a1.Name, "python", 0)
s.p.Provision(papp)
_, err = newSuccessfulVersionForApp(s.p, papp, customData)
c.Assert(err, check.IsNil)
err = s.p.RemoveUnits(papp, 1, "worker", nil)
c.Assert(err, check.NotNil)
c.Assert(err.Error(), check.Equals, `process error: no command declared in Procfile for process "worker"`)
}
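// TestProvisionerSetUnitStatus records the reported status while preserving
// the container's expected status.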
func (s *S) TestProvisionerSetUnitStatus(c *check.C) {
opts := newContainerOpts{Status: provision.StatusStarted.String(), AppName: "someapp"}
container, err := s.newContainer(&opts, nil)
c.Assert(err, check.IsNil)
defer s.removeTestContainer(container)
err = s.p.SetUnitStatus(provision.Unit{ID: container.ID, AppName: container.AppName}, provision.StatusError)
c.Assert(err, check.IsNil)
container, err = s.p.GetContainer(container.ID)
c.Assert(err, check.IsNil)
c.Assert(container.Status, check.Equals, provision.StatusError.String())
c.Assert(container.ExpectedStatus(), check.Equals, provision.StatusStarted)
}
func (s *S) TestProvisionerSetUnitStatusAsleep(c *check.C) {
opts := newContainerOpts{Status: provision.StatusStarted.String(), AppName: "someapp"}
container, err := s.newContainer(&opts, nil)
c.Assert(err, check.IsNil)
defer s.removeTestContainer(container)
err = container.Sleep(s.p.ClusterClient(), s.p.ActionLimiter())
c.Assert(err, check.IsNil)
err = s.p.SetUnitStatus(provision.Unit{ID: container.ID, AppName: container.AppName}, provision.StatusStopped)
c.Assert(err, check.IsNil)
container, err = s.p.GetContainer(container.ID)
c.Assert(err, check.IsNil)
c.Assert(container.Status, check.Equals, provision.StatusAsleep.String())
}
func (s *S) TestProvisionerSetUnitStatusUpdatesIp(c *check.C) {
err := s.conn.Apps().Insert(&app.App{Name: "myawesomeapp"})
c.Assert(err, check.IsNil)
opts := newContainerOpts{Status: provision.StatusStarted.String(), AppName: "myawesomeapp"}
container, err := s.newContainer(&opts, nil)
c.Assert(err, check.IsNil)
defer s.removeTestContainer(container)
container.IP = "xinvalidx"
coll := s.p.Collection()
defer coll.Close()
err = coll.Update(bson.M{"id": container.ID}, container)
c.Assert(err, check.IsNil)
err = s.p.SetUnitStatus(provision.Unit{ID: container.ID, AppName: container.AppName}, provision.StatusStarted)
c.Assert(err, check.IsNil)
container, err = s.p.GetContainer(container.ID)
c.Assert(err, check.IsNil)
c.Assert(container.Status, check.Equals, provision.StatusStarted.String())
	c.Assert(container.IP, check.Matches, `\d+\.\d+\.\d+\.\d+`)
}
func (s *S) TestProvisionerSetUnitStatusWrongApp(c *check.C) {
opts := newContainerOpts{Status: provision.StatusStarted.String(), AppName: "someapp"}
container, err := s.newContainer(&opts, nil)
c.Assert(err, check.IsNil)
defer s.removeTestContainer(container)
err = s.p.SetUnitStatus(provision.Unit{ID: container.ID, AppName: container.AppName + "a"}, provision.StatusError)
c.Assert(err, check.NotNil)
c.Assert(err.Error(), check.Equals, "wrong app name")
container, err = s.p.GetContainer(container.ID)
c.Assert(err, check.IsNil)
c.Assert(container.Status, check.Equals, provision.StatusStarted.String())
}
func (s *S) TestProvisionSetUnitStatusNoAppName(c *check.C) {
opts := newContainerOpts{Status: provision.StatusStarted.String(), AppName: "someapp"}
container, err := s.newContainer(&opts, nil)
c.Assert(err, check.IsNil)
defer s.removeTestContainer(container)
err = s.p.SetUnitStatus(provision.Unit{ID: container.ID}, provision.StatusError)
c.Assert(err, check.IsNil)
container, err = s.p.GetContainer(container.ID)
c.Assert(err, check.IsNil)
c.Assert(container.Status, check.Equals, provision.StatusError.String())
}
func (s *S) TestProvisionerSetUnitStatusUnitNotFound(c *check.C) {
err := s.p.SetUnitStatus(provision.Unit{ID: "mycontainer", AppName: "myapp"}, provision.StatusError)
c.Assert(err, check.NotNil)
e, ok := err.(*provision.UnitNotFoundError)
c.Assert(ok, check.Equals, true)
c.Assert(e.ID, check.Equals, "mycontainer")
}
func (s *S) TestProvisionerSetUnitStatusBuildingContainer(c *check.C) {
opts := newContainerOpts{Status: provision.StatusBuilding.String(), AppName: "someapp"}
container, err := s.newContainer(&opts, nil)
c.Assert(err, check.IsNil)
defer s.removeTestContainer(container)
err = s.p.SetUnitStatus(provision.Unit{ID: container.ID}, provision.StatusStarted)
c.Assert(err, check.IsNil)
container, err = s.p.GetContainer(container.ID)
c.Assert(err, check.IsNil)
c.Assert(container.Status, check.Equals, provision.StatusBuilding.String())
}
func (s *S) TestProvisionerSetUnitStatusSearchByName(c *check.C) {
opts := newContainerOpts{Status: provision.StatusStarted.String(), AppName: "someapp"}
container, err := s.newContainer(&opts, nil)
c.Assert(err, check.IsNil)
defer s.removeTestContainer(container)
err = s.p.SetUnitStatus(provision.Unit{ID: "invalid-id", Name: container.Name, AppName: container.AppName}, provision.StatusError)
c.Assert(err, check.IsNil)
container, err = s.p.GetContainer(container.ID)
c.Assert(err, check.IsNil)
c.Assert(container.Status, check.Equals, provision.StatusError.String())
}
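// TestProvisionerSetUnitStatusUnexpectedStopped reports "stopped" for a
// container expected to be started; the provisioner must flag it as an error.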
func (s *S) TestProvisionerSetUnitStatusUnexpectedStopped(c *check.C) {
opts := newContainerOpts{Status: provision.StatusStarted.String(), AppName: "someapp"}
container, err := s.newContainer(&opts, nil)
c.Assert(err, check.IsNil)
defer s.removeTestContainer(container)
err = s.p.SetUnitStatus(provision.Unit{ID: container.ID, AppName: container.AppName}, provision.StatusStopped)
c.Assert(err, check.IsNil)
container, err = s.p.GetContainer(container.ID)
c.Assert(err, check.IsNil)
c.Assert(container.Status, check.Equals, provision.StatusError.String())
}
func (s *S) TestProvisionerSetUnitStatusExpectedStopped(c *check.C) {
opts := newContainerOpts{Status: provision.StatusStopped.String(), AppName: "someapp"}
container, err := s.newContainer(&opts, nil)
c.Assert(err, check.IsNil)
defer s.removeTestContainer(container)
err = s.p.SetUnitStatus(provision.Unit{ID: container.ID, AppName: container.AppName}, provision.StatusStopped)
c.Assert(err, check.IsNil)
container, err = s.p.GetContainer(container.ID)
c.Assert(err, check.IsNil)
c.Assert(container.Status, check.Equals, provision.StatusStopped.String())
}
func (s *S) TestProvisionerSetUnitStatusUnexpectedStarted(c *check.C) {
opts := newContainerOpts{Status: provision.StatusStopped.String(), AppName: "someapp"}
container, err := s.newContainer(&opts, nil)
c.Assert(err, check.IsNil)
defer s.removeTestContainer(container)
err = s.p.SetUnitStatus(provision.Unit{ID: container.ID, AppName: container.AppName}, provision.StatusStarted)
c.Assert(err, check.IsNil)
container, err = s.p.GetContainer(container.ID)
c.Assert(err, check.IsNil)
c.Assert(container.Status, check.Equals, provision.StatusError.String())
c.Assert(container.ExpectedStatus(), check.Equals, provision.StatusStopped)
}
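// TestProvisionerExecuteCommand runs a command on every unit of the app and
// uses the fake server's exec hook to confirm the exec actually happened.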
func (s *S) TestProvisionerExecuteCommand(c *check.C) {
a := provisiontest.NewFakeApp("starbreaker", "python", 1)
container1, err := s.newContainer(&newContainerOpts{AppName: a.GetName()}, nil)
c.Assert(err, check.IsNil)
defer s.removeTestContainer(container1)
coll := s.p.Collection()
defer coll.Close()
coll.Update(bson.M{"id": container1.ID}, container1)
container2, err := s.newContainer(&newContainerOpts{AppName: a.GetName()}, nil)
c.Assert(err, check.IsNil)
defer s.removeTestContainer(container2)
coll.Update(bson.M{"id": container2.ID}, container2)
var executed bool
s.server.PrepareExec("*", func() {
executed = true
})
var stdout, stderr bytes.Buffer
err = s.p.ExecuteCommand(provision.ExecOptions{
App: a,
Stdout: &stdout,
Stderr: &stderr,
Units: []string{container1.ID, container2.ID},
Cmds: []string{"ls", "-l"},
})
c.Assert(err, check.IsNil)
c.Assert(executed, check.Equals, true)
}
func (s *S) TestProvisionerExecuteCommandSingleContainer(c *check.C) {
a := provisiontest.NewFakeApp("almah", "static", 1)
container, err := s.newContainer(&newContainerOpts{AppName: a.GetName()}, nil)
c.Assert(err, check.IsNil)
defer s.removeTestContainer(container)
coll := s.p.Collection()
defer coll.Close()
coll.Update(bson.M{"id": container.ID}, container)
var stdout, stderr bytes.Buffer
var executed bool
s.server.PrepareExec("*", func() {
executed = true
})
err = s.p.ExecuteCommand(provision.ExecOptions{
App: a,
Stdout: &stdout,
Stderr: &stderr,
Units: []string{container.ID},
Cmds: []string{"ls", "-l"},
})
c.Assert(err, check.IsNil)
c.Assert(executed, check.Equals, true)
}
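// TestProvisionerExecuteCommandNoUnits covers the isolated-run path: with no
// units, a throwaway container is created (with AutoRemove set) and its
// hijacked attach stream carries stdout and stderr back to the caller.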
func (s *S) TestProvisionerExecuteCommandNoUnits(c *check.C) {
a := provisiontest.NewFakeApp("almah", "static", 1)
_, err := newSuccessfulVersionForApp(s.p, a, nil)
c.Assert(err, check.IsNil)
a.SetEnv(bind.EnvVar{Name: "ENV", Value: "OK"})
var stdout, stderr bytes.Buffer
var created bool
s.server.CustomHandler("/containers/create", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
created = true
data, _ := ioutil.ReadAll(r.Body)
r.Body = ioutil.NopCloser(bytes.NewBuffer(data))
var config docker.Config
json.Unmarshal(data, &config)
sort.Strings(config.Env)
c.Assert(config.Env, check.DeepEquals, []string{"ENV=OK", "PORT=8888", "TSURU_HOST=", "TSURU_PROCESSNAME=", "port=8888"})
var createOpts docker.CreateContainerOptions
json.Unmarshal(data, &createOpts)
c.Assert(createOpts.HostConfig, check.NotNil)
c.Assert(createOpts.HostConfig.AutoRemove, check.Equals, true)
s.server.DefaultHandler().ServeHTTP(w, r)
}))
s.server.CustomHandler("/containers/.*/attach", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
hijacker, ok := w.(http.Hijacker)
if !ok {
http.Error(w, "cannot hijack connection", http.StatusInternalServerError)
return
}
w.Header().Set("Content-Type", "application/vnd.docker.raw-stream")
w.WriteHeader(http.StatusOK)
		conn, _, cErr := hijacker.Hijack()
		if cErr != nil {
			http.Error(w, cErr.Error(), http.StatusInternalServerError)
			return
		}
		outStream := stdcopy.NewStdWriter(conn, stdcopy.Stdout)
		fmt.Fprint(outStream, "test")
		errStream := stdcopy.NewStdWriter(conn, stdcopy.Stderr)
		fmt.Fprint(errStream, "errtest")
conn.Close()
}))
err = s.p.ExecuteCommand(provision.ExecOptions{
App: a,
Stdout: &stdout,
Stderr: &stderr,
Cmds: []string{"ls", "-l"},
})
c.Assert(err, check.IsNil)
c.Assert(stdout.String(), check.Equals, "test")
c.Assert(stderr.String(), check.Equals, "errtest")
c.Assert(created, check.Equals, true)
}
func (s *S) TestProvisionerExecuteCommandNoUnitsNoImage(c *check.C) {
s.server.CustomHandler("/images/create", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// no actual pull executed
w.WriteHeader(http.StatusOK)
}))
a := provisiontest.NewFakeApp("almah", "static", 2)
var buf bytes.Buffer
err := s.p.ExecuteCommand(provision.ExecOptions{
App: a,
Stdout: &buf,
Stderr: &buf,
Cmds: []string{"ls", "-l"},
})
c.Assert(err, check.ErrorMatches, ".*no versions available for app.*")
}
func (s *S) TestProvisionerCollection(c *check.C) {
collection := s.p.Collection()
defer collection.Close()
c.Assert(collection.Name, check.Equals, s.collName)
}
func (s *S) TestProvisionerCollectionDefaultConfig(c *check.C) {
var requests []*http.Request
server, err := testing.NewServer("127.0.0.1:0", nil, func(r *http.Request) {
requests = append(requests, r)
})
c.Assert(err, check.IsNil)
defer server.Stop()
config.Unset("docker:collection")
var p dockerProvisioner
err = p.Initialize()
c.Assert(err, check.IsNil)
col := p.Collection()
defer col.Close()
c.Assert(col.Name, check.Equals, "dockercluster")
config.Set("docker:collection", s.collName)
}
func (s *S) TestProvisionerCollectionErrorConfig(c *check.C) {
var requests []*http.Request
server, err := testing.NewServer("127.0.0.1:0", nil, func(r *http.Request) {
requests = append(requests, r)
})
c.Assert(err, check.IsNil)
defer server.Stop()
config.Set("docker:collection", true)
var p dockerProvisioner
err = p.Initialize()
c.Assert(err, check.ErrorMatches, ".*value for the key.*is not a string.*")
config.Set("docker:collection", s.collName)
}
func (s *S) TestProvisionerStart(c *check.C) {
err := s.conn.Apps().Insert(&app.App{Name: "almah"})
c.Assert(err, check.IsNil)
a := provisiontest.NewFakeApp("almah", "static", 1)
customData := map[string]interface{}{
"processes": map[string]interface{}{
"web": "python web.py",
"worker": "python worker.py",
},
}
cont1, err := s.newContainer(&newContainerOpts{
AppName: a.GetName(),
ImageCustomData: customData,
ProcessName: "web",
}, nil)
c.Assert(err, check.IsNil)
defer s.removeTestContainer(cont1)
cont2, err := s.newContainer(&newContainerOpts{
AppName: a.GetName(),
ImageCustomData: customData,
ProcessName: "worker",
}, nil)
c.Assert(err, check.IsNil)
defer s.removeTestContainer(cont2)
dcli, err := docker.NewClient(s.server.URL())
c.Assert(err, check.IsNil)
dockerContainer, err := dcli.InspectContainer(cont1.ID)
c.Assert(err, check.IsNil)
c.Assert(dockerContainer.State.Running, check.Equals, false)
dockerContainer, err = dcli.InspectContainer(cont2.ID)
c.Assert(err, check.IsNil)
c.Assert(dockerContainer.State.Running, check.Equals, false)
err = s.p.Start(a, "")
c.Assert(err, check.IsNil)
dockerContainer, err = dcli.InspectContainer(cont1.ID)
c.Assert(err, check.IsNil)
c.Assert(dockerContainer.State.Running, check.Equals, true)
cont1, err = s.p.GetContainer(cont1.ID)
c.Assert(err, check.IsNil)
expectedIP := dockerContainer.NetworkSettings.IPAddress
expectedPort := dockerContainer.NetworkSettings.Ports["8888/tcp"][0].HostPort
c.Assert(cont1.IP, check.Equals, expectedIP)
c.Assert(cont1.HostPort, check.Equals, expectedPort)
c.Assert(cont1.Status, check.Equals, provision.StatusStarting.String())
dockerContainer, err = dcli.InspectContainer(cont2.ID)
c.Assert(err, check.IsNil)
c.Assert(dockerContainer.State.Running, check.Equals, true)
cont2, err = s.p.GetContainer(cont2.ID)
c.Assert(err, check.IsNil)
expectedIP = dockerContainer.NetworkSettings.IPAddress
expectedPort = dockerContainer.NetworkSettings.Ports["8888/tcp"][0].HostPort
c.Assert(cont2.IP, check.Equals, expectedIP)
c.Assert(cont2.HostPort, check.Equals, expectedPort)
c.Assert(cont2.Status, check.Equals, provision.StatusStarting.String())
}
func (s *S) TestProvisionerStartProcess(c *check.C) {
err := s.conn.Apps().Insert(&app.App{Name: "almah"})
c.Assert(err, check.IsNil)
a := provisiontest.NewFakeApp("almah", "static", 1)
customData := map[string]interface{}{
"processes": map[string]interface{}{
"web": "python web.py",
"worker": "python worker.py",
},
}
cont1, err := s.newContainer(&newContainerOpts{
AppName: a.GetName(),
ImageCustomData: customData,
ProcessName: "web",
}, nil)
c.Assert(err, check.IsNil)
defer s.removeTestContainer(cont1)
cont2, err := s.newContainer(&newContainerOpts{
AppName: a.GetName(),
ImageCustomData: customData,
ProcessName: "worker",
}, nil)
c.Assert(err, check.IsNil)
defer s.removeTestContainer(cont2)
dcli, err := docker.NewClient(s.server.URL())
c.Assert(err, check.IsNil)
dockerContainer, err := dcli.InspectContainer(cont1.ID)
c.Assert(err, check.IsNil)
c.Assert(dockerContainer.State.Running, check.Equals, false)
dockerContainer, err = dcli.InspectContainer(cont2.ID)
c.Assert(err, check.IsNil)
c.Assert(dockerContainer.State.Running, check.Equals, false)
err = s.p.Start(a, "web")
c.Assert(err, check.IsNil)
dockerContainer, err = dcli.InspectContainer(cont2.ID)
c.Assert(err, check.IsNil)
c.Assert(dockerContainer.State.Running, check.Equals, false)
dockerContainer, err = dcli.InspectContainer(cont1.ID)
c.Assert(err, check.IsNil)
c.Assert(dockerContainer.State.Running, check.Equals, true)
cont1, err = s.p.GetContainer(cont1.ID)
c.Assert(err, check.IsNil)
expectedIP := dockerContainer.NetworkSettings.IPAddress
expectedPort := dockerContainer.NetworkSettings.Ports["8888/tcp"][0].HostPort
c.Assert(cont1.IP, check.Equals, expectedIP)
c.Assert(cont1.HostPort, check.Equals, expectedPort)
c.Assert(cont1.Status, check.Equals, provision.StatusStarting.String())
}
func (s *S) TestProvisionerStop(c *check.C) {
dcli, _ := docker.NewClient(s.server.URL())
a := provisiontest.NewFakeApp("almah", "static", 2)
customData := map[string]interface{}{
"processes": map[string]interface{}{
"web": "python web.py",
"worker": "python worker.py",
},
}
cont1, err := s.newContainer(&newContainerOpts{
AppName: a.GetName(),
ImageCustomData: customData,
ProcessName: "web",
}, nil)
c.Assert(err, check.IsNil)
defer s.removeTestContainer(cont1)
cont2, err := s.newContainer(&newContainerOpts{
AppName: a.GetName(),
ImageCustomData: customData,
ProcessName: "worker",
}, nil)
c.Assert(err, check.IsNil)
defer s.removeTestContainer(cont2)
err = dcli.StartContainer(cont1.ID, nil)
c.Assert(err, check.IsNil)
dockerContainer, err := dcli.InspectContainer(cont1.ID)
c.Assert(err, check.IsNil)
c.Assert(dockerContainer.State.Running, check.Equals, true)
err = dcli.StartContainer(cont2.ID, nil)
c.Assert(err, check.IsNil)
dockerContainer, err = dcli.InspectContainer(cont2.ID)
c.Assert(err, check.IsNil)
c.Assert(dockerContainer.State.Running, check.Equals, true)
err = s.p.Stop(a, "")
c.Assert(err, check.IsNil)
dockerContainer, err = dcli.InspectContainer(cont1.ID)
c.Assert(err, check.IsNil)
c.Assert(dockerContainer.State.Running, check.Equals, false)
dockerContainer, err = dcli.InspectContainer(cont2.ID)
c.Assert(err, check.IsNil)
c.Assert(dockerContainer.State.Running, check.Equals, false)
}
func (s *S) TestProvisionerStopProcess(c *check.C) {
dcli, _ := docker.NewClient(s.server.URL())
a := provisiontest.NewFakeApp("almah", "static", 2)
customData := map[string]interface{}{
"processes": map[string]interface{}{
"web": "python web.py",
"worker": "python worker.py",
},
}
cont1, err := s.newContainer(&newContainerOpts{
AppName: a.GetName(),
ImageCustomData: customData,
ProcessName: "web",
}, nil)
c.Assert(err, check.IsNil)
defer s.removeTestContainer(cont1)
cont2, err := s.newContainer(&newContainerOpts{
AppName: a.GetName(),
ImageCustomData: customData,
ProcessName: "worker",
}, nil)
c.Assert(err, check.IsNil)
defer s.removeTestContainer(cont2)
err = dcli.StartContainer(cont1.ID, nil)
c.Assert(err, check.IsNil)
dockerContainer, err := dcli.InspectContainer(cont1.ID)
c.Assert(err, check.IsNil)
c.Assert(dockerContainer.State.Running, check.Equals, true)
err = dcli.StartContainer(cont2.ID, nil)
c.Assert(err, check.IsNil)
dockerContainer, err = dcli.InspectContainer(cont2.ID)
c.Assert(err, check.IsNil)
c.Assert(dockerContainer.State.Running, check.Equals, true)
err = s.p.Stop(a, "worker")
c.Assert(err, check.IsNil)
dockerContainer, err = dcli.InspectContainer(cont1.ID)
c.Assert(err, check.IsNil)
c.Assert(dockerContainer.State.Running, check.Equals, true)
dockerContainer, err = dcli.InspectContainer(cont2.ID)
c.Assert(err, check.IsNil)
c.Assert(dockerContainer.State.Running, check.Equals, false)
}
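// TestProvisionerStopSkipAlreadyStoppedContainers ensures Stop succeeds even
// when some of the app's containers are already stopped.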
func (s *S) TestProvisionerStopSkipAlreadyStoppedContainers(c *check.C) {
dcli, _ := docker.NewClient(s.server.URL())
a := provisiontest.NewFakeApp("almah", "static", 2)
container, err := s.newContainer(&newContainerOpts{AppName: a.GetName()}, nil)
c.Assert(err, check.IsNil)
defer s.removeTestContainer(container)
err = dcli.StartContainer(container.ID, nil)
c.Assert(err, check.IsNil)
dockerContainer, err := dcli.InspectContainer(container.ID)
c.Assert(err, check.IsNil)
c.Assert(dockerContainer.State.Running, check.Equals, true)
container2, err := s.newContainer(&newContainerOpts{AppName: a.GetName()}, nil)
c.Assert(err, check.IsNil)
defer s.removeTestContainer(container2)
err = dcli.StartContainer(container2.ID, nil)
c.Assert(err, check.IsNil)
err = dcli.StopContainer(container2.ID, 1)
c.Assert(err, check.IsNil)
dockerContainer2, err := dcli.InspectContainer(container2.ID)
c.Assert(err, check.IsNil)
c.Assert(dockerContainer2.State.Running, check.Equals, false)
err = s.p.Stop(a, "")
c.Assert(err, check.IsNil)
dockerContainer, err = dcli.InspectContainer(container.ID)
c.Assert(err, check.IsNil)
c.Assert(dockerContainer.State.Running, check.Equals, false)
dockerContainer2, err = dcli.InspectContainer(container2.ID)
c.Assert(err, check.IsNil)
c.Assert(dockerContainer2.State.Running, check.Equals, false)
}
func (s *S) TestProvisionerSleep(c *check.C) {
dcli, err := docker.NewClient(s.server.URL())
c.Assert(err, check.IsNil)
a := provisiontest.NewFakeApp("almah", "static", 2)
customData := map[string]interface{}{
"processes": map[string]interface{}{
"web": "python web.py",
"worker": "python worker.py",
},
}
cont1, err := s.newContainer(&newContainerOpts{
AppName: a.GetName(),
ImageCustomData: customData,
ProcessName: "web",
}, nil)
c.Assert(err, check.IsNil)
defer s.removeTestContainer(cont1)
err = dcli.StartContainer(cont1.ID, nil)
c.Assert(err, check.IsNil)
err = cont1.SetStatus(s.p.ClusterClient(), provision.StatusStarted, true)
c.Assert(err, check.IsNil)
cont2, err := s.newContainer(&newContainerOpts{
AppName: a.GetName(),
ImageCustomData: customData,
ProcessName: "worker",
}, nil)
c.Assert(err, check.IsNil)
err = dcli.StartContainer(cont2.ID, nil)
c.Assert(err, check.IsNil)
err = cont2.SetStatus(s.p.ClusterClient(), provision.StatusStarted, true)
c.Assert(err, check.IsNil)
defer s.removeTestContainer(cont2)
dockerContainer, err := dcli.InspectContainer(cont1.ID)
c.Assert(err, check.IsNil)
c.Assert(dockerContainer.State.Running, check.Equals, true)
dockerContainer, err = dcli.InspectContainer(cont2.ID)
c.Assert(err, check.IsNil)
c.Assert(dockerContainer.State.Running, check.Equals, true)
err = s.p.Sleep(a, "")
c.Assert(err, check.IsNil)
coll := s.p.Collection()
defer coll.Close()
err = coll.Find(bson.M{"id": cont1.ID}).One(&cont1)
c.Assert(err, check.IsNil)
err = coll.Find(bson.M{"id": cont2.ID}).One(&cont2)
c.Assert(err, check.IsNil)
c.Assert(cont1.Status, check.Equals, provision.StatusAsleep.String())
c.Assert(cont2.Status, check.Equals, provision.StatusAsleep.String())
dockerContainer, err = dcli.InspectContainer(cont1.ID)
c.Assert(err, check.IsNil)
c.Assert(dockerContainer.State.Running, check.Equals, false)
dockerContainer, err = dcli.InspectContainer(cont2.ID)
c.Assert(err, check.IsNil)
c.Assert(dockerContainer.State.Running, check.Equals, false)
}
func (s *S) TestProvisionerSleepProcess(c *check.C) {
dcli, _ := docker.NewClient(s.server.URL())
a := provisiontest.NewFakeApp("almah", "static", 2)
customData := map[string]interface{}{
"processes": map[string]interface{}{
"web": "python web.py",
"worker": "python worker.py",
},
}
cont1, err := s.newContainer(&newContainerOpts{
AppName: a.GetName(),
ImageCustomData: customData,
ProcessName: "web",
}, nil)
c.Assert(err, check.IsNil)
defer s.removeTestContainer(cont1)
err = cont1.SetStatus(s.p.ClusterClient(), provision.StatusStarted, true)
c.Assert(err, check.IsNil)
cont2, err := s.newContainer(&newContainerOpts{
AppName: a.GetName(),
ImageCustomData: customData,
ProcessName: "worker",
}, nil)
c.Assert(err, check.IsNil)
defer s.removeTestContainer(cont2)
err = cont2.SetStatus(s.p.ClusterClient(), provision.StatusStarted, true)
c.Assert(err, check.IsNil)
err = dcli.StartContainer(cont1.ID, nil)
c.Assert(err, check.IsNil)
dockerContainer, err := dcli.InspectContainer(cont1.ID)
c.Assert(err, check.IsNil)
c.Assert(dockerContainer.State.Running, check.Equals, true)
err = dcli.StartContainer(cont2.ID, nil)
c.Assert(err, check.IsNil)
dockerContainer, err = dcli.InspectContainer(cont2.ID)
c.Assert(err, check.IsNil)
c.Assert(dockerContainer.State.Running, check.Equals, true)
err = s.p.Sleep(a, "web")
c.Assert(err, check.IsNil)
dockerContainer, err = dcli.InspectContainer(cont1.ID)
c.Assert(err, check.IsNil)
c.Assert(dockerContainer.State.Running, check.Equals, false)
dockerContainer, err = dcli.InspectContainer(cont2.ID)
c.Assert(err, check.IsNil)
c.Assert(dockerContainer.State.Running, check.Equals, true)
}
func (s *S) TestProvisionerUnits(c *check.C) {
app := app.App{Name: "myapplication"}
coll := s.p.Collection()
defer coll.Close()
err := coll.Insert(
container.Container{
Container: types.Container{
ID: "9930c24f1c4f",
AppName: app.Name,
Type: "python",
Status: provision.StatusBuilding.String(),
IP: "127.0.0.4",
HostAddr: "192.168.123.9",
HostPort: "9025",
},
},
)
c.Assert(err, check.IsNil)
units, err := s.p.Units(&app)
c.Assert(err, check.IsNil)
expected := []provision.Unit{
{
ID: "9930c24f1c4f",
AppName: "myapplication",
Type: "python",
Status: provision.StatusBuilding,
IP: "192.168.123.9",
Address: &url.URL{
Scheme: "http",
Host: "192.168.123.9:9025",
},
},
}
c.Assert(units, check.DeepEquals, expected)
}
func (s *S) TestProvisionerGetAppFromUnitID(c *check.C) {
app := app.App{Name: "myapplication"}
err := s.conn.Apps().Insert(app)
c.Assert(err, check.IsNil)
coll := s.p.Collection()
defer coll.Close()
err = coll.Insert(
container.Container{
Container: types.Container{
ID: "9930c24f1c4f",
AppName: app.Name,
Type: "python",
Status: provision.StatusBuilding.String(),
IP: "127.0.0.4",
HostAddr: "192.168.123.9",
HostPort: "9025",
},
},
)
c.Assert(err, check.IsNil)
a, err := s.p.GetAppFromUnitID("9930c24f1c4f")
c.Assert(err, check.IsNil)
c.Assert(app.GetName(), check.Equals, a.GetName())
}
func (s *S) TestProvisionerGetAppFromUnitIDAppNotFound(c *check.C) {
app := app.App{Name: "myapplication"}
coll := s.p.Collection()
defer coll.Close()
err := coll.Insert(
container.Container{
Container: types.Container{
ID: "9930c24f1c4f",
AppName: app.Name,
Type: "python",
Status: provision.StatusBuilding.String(),
IP: "127.0.0.4",
HostAddr: "192.168.123.9",
HostPort: "9025",
},
},
)
c.Assert(err, check.IsNil)
_, err = s.p.GetAppFromUnitID("9930c24f1c4f")
c.Assert(err, check.NotNil)
}
func (s *S) TestProvisionerGetAppFromUnitIDContainerNotFound(c *check.C) {
_, err := s.p.GetAppFromUnitID("not found")
c.Assert(err, check.NotNil)
}
func (s *S) TestProvisionerUnitsAppDoesNotExist(c *check.C) {
app := app.App{Name: "myapplication"}
units, err := s.p.Units(&app)
c.Assert(err, check.IsNil)
expected := []provision.Unit{}
c.Assert(units, check.DeepEquals, expected)
}
func (s *S) TestProvisionerUnitsStatus(c *check.C) {
app := app.App{Name: "myapplication"}
coll := s.p.Collection()
defer coll.Close()
err := coll.Insert(
container.Container{
Container: types.Container{
ID: "9930c24f1c4f",
AppName: app.Name,
Type: "python",
Status: provision.StatusBuilding.String(),
IP: "127.0.0.4",
HostAddr: "10.0.0.7",
HostPort: "9025",
},
},
container.Container{
Container: types.Container{
ID: "9930c24f1c4j",
AppName: app.Name,
Type: "python",
Status: provision.StatusError.String(),
IP: "127.0.0.4",
HostAddr: "10.0.0.7",
HostPort: "9025",
},
},
)
c.Assert(err, check.IsNil)
units, err := s.p.Units(&app)
c.Assert(err, check.IsNil)
sortUnits(units)
expected := []provision.Unit{
{
ID: "9930c24f1c4f",
AppName: "myapplication",
Type: "python",
Status: provision.StatusBuilding,
IP: "10.0.0.7",
Address: &url.URL{
Scheme: "http",
Host: "10.0.0.7:9025",
},
},
{
ID: "9930c24f1c4j",
AppName: "myapplication",
Type: "python",
Status: provision.StatusError,
IP: "10.0.0.7",
Address: &url.URL{
Scheme: "http",
Host: "10.0.0.7:9025",
},
},
}
c.Assert(units, check.DeepEquals, expected)
}
func (s *S) TestProvisionerUnitsIp(c *check.C) {
app := app.App{Name: "myapplication"}
coll := s.p.Collection()
defer coll.Close()
err := coll.Insert(
container.Container{
Container: types.Container{
ID: "9930c24f1c4f",
AppName: app.Name,
Type: "python",
Status: provision.StatusBuilding.String(),
IP: "127.0.0.4",
HostPort: "9025",
HostAddr: "127.0.0.1",
},
},
)
c.Assert(err, check.IsNil)
units, err := s.p.Units(&app)
c.Assert(err, check.IsNil)
expected := []provision.Unit{
{
ID: "9930c24f1c4f",
AppName: "myapplication",
Type: "python",
IP: "127.0.0.1",
Status: provision.StatusBuilding,
Address: &url.URL{
Scheme: "http",
Host: "127.0.0.1:9025",
},
},
}
c.Assert(units, check.DeepEquals, expected)
}
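// TestRegisterUnit checks that RegisterUnit refreshes the container's IP
// from the Docker inspect data and marks the unit as started.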
func (s *S) TestRegisterUnit(c *check.C) {
a := &app.App{Name: "myawesomeapp"}
err := s.conn.Apps().Insert(a)
c.Assert(err, check.IsNil)
opts := newContainerOpts{Status: provision.StatusStarting.String(), AppName: "myawesomeapp"}
container, err := s.newContainer(&opts, nil)
c.Assert(err, check.IsNil)
defer s.removeTestContainer(container)
container.IP = "xinvalidx"
coll := s.p.Collection()
defer coll.Close()
err = coll.Update(bson.M{"id": container.ID}, container)
c.Assert(err, check.IsNil)
err = s.p.RegisterUnit(a, container.ID, nil)
c.Assert(err, check.IsNil)
dbCont, err := s.p.GetContainer(container.ID)
c.Assert(err, check.IsNil)
c.Assert(dbCont.IP, check.Matches, `\d+\.\d+\.\d+\.\d+`)
c.Assert(dbCont.Status, check.Equals, provision.StatusStarted.String())
}
func (s *S) TestRegisterUnitBuildingContainer(c *check.C) {
a := &app.App{Name: "myawesomeapp"}
opts := newContainerOpts{Status: provision.StatusBuilding.String(), AppName: a.Name}
container, err := s.newContainer(&opts, nil)
c.Assert(err, check.IsNil)
defer s.removeTestContainer(container)
container.IP = "xinvalidx"
coll := s.p.Collection()
defer coll.Close()
err = coll.Update(bson.M{"id": container.ID}, container)
c.Assert(err, check.IsNil)
err = s.p.RegisterUnit(a, container.ID, nil)
c.Assert(err, check.IsNil)
dbCont, err := s.p.GetContainer(container.ID)
c.Assert(err, check.IsNil)
c.Assert(dbCont.IP, check.Matches, `xinvalidx`)
c.Assert(dbCont.Status, check.Equals, provision.StatusBuilding.String())
}
func (s *S) TestRegisterUnitSavesCustomDataRawProcfile(c *check.C) {
a := &app.App{Name: "myawesomeapp"}
opts := newContainerOpts{Status: provision.StatusBuilding.String(), AppName: a.Name}
container, err := s.newContainer(&opts, nil)
c.Assert(err, check.IsNil)
defer s.removeTestContainer(container)
container.IP = "xinvalidx"
coll := s.p.Collection()
defer coll.Close()
err = coll.Update(bson.M{"id": container.ID}, container)
c.Assert(err, check.IsNil)
data := map[string]interface{}{"mydata": "value", "procfile": "web: python myapp.py"}
err = s.p.RegisterUnit(a, container.ID, data)
c.Assert(err, check.IsNil)
deployingVersion, err := servicemanager.AppVersion.VersionByPendingImage(a, container.BuildingImage)
c.Assert(err, check.IsNil)
c.Assert(deployingVersion.VersionInfo().CustomData["mydata"], check.DeepEquals, data["mydata"])
expectedProcesses := map[string][]string{"web": {"python myapp.py"}}
c.Assert(deployingVersion.VersionInfo().Processes, check.DeepEquals, expectedProcesses)
}
func (s *S) TestRegisterUnitSavesCustomDataParsedProcesses(c *check.C) {
a := &app.App{Name: "myawesomeapp"}
opts := newContainerOpts{Status: provision.StatusBuilding.String(), AppName: a.Name}
container, err := s.newContainer(&opts, nil)
c.Assert(err, check.IsNil)
defer s.removeTestContainer(container)
container.IP = "xinvalidx"
coll := s.p.Collection()
defer coll.Close()
err = coll.Update(bson.M{"id": container.ID}, container)
c.Assert(err, check.IsNil)
data := map[string]interface{}{
"mydata": "value",
"procfile": "web: python myapp.py",
"processes": map[string]interface{}{
"web": "python web.py",
"worker": "python worker.py",
},
}
err = s.p.RegisterUnit(a, container.ID, data)
c.Assert(err, check.IsNil)
deployingVersion, err := servicemanager.AppVersion.VersionByPendingImage(a, container.BuildingImage)
c.Assert(err, check.IsNil)
c.Assert(deployingVersion.VersionInfo().CustomData["mydata"], check.DeepEquals, data["mydata"])
expectedProcesses := map[string][]string{"web": {"python web.py"}, "worker": {"python worker.py"}}
c.Assert(deployingVersion.VersionInfo().Processes, check.DeepEquals, expectedProcesses)
}
func (s *S) TestRegisterUnitInvalidProcfile(c *check.C) {
a := &app.App{Name: "myawesomeapp"}
opts := newContainerOpts{Status: provision.StatusBuilding.String(), AppName: a.Name}
container, err := s.newContainer(&opts, nil)
c.Assert(err, check.IsNil)
defer s.removeTestContainer(container)
container.IP = "xinvalidx"
coll := s.p.Collection()
defer coll.Close()
err = coll.Update(bson.M{"id": container.ID}, container)
c.Assert(err, check.IsNil)
data := map[string]interface{}{"mydata": "value", "procfile": "aaaaaaaaaaaaaaaaaaaaaa"}
err = s.p.RegisterUnit(a, container.ID, data)
c.Assert(err, check.NotNil)
c.Assert(err.Error(), check.Equals, "invalid Procfile")
}
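// TestRunRestartAfterHooks captures the exec request bodies to verify that
// each restart:after hook runs inside the container via /bin/sh -lc.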
func (s *S) TestRunRestartAfterHooks(c *check.C) {
a := &app.App{Name: "myrestartafterapp"}
customData := map[string]interface{}{
"hooks": map[string]interface{}{
"restart": map[string]interface{}{
"after": []string{"cmd1", "cmd2"},
},
},
}
version, err := newVersionForApp(s.p, a, customData)
c.Assert(err, check.IsNil)
err = s.conn.Apps().Insert(a)
c.Assert(err, check.IsNil)
opts := newContainerOpts{AppName: a.Name}
container, err := s.newContainer(&opts, nil)
c.Assert(err, check.IsNil)
defer s.removeTestContainer(container)
var reqBodies [][]byte
s.server.CustomHandler("/containers/"+container.ID+"/exec", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
data, _ := ioutil.ReadAll(r.Body)
r.Body = ioutil.NopCloser(bytes.NewBuffer(data))
reqBodies = append(reqBodies, data)
s.server.DefaultHandler().ServeHTTP(w, r)
}))
defer container.Remove(s.p.ClusterClient(), s.p.ActionLimiter())
var buf bytes.Buffer
yamlData, err := version.TsuruYamlData()
c.Assert(err, check.IsNil)
err = s.p.runRestartAfterHooks(container, yamlData, &buf)
c.Assert(err, check.IsNil)
c.Assert(buf.String(), check.Equals, "")
c.Assert(reqBodies, check.HasLen, 2)
var req1, req2 map[string]interface{}
err = json.Unmarshal(reqBodies[0], &req1)
c.Assert(err, check.IsNil)
err = json.Unmarshal(reqBodies[1], &req2)
c.Assert(err, check.IsNil)
c.Assert(req1, check.DeepEquals, map[string]interface{}{
"AttachStdout": true,
"AttachStderr": true,
"Cmd": []interface{}{"/bin/sh", "-lc", "cmd1"},
"Container": container.ID,
})
c.Assert(req2, check.DeepEquals, map[string]interface{}{
"AttachStdout": true,
"AttachStderr": true,
"Cmd": []interface{}{"/bin/sh", "-lc", "cmd2"},
"Container": container.ID,
})
}
func (s *S) TestExecuteCommandStdin(c *check.C) {
a := provisiontest.NewFakeApp("almah", "static", 1)
cont, err := s.newContainer(&newContainerOpts{AppName: a.GetName()}, nil)
c.Assert(err, check.IsNil)
defer s.removeTestContainer(cont)
buf := safe.NewBuffer([]byte("echo test"))
conn := &provisiontest.FakeConn{Buf: buf}
err = s.p.ExecuteCommand(provision.ExecOptions{
App: a,
Stdout: conn,
Stderr: conn,
Stdin: conn,
Width: 10,
Height: 10,
Units: []string{cont.ID},
Cmds: []string{"ls", "-l"},
})
c.Assert(err, check.IsNil)
}
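// TestDryMode checks that dryMode yields a provisioner copy that sees the
// same containers without mutating the real cluster.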
func (s *S) TestDryMode(c *check.C) {
appInstance := provisiontest.NewFakeApp("myapp", "python", 0)
s.p.Provision(appInstance)
version, err := newSuccessfulVersionForApp(s.p, appInstance, nil)
c.Assert(err, check.IsNil)
_, err = addContainersWithHost(&changeUnitsPipelineArgs{
toHost: "127.0.0.1",
toAdd: map[string]*containersToAdd{"web": {Quantity: 5}},
app: appInstance,
version: version,
provisioner: s.p,
})
c.Assert(err, check.IsNil)
newProv, err := s.p.dryMode(nil)
c.Assert(err, check.IsNil)
contsNew, err := newProv.listAllContainers()
c.Assert(err, check.IsNil)
c.Assert(contsNew, check.HasLen, 5)
}
func (s *S) TestAddContainerDefaultProcess(c *check.C) {
customData := map[string]interface{}{
"processes": map[string]interface{}{
"web": "python myapp.py",
},
}
appName := "my-fake-app"
fakeApp := provisiontest.NewFakeApp(appName, "python", 0)
version, err := newSuccessfulVersionForApp(s.p, fakeApp, customData)
c.Assert(err, check.IsNil)
s.p.Provision(fakeApp)
buf := safe.NewBuffer(nil)
args := changeUnitsPipelineArgs{
app: fakeApp,
provisioner: s.p,
writer: buf,
toAdd: map[string]*containersToAdd{"": {Quantity: 2}},
version: version,
}
containers, err := addContainersWithHost(&args)
c.Assert(err, check.IsNil)
c.Assert(containers, check.HasLen, 2)
parts := strings.Split(buf.String(), "\n")
c.Assert(parts, check.HasLen, 5)
c.Assert(parts[0], check.Equals, "")
c.Assert(parts[1], check.Matches, `---- Starting 2 new units \[web: 2\] ----`)
c.Assert(parts[2], check.Matches, ` ---> Started unit .+ \[web\]`)
c.Assert(parts[3], check.Matches, ` ---> Started unit .+ \[web\]`)
c.Assert(parts[4], check.Equals, "")
}
func (s *S) TestInitializeSetsBSHook(c *check.C) {
var p dockerProvisioner
err := p.Initialize()
c.Assert(err, check.IsNil)
c.Assert(p.cluster, check.NotNil)
c.Assert(p.cluster.Hooks(cluster.HookEventBeforeContainerCreate), check.DeepEquals, []cluster.Hook{&internalNodeContainer.ClusterHook{Provisioner: &p}})
}
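// TestProvisionerLogsEnabled is table-driven: each case combines global and
// pool-scoped bs node container envs with the expected availability message.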
func (s *S) TestProvisionerLogsEnabled(c *check.C) {
appName := "my-fake-app"
fakeApp := provisiontest.NewFakeApp(appName, "python", 0)
fakeApp.Pool = "mypool"
tests := []struct {
envs []string
poolEnvs map[string][]string
enabled bool
msg string
err error
}{
{nil, nil, true, "", nil},
{[]string{}, nil, true, "", nil},
{[]string{"LOG_BACKENDS=xxx"}, nil, false, "Logs not available through tsuru. Enabled log backends are:\n* xxx", nil},
{[]string{"LOG_BACKENDS=xxx", "LOG_XXX_DOC=my doc"}, nil, false, "Logs not available through tsuru. Enabled log backends are:\n* xxx: my doc", nil},
{[]string{"LOG_BACKENDS=a, b , c"}, nil, false, "Logs not available through tsuru. Enabled log backends are:\n* a\n* b\n* c", nil},
{[]string{}, map[string][]string{"mypool": {"LOG_BACKENDS=abc"}}, false, "Logs not available through tsuru. Enabled log backends are:\n* abc", nil},
{[]string{}, map[string][]string{"mypool": {"LOG_BACKENDS=abc", "LOG_ABC_DOC=doc"}}, false, "Logs not available through tsuru. Enabled log backends are:\n* abc: doc", nil},
{[]string{}, map[string][]string{"otherpool": {"LOG_BACKENDS=abc"}}, true, "", nil},
{[]string{}, map[string][]string{"mypool": {"LOG_BACKENDS=abc, tsuru "}}, true, "", nil},
}
for i, t := range tests {
if t.envs != nil || t.poolEnvs != nil {
err := nodecontainer.AddNewContainer("", &nodecontainer.NodeContainerConfig{
Name: nodecontainer.BsDefaultName,
Config: docker.Config{
Env: t.envs,
Image: "img1",
},
})
c.Assert(err, check.IsNil)
for pool, envs := range t.poolEnvs {
err := nodecontainer.AddNewContainer(pool, &nodecontainer.NodeContainerConfig{
Name: nodecontainer.BsDefaultName,
Config: docker.Config{
Env: envs,
},
})
c.Assert(err, check.IsNil)
}
}
enabled, msg, err := s.p.LogsEnabled(fakeApp)
c.Assert(err, check.Equals, t.err)
		c.Assert(enabled, check.Equals, t.enabled, check.Commentf("test %d", i))
c.Assert(msg, check.Equals, t.msg)
for pool := range t.poolEnvs {
err = nodecontainer.RemoveContainer(pool, nodecontainer.BsDefaultName)
c.Assert(err, check.IsNil)
}
}
}
func (s *S) TestProvisionerLogsEnabledOtherDriver(c *check.C) {
appName := "my-fake-app"
fakeApp := provisiontest.NewFakeApp(appName, "python", 0)
fakeApp.Pool = "mypool"
logConf := container.DockerLogConfig{DockerLogConfig: types.DockerLogConfig{Driver: "x"}}
err := logConf.Save("")
c.Assert(err, check.IsNil)
enabled, msg, err := s.p.LogsEnabled(fakeApp)
c.Assert(err, check.IsNil)
c.Assert(enabled, check.Equals, false)
c.Assert(msg, check.Equals, "Logs not available through tsuru. Enabled log driver is \"x\".")
logConf = container.DockerLogConfig{DockerLogConfig: types.DockerLogConfig{Driver: "bs"}}
err = logConf.Save("")
c.Assert(err, check.IsNil)
enabled, msg, err = s.p.LogsEnabled(fakeApp)
c.Assert(err, check.IsNil)
c.Assert(enabled, check.Equals, true)
c.Assert(msg, check.Equals, "")
}
func (s *S) TestProvisionerRoutableAddresses(c *check.C) {
appName := "my-fake-app"
fakeApp := provisiontest.NewFakeApp(appName, "python", 0)
routes, err := s.p.RoutableAddresses(fakeApp)
c.Assert(err, check.IsNil)
c.Assert(routes, check.DeepEquals, []url.URL{})
version, err := newSuccessfulVersionForApp(s.p, fakeApp, nil)
c.Assert(err, check.IsNil)
err = servicemanager.AppVersion.DeleteVersion(fakeApp.GetName(), version.Version())
c.Assert(err, check.IsNil)
routes, err = s.p.RoutableAddresses(fakeApp)
c.Assert(err, check.IsNil)
c.Assert(routes, check.DeepEquals, []url.URL{})
version, err = newSuccessfulVersionForApp(s.p, fakeApp, nil)
c.Assert(err, check.IsNil)
conts, err := addContainersWithHost(&changeUnitsPipelineArgs{
toAdd: map[string]*containersToAdd{"web": {Quantity: 1}},
app: fakeApp,
version: version,
provisioner: s.p,
})
c.Assert(err, check.IsNil)
c.Assert(conts, check.HasLen, 1)
routes, err = s.p.RoutableAddresses(fakeApp)
c.Assert(err, check.IsNil)
c.Assert(routes, check.DeepEquals, []url.URL{
*conts[0].Address(),
})
}
func (s *S) TestProvisionerRoutableAddressesInvalidContainers(c *check.C) {
appName := "my-fake-app"
fakeApp := provisiontest.NewFakeApp(appName, "python", 0)
version, err := newSuccessfulVersionForApp(s.p, fakeApp, nil)
c.Assert(err, check.IsNil)
conts, err := addContainersWithHost(&changeUnitsPipelineArgs{
toAdd: map[string]*containersToAdd{"web": {Quantity: 3}},
app: fakeApp,
version: version,
provisioner: s.p,
})
c.Assert(err, check.IsNil)
c.Assert(conts, check.HasLen, 3)
conts[0].HostAddr = ""
conts[1].HostPort = ""
coll := s.p.Collection()
defer coll.Close()
err = coll.Update(bson.M{"id": conts[0].ID}, conts[0])
c.Assert(err, check.IsNil)
err = coll.Update(bson.M{"id": conts[1].ID}, conts[1])
c.Assert(err, check.IsNil)
routes, err := s.p.RoutableAddresses(fakeApp)
c.Assert(err, check.IsNil)
c.Assert(routes, check.DeepEquals, []url.URL{
*conts[2].Address(),
})
}
func (s *S) TestFilterAppsByUnitStatus(c *check.C) {
app1 := provisiontest.NewFakeApp("app1", "python", 0)
app2 := provisiontest.NewFakeApp("app2", "python", 0)
cont1, err := s.newContainer(&newContainerOpts{
AppName: app1.GetName(),
Status: "stopped",
}, nil)
c.Assert(err, check.IsNil)
defer s.removeTestContainer(cont1)
cont2, err := s.newContainer(&newContainerOpts{
AppName: app2.GetName(),
Status: "started",
}, nil)
c.Assert(err, check.IsNil)
defer s.removeTestContainer(cont2)
apps, err := s.p.FilterAppsByUnitStatus([]provision.App{app1}, nil)
c.Assert(apps, check.DeepEquals, []provision.App{})
c.Assert(err, check.IsNil)
apps, err = s.p.FilterAppsByUnitStatus(nil, []string{"building"})
c.Assert(apps, check.IsNil)
c.Assert(err, check.Not(check.IsNil))
apps, err = s.p.FilterAppsByUnitStatus(nil, nil)
c.Assert(apps, check.IsNil)
c.Assert(err, check.Not(check.IsNil))
apps, err = s.p.FilterAppsByUnitStatus([]provision.App{app1, app2}, []string{"started"})
c.Assert(apps, check.DeepEquals, []provision.App{app2})
c.Assert(err, check.IsNil)
apps, err = s.p.FilterAppsByUnitStatus([]provision.App{app1, app2}, []string{"building"})
c.Assert(apps, check.DeepEquals, []provision.App{})
c.Assert(err, check.IsNil)
}
func (s *S) TestListNodes(c *check.C) {
nodes, err := s.p.cluster.Nodes()
c.Assert(err, check.IsNil)
listedNodes, err := s.p.ListNodes([]string{nodes[0].Address})
c.Assert(err, check.IsNil)
c.Assert(listedNodes, check.DeepEquals, []provision.Node{
&clusterNodeWrapper{Node: &nodes[0], prov: s.p},
})
listedNodes, err = s.p.ListNodes(nil)
c.Assert(err, check.IsNil)
c.Assert(listedNodes, check.DeepEquals, []provision.Node{
&clusterNodeWrapper{Node: &nodes[0], prov: s.p},
})
listedNodes, err = s.p.ListNodes([]string{"notfound"})
c.Assert(err, check.IsNil)
c.Assert(listedNodes, check.DeepEquals, []provision.Node{})
}
func (s *S) TestListNodesWithFilter(c *check.C) {
p, err := s.startMultipleServersCluster()
c.Assert(err, check.IsNil)
mainDockerProvisioner = p
nodes, err := p.cluster.Nodes()
c.Assert(err, check.IsNil)
filter := &provTypes.NodeFilter{Metadata: map[string]string{"pool": "test-default", "m1": "v1"}}
listedNodes, err := p.ListNodesByFilter(filter)
c.Assert(err, check.IsNil)
c.Assert(listedNodes, check.DeepEquals, []provision.Node{
&clusterNodeWrapper{Node: &nodes[0], prov: p},
})
filter = &provTypes.NodeFilter{Metadata: map[string]string{"pool": "test-default"}}
listedNodes, err = p.ListNodesByFilter(filter)
c.Assert(err, check.IsNil)
c.Assert(listedNodes, check.DeepEquals, []provision.Node{
&clusterNodeWrapper{Node: &nodes[0], prov: p},
&clusterNodeWrapper{Node: &nodes[1], prov: p},
})
filter = &provTypes.NodeFilter{Metadata: map[string]string{"m1": "v1"}}
listedNodes, err = p.ListNodesByFilter(filter)
c.Assert(err, check.IsNil)
c.Assert(listedNodes, check.DeepEquals, []provision.Node{
&clusterNodeWrapper{Node: &nodes[0], prov: p},
})
filter = &provTypes.NodeFilter{Metadata: map[string]string{"m1": "v2"}}
listedNodes, err = p.ListNodesByFilter(filter)
c.Assert(err, check.IsNil)
c.Assert(listedNodes, check.DeepEquals, []provision.Node{})
}
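// TestAddNode registers a node against a fake Docker endpoint and waits for
// the background queue to mark its creation status as created.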
func (s *S) TestAddNode(c *check.C) {
server, waitQueue := startFakeDockerNode(c)
defer server.Stop()
var p dockerProvisioner
err := p.Initialize()
c.Assert(err, check.IsNil)
p.cluster, _ = cluster.New(nil, &cluster.MapStorage{}, "")
mainDockerProvisioner = &p
opts := provision.AddNodeOptions{
Address: server.URL(),
Pool: "pool1",
Metadata: map[string]string{
"m1": "x1",
},
}
err = p.AddNode(opts)
c.Assert(err, check.IsNil)
waitQueue()
nodes, err := p.Cluster().Nodes()
c.Assert(err, check.IsNil)
c.Assert(nodes, check.HasLen, 1)
c.Assert(nodes[0].Address, check.Equals, server.URL())
c.Assert(nodes[0].Metadata, check.DeepEquals, map[string]string{
"pool": "pool1",
"m1": "x1",
"LastSuccess": nodes[0].Metadata["LastSuccess"],
})
c.Assert(nodes[0].CreationStatus, check.Equals, cluster.NodeCreationStatusCreated)
}
func (s *S) TestAddRemoveAddNodeRace(c *check.C) {
pong := make(chan struct{}, 2)
var callCount int32
server, err := testing.NewServer("127.0.0.1:0", nil, func(r *http.Request) {
if strings.Contains(r.URL.Path, "ping") {
pong <- struct{}{}
if atomic.AddInt32(&callCount, 1) == 1 {
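// Delay only the first ping so the original node registration is still in
// flight while the test removes and re-adds the node, exercising the race.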
time.Sleep(500 * time.Millisecond)
}
}
})
c.Assert(err, check.IsNil)
defer server.Stop()
var p dockerProvisioner
err = p.Initialize()
c.Assert(err, check.IsNil)
p.cluster, _ = cluster.New(nil, &cluster.MapStorage{}, "")
mainDockerProvisioner = &p
opts := provision.AddNodeOptions{
Address: server.URL(),
Pool: "pool1",
Metadata: map[string]string{
"m1": "x1",
},
}
err = p.AddNode(opts)
c.Assert(err, check.IsNil)
<-pong
err = p.RemoveNode(provision.RemoveNodeOptions{
Address: server.URL(),
})
c.Assert(err, check.IsNil)
opts = provision.AddNodeOptions{
Address: server.URL(),
Pool: "pool2",
Metadata: map[string]string{
"m2": "x2",
},
}
err = p.AddNode(opts)
c.Assert(err, check.IsNil)
<-pong
queue.ResetQueue()
c.Assert(atomic.LoadInt32(&callCount), check.Equals, int32(2))
nodes, err := p.Cluster().Nodes()
c.Assert(err, check.IsNil)
c.Assert(nodes, check.HasLen, 1)
c.Assert(nodes[0].Address, check.Equals, server.URL())
c.Assert(nodes[0].Metadata, check.DeepEquals, map[string]string{
"pool": "pool2",
"m2": "x2",
"LastSuccess": nodes[0].Metadata["LastSuccess"],
})
c.Assert(nodes[0].CreationStatus, check.Equals, cluster.NodeCreationStatusCreated)
}
func (s *S) TestAddNodeNoAddress(c *check.C) {
var p dockerProvisioner
err := p.Initialize()
c.Assert(err, check.IsNil)
p.cluster, _ = cluster.New(nil, &cluster.MapStorage{}, "")
mainDockerProvisioner = &p
opts := provision.AddNodeOptions{}
err = p.AddNode(opts)
c.Assert(err, check.ErrorMatches, "Invalid address")
}
func (s *S) TestAddNodeWithWait(c *check.C) {
server, _ := startFakeDockerNode(c)
defer server.Stop()
var p dockerProvisioner
err := p.Initialize()
c.Assert(err, check.IsNil)
p.cluster, _ = cluster.New(nil, &cluster.MapStorage{}, "")
mainDockerProvisioner = &p
opts := provision.AddNodeOptions{
Address: server.URL(),
Pool: "pool1",
WaitTO: time.Second,
}
err = p.AddNode(opts)
c.Assert(err, check.IsNil)
nodes, err := p.Cluster().Nodes()
c.Assert(err, check.IsNil)
c.Assert(nodes, check.HasLen, 1)
c.Assert(nodes[0].Address, check.Equals, server.URL())
c.Assert(nodes[0].Metadata, check.DeepEquals, map[string]string{
"pool": "pool1",
"LastSuccess": nodes[0].Metadata["LastSuccess"],
})
c.Assert(nodes[0].CreationStatus, check.Equals, cluster.NodeCreationStatusCreated)
}
func (s *S) TestRemoveNode(c *check.C) {
var buf bytes.Buffer
nodes, err := s.p.Cluster().Nodes()
c.Assert(err, check.IsNil)
opts := provision.RemoveNodeOptions{
Address: nodes[0].Address,
Writer: &buf,
}
err = s.p.RemoveNode(opts)
c.Assert(err, check.IsNil)
nodes, err = s.p.Cluster().Nodes()
c.Assert(err, check.IsNil)
c.Assert(nodes, check.HasLen, 0)
c.Assert(buf.String(), check.Equals, "")
}
func (s *S) TestRemoveNodeRebalanceNoUnits(c *check.C) {
var buf bytes.Buffer
nodes, err := s.p.Cluster().Nodes()
c.Assert(err, check.IsNil)
opts := provision.RemoveNodeOptions{
Address: nodes[0].Address,
Rebalance: true,
Writer: &buf,
}
err = s.p.RemoveNode(opts)
c.Assert(err, check.IsNil)
nodes, err = s.p.Cluster().Nodes()
c.Assert(err, check.IsNil)
c.Assert(nodes, check.HasLen, 0)
c.Assert(buf.String(), check.Equals, "")
}
func (s *S) TestRemoveNodeRebalanceWithUnits(c *check.C) {
p, err := s.startMultipleServersCluster()
c.Assert(err, check.IsNil)
mainDockerProvisioner = p
appInstance := provisiontest.NewFakeApp("myapp", "python", 0)
p.Provision(appInstance)
version, err := newSuccessfulVersionForApp(s.p, appInstance, nil)
c.Assert(err, check.IsNil)
_, err = addContainersWithHost(&changeUnitsPipelineArgs{
toHost: "127.0.0.1",
toAdd: map[string]*containersToAdd{"web": {Quantity: 5}},
app: appInstance,
version: version,
provisioner: p,
})
c.Assert(err, check.IsNil)
appStruct := s.newAppFromFake(appInstance)
err = s.conn.Apps().Insert(appStruct)
c.Assert(err, check.IsNil)
buf := safe.NewBuffer(nil)
nodes, err := p.Cluster().Nodes()
c.Assert(err, check.IsNil)
c.Assert(nodes, check.HasLen, 2)
c.Assert(net.URLToHost(nodes[0].Address), check.Equals, "127.0.0.1")
opts := provision.RemoveNodeOptions{
Address: nodes[0].Address,
Rebalance: true,
Writer: buf,
}
err = p.RemoveNode(opts)
c.Assert(err, check.IsNil)
nodes, err = p.Cluster().Nodes()
c.Assert(err, check.IsNil)
c.Assert(nodes, check.HasLen, 1)
c.Assert(net.URLToHost(nodes[0].Address), check.Equals, "localhost")
parts := strings.Split(buf.String(), "\n")
c.Assert(parts, check.HasLen, 11)
c.Assert(parts[0], check.Matches, `Moving unit .+? for "myapp" from 127\.0\.0\.1\.\.\.`)
containerList, err := p.listContainersByHost(net.URLToHost(nodes[0].Address))
c.Assert(err, check.IsNil)
c.Assert(containerList, check.HasLen, 5)
}
func (s *S) TestRemoveNodeNoAddress(c *check.C) {
var buf bytes.Buffer
opts := provision.RemoveNodeOptions{
Writer: &buf,
}
err := s.p.RemoveNode(opts)
c.Assert(err, check.Equals, provision.ErrNodeNotFound)
c.Assert(buf.String(), check.Equals, "")
nodes, err := s.p.Cluster().Nodes()
c.Assert(err, check.IsNil)
c.Assert(nodes, check.HasLen, 1)
}
func (s *S) TestNodeUnits(c *check.C) {
nodes, err := s.p.ListNodes(nil)
c.Assert(err, check.IsNil)
c.Assert(nodes, check.HasLen, 1)
units, err := nodes[0].Units()
c.Assert(err, check.IsNil)
c.Assert(units, check.DeepEquals, []provision.Unit{})
appInstance := provisiontest.NewFakeApp("myapp", "python", 0)
err = s.p.Provision(appInstance)
c.Assert(err, check.IsNil)
version, err := newSuccessfulVersionForApp(s.p, appInstance, nil)
c.Assert(err, check.IsNil)
appStruct := s.newAppFromFake(appInstance)
err = s.conn.Apps().Insert(appStruct)
c.Assert(err, check.IsNil)
containers, err := addContainersWithHost(&changeUnitsPipelineArgs{
toAdd: map[string]*containersToAdd{"web": {Quantity: 5}},
app: appInstance,
version: version,
provisioner: s.p,
})
c.Assert(err, check.IsNil)
nodes, err = s.p.ListNodes(nil)
c.Assert(err, check.IsNil)
c.Assert(nodes, check.HasLen, 1)
units, err = nodes[0].Units()
c.Assert(err, check.IsNil)
expected := []provision.Unit{
containers[0].AsUnit(appInstance),
containers[1].AsUnit(appInstance),
containers[2].AsUnit(appInstance),
containers[3].AsUnit(appInstance),
containers[4].AsUnit(appInstance),
}
sortUnits(units)
sortUnits(expected)
c.Assert(units, check.DeepEquals, expected)
}
func (s *S) TestUpdateNode(c *check.C) {
nodes, err := s.p.Cluster().Nodes()
c.Assert(err, check.IsNil)
opts := provision.UpdateNodeOptions{
Address: nodes[0].Address,
Metadata: map[string]string{
"m1": "v1",
"m2": "v2",
},
}
err = s.p.UpdateNode(opts)
c.Assert(err, check.IsNil)
nodes, err = s.p.Cluster().Nodes()
c.Assert(err, check.IsNil)
c.Assert(nodes, check.HasLen, 1)
c.Assert(nodes[0].Status(), check.Equals, "waiting")
c.Assert(nodes[0].Metadata, check.DeepEquals, map[string]string{
"m1": "v1",
"m2": "v2",
"pool": "test-default",
})
opts = provision.UpdateNodeOptions{
Address: nodes[0].Address,
Metadata: map[string]string{
"m1": "",
"m3": "v3",
},
}
err = s.p.UpdateNode(opts)
c.Assert(err, check.IsNil)
nodes, err = s.p.Cluster().Nodes()
c.Assert(err, check.IsNil)
c.Assert(nodes, check.HasLen, 1)
c.Assert(nodes[0].Status(), check.Equals, "waiting")
c.Assert(nodes[0].Metadata, check.DeepEquals, map[string]string{
"pool": "test-default",
"m2": "v2",
"m3": "v3",
})
}
func (s *S) TestUpdateNodeDisableEnable(c *check.C) {
nodes, err := s.p.Cluster().Nodes()
c.Assert(err, check.IsNil)
opts := provision.UpdateNodeOptions{
Address: nodes[0].Address,
Disable: true,
}
err = s.p.UpdateNode(opts)
c.Assert(err, check.IsNil)
nodes, err = s.p.Cluster().Nodes()
c.Assert(err, check.IsNil)
c.Assert(nodes, check.HasLen, 0)
nodes, err = s.p.Cluster().UnfilteredNodes()
c.Assert(err, check.IsNil)
c.Assert(nodes, check.HasLen, 1)
c.Assert(nodes[0].Status(), check.Equals, "disabled")
opts = provision.UpdateNodeOptions{
Address: nodes[0].Address,
Metadata: map[string]string{"a": "b"},
}
err = s.p.UpdateNode(opts)
c.Assert(err, check.IsNil)
nodes, err = s.p.Cluster().UnfilteredNodes()
c.Assert(err, check.IsNil)
c.Assert(nodes, check.HasLen, 1)
c.Assert(nodes[0].Status(), check.Equals, "disabled")
c.Assert(nodes[0].Metadata["a"], check.Equals, "b")
opts = provision.UpdateNodeOptions{
Address: nodes[0].Address,
Enable: true,
}
err = s.p.UpdateNode(opts)
c.Assert(err, check.IsNil)
nodes, err = s.p.Cluster().Nodes()
c.Assert(err, check.IsNil)
c.Assert(nodes, check.HasLen, 1)
c.Assert(nodes[0].Status(), check.Equals, "waiting")
c.Assert(nodes[0].Metadata["a"], check.Equals, "b")
}
func (s *S) TestUpdateNodeNotFound(c *check.C) {
opts := provision.UpdateNodeOptions{}
err := s.p.UpdateNode(opts)
c.Assert(err, check.Equals, provision.ErrNodeNotFound)
}
func (s *S) TestUpdateNodeEnableCanMoveContainers(c *check.C) {
nodes, err := s.p.Cluster().Nodes()
c.Assert(err, check.IsNil)
opts := provision.UpdateNodeOptions{
Address: nodes[0].Address,
Disable: true,
}
err = s.p.UpdateNode(opts)
c.Assert(err, check.IsNil)
opts = provision.UpdateNodeOptions{
Address: nodes[0].Address,
Enable: true,
}
err = s.p.UpdateNode(opts)
c.Assert(err, check.IsNil)
var buf bytes.Buffer
err = s.p.MoveContainers("localhost", "127.0.0.1", &buf)
c.Assert(err, check.IsNil)
parts := strings.Split(buf.String(), "\n")
c.Assert(parts, check.DeepEquals, []string{
"No units to move in localhost",
"",
})
}
func (s *S) TestUpdateNodeDisableCanMoveContainers(c *check.C) {
p, err := s.startMultipleServersCluster()
c.Assert(err, check.IsNil)
mainDockerProvisioner = p
appInstance := provisiontest.NewFakeApp("myapp", "python", 0)
p.Provision(appInstance)
version, err := newSuccessfulVersionForApp(s.p, appInstance, nil)
c.Assert(err, check.IsNil)
_, err = addContainersWithHost(&changeUnitsPipelineArgs{
toHost: "127.0.0.1",
toAdd: map[string]*containersToAdd{"web": {Quantity: 1}},
app: appInstance,
version: version,
provisioner: p,
})
c.Assert(err, check.IsNil)
_, err = addContainersWithHost(&changeUnitsPipelineArgs{
toHost: "localhost",
toAdd: map[string]*containersToAdd{"web": {Quantity: 1}},
app: appInstance,
version: version,
provisioner: p,
})
c.Assert(err, check.IsNil)
appStruct := s.newAppFromFake(appInstance)
err = s.conn.Apps().Insert(appStruct)
c.Assert(err, check.IsNil)
buf := safe.NewBuffer(nil)
nodes, err := p.Cluster().Nodes()
c.Assert(err, check.IsNil)
c.Assert(nodes, check.HasLen, 2)
c.Assert(net.URLToHost(nodes[0].Address), check.Equals, "127.0.0.1")
c.Assert(net.URLToHost(nodes[1].Address), check.Equals, "localhost")
opts := provision.UpdateNodeOptions{
Address: nodes[0].Address,
Disable: true,
}
err = p.UpdateNode(opts)
c.Assert(err, check.IsNil)
err = p.MoveContainers("127.0.0.1", "localhost", buf)
c.Assert(err, check.IsNil)
parts := strings.Split(buf.String(), "\n")
c.Assert(parts, check.HasLen, 4)
c.Assert(parts[0], check.Equals, "Moving 1 units...")
buf.Reset()
err = p.MoveContainers("localhost", "127.0.0.1", buf)
c.Assert(err, check.IsNil)
parts = strings.Split(buf.String(), "\n")
c.Assert(parts, check.HasLen, 6)
c.Assert(parts[0], check.Equals, "Moving 2 units...")
}
func (s *S) TestNodeForNodeData(c *check.C) {
appInstance := provisiontest.NewFakeApp("myapp", "python", 0)
s.p.Provision(appInstance)
version, err := newSuccessfulVersionForApp(s.p, appInstance, nil)
c.Assert(err, check.IsNil)
conts, err := addContainersWithHost(&changeUnitsPipelineArgs{
toAdd: map[string]*containersToAdd{"web": {Quantity: 1}},
app: appInstance,
version: version,
provisioner: s.p,
})
c.Assert(err, check.IsNil)
data := provision.NodeStatusData{
Units: []provision.UnitStatusData{
{ID: conts[0].ID},
},
}
node, err := s.p.NodeForNodeData(data)
c.Assert(err, check.IsNil)
c.Assert(node.Address(), check.Equals, s.server.URL())
data = provision.NodeStatusData{
Units: []provision.UnitStatusData{
{Name: conts[0].Name},
},
}
node, err = s.p.NodeForNodeData(data)
c.Assert(err, check.IsNil)
c.Assert(node.Address(), check.Equals, s.server.URL())
data = provision.NodeStatusData{
Units: []provision.UnitStatusData{
{ID: "invalidid"},
},
}
_, err = s.p.NodeForNodeData(data)
c.Assert(err, check.Equals, provision.ErrNodeNotFound)
}
func (s *S) TestRebalanceNodes(c *check.C) {
p, err := s.startMultipleServersCluster()
c.Assert(err, check.IsNil)
mainDockerProvisioner = p
appInstance := provisiontest.NewFakeApp("myapp", "python", 0)
p.Provision(appInstance)
version, err := newSuccessfulVersionForApp(s.p, appInstance, nil)
c.Assert(err, check.IsNil)
_, err = addContainersWithHost(&changeUnitsPipelineArgs{
toHost: "127.0.0.1",
toAdd: map[string]*containersToAdd{"web": {Quantity: 4}},
app: appInstance,
version: version,
provisioner: p,
})
c.Assert(err, check.IsNil)
appStruct := s.newAppFromFake(appInstance)
appStruct.Pool = "test-default"
err = s.conn.Apps().Insert(appStruct)
c.Assert(err, check.IsNil)
buf := safe.NewBuffer(nil)
evt, err := event.New(&event.Opts{
Target: event.Target{Type: event.TargetTypeGlobal},
Kind: permission.PermNodeUpdateRebalance,
Owner: s.token,
Allowed: event.Allowed(permission.PermPoolReadEvents),
})
c.Assert(err, check.IsNil)
evt.SetLogWriter(buf)
toRebalance, err := p.RebalanceNodes(provision.RebalanceNodesOptions{
Event: evt,
MetadataFilter: map[string]string{"pool": "test-default"},
})
c.Assert(err, check.IsNil, check.Commentf("Log: %s", buf.String()))
c.Assert(toRebalance, check.Equals, true)
c.Assert(buf.String(), check.Matches, "(?s).*Rebalancing as gap is 4, after rebalance gap will be 0.*Moving unit.*Moved unit.*")
containers, err := p.listContainersByHost("localhost")
c.Assert(err, check.IsNil)
c.Assert(containers, check.HasLen, 2)
containers, err = p.listContainersByHost("127.0.0.1")
c.Assert(err, check.IsNil)
c.Assert(containers, check.HasLen, 2)
}
func (s *S) TestRebalanceNodesCancel(c *check.C) {
p, err := s.startMultipleServersCluster()
c.Assert(err, check.IsNil)
blockCh := make(chan struct{})
createCalled := make(chan struct{}, 2)
s.extraServer.SetHook(func(r *http.Request) {
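// Block container creation on the destination server so the test can cancel
// the rebalance event while unit creation is still in progress.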
if r.URL.Path == "/containers/create" {
createCalled <- struct{}{}
<-blockCh
}
})
mainDockerProvisioner = p
appInstance := provisiontest.NewFakeApp("myapp", "python", 0)
p.Provision(appInstance)
version, err := newSuccessfulVersionForApp(s.p, appInstance, nil)
c.Assert(err, check.IsNil)
_, err = addContainersWithHost(&changeUnitsPipelineArgs{
toHost: "127.0.0.1",
toAdd: map[string]*containersToAdd{"web": {Quantity: 4}},
app: appInstance,
version: version,
provisioner: p,
})
c.Assert(err, check.IsNil)
appStruct := s.newAppFromFake(appInstance)
appStruct.Pool = "test-default"
err = s.conn.Apps().Insert(appStruct)
c.Assert(err, check.IsNil)
buf := safe.NewBuffer(nil)
evt, err := event.New(&event.Opts{
Target: event.Target{Type: event.TargetTypeGlobal},
Kind: permission.PermNodeUpdateRebalance,
Owner: s.token,
Allowed: event.Allowed(permission.PermPoolReadEvents),
Cancelable: true,
AllowedCancel: event.Allowed(permission.PermAppUpdateEvents),
})
c.Assert(err, check.IsNil)
evt.SetLogWriter(buf)
done := make(chan bool)
go func() {
defer close(done)
toRebalance, rebalanceErr := p.RebalanceNodes(provision.RebalanceNodesOptions{
Event: evt,
MetadataFilter: map[string]string{"pool": "test-default"},
})
c.Assert(rebalanceErr, check.ErrorMatches, "(?s).*Caused by: unit creation canceled by user action.*")
c.Assert(toRebalance, check.Equals, true)
}()
<-createCalled
evtDB, err := event.GetByID(evt.UniqueID)
c.Assert(err, check.IsNil)
err = evtDB.TryCancel("because yes", "[email protected]")
c.Assert(err, check.IsNil)
close(blockCh)
<-done
c.Assert(buf.String(), check.Matches, "(?s).*Rebalancing as gap is 4, after rebalance gap will be 0.*Moving unit.*")
containers, err := p.listContainersByHost("localhost")
c.Assert(err, check.IsNil)
c.Assert(containers, check.HasLen, 0)
containers, err = p.listContainersByHost("127.0.0.1")
c.Assert(err, check.IsNil)
c.Assert(containers, check.HasLen, 4)
}
func (s *S) TestRebalanceNodesNoNeed(c *check.C) {
p, err := s.startMultipleServersCluster()
c.Assert(err, check.IsNil)
mainDockerProvisioner = p
appInstance := provisiontest.NewFakeApp("myapp", "python", 0)
p.Provision(appInstance)
version, err := newSuccessfulVersionForApp(s.p, appInstance, nil)
c.Assert(err, check.IsNil)
c1, err := addContainersWithHost(&changeUnitsPipelineArgs{
toHost: "127.0.0.1",
toAdd: map[string]*containersToAdd{"web": {Quantity: 2}},
app: appInstance,
version: version,
provisioner: p,
})
c.Assert(err, check.IsNil)
c2, err := addContainersWithHost(&changeUnitsPipelineArgs{
toHost: "localhost",
toAdd: map[string]*containersToAdd{"web": {Quantity: 2}},
app: appInstance,
version: version,
provisioner: p,
})
c.Assert(err, check.IsNil)
appStruct := s.newAppFromFake(appInstance)
appStruct.Pool = "test-default"
err = s.conn.Apps().Insert(appStruct)
c.Assert(err, check.IsNil)
buf := safe.NewBuffer(nil)
evt, err := event.New(&event.Opts{
Target: event.Target{Type: event.TargetTypeGlobal},
Kind: permission.PermNodeUpdateRebalance,
Owner: s.token,
Allowed: event.Allowed(permission.PermPoolReadEvents),
})
c.Assert(err, check.IsNil)
evt.SetLogWriter(buf)
toRebalance, err := p.RebalanceNodes(provision.RebalanceNodesOptions{
Event: evt,
MetadataFilter: map[string]string{"pool": "test-default"},
})
c.Assert(err, check.IsNil, check.Commentf("Log: %s", buf.String()))
c.Assert(toRebalance, check.Equals, false)
c.Assert(buf.String(), check.Matches, "")
conts, err := p.ListContainers(nil)
c.Assert(err, check.IsNil)
c.Assert(conts, check.Not(check.DeepEquals), append(c1, c2...))
}
func (s *S) TestRebalanceNodesNoNeedForce(c *check.C) {
p, err := s.startMultipleServersCluster()
c.Assert(err, check.IsNil)
mainDockerProvisioner = p
appInstance := provisiontest.NewFakeApp("myapp", "python", 0)
p.Provision(appInstance)
version, err := newSuccessfulVersionForApp(s.p, appInstance, nil)
c.Assert(err, check.IsNil)
c1, err := addContainersWithHost(&changeUnitsPipelineArgs{
toHost: "127.0.0.1",
toAdd: map[string]*containersToAdd{"web": {Quantity: 2}},
app: appInstance,
version: version,
provisioner: p,
})
c.Assert(err, check.IsNil)
c2, err := addContainersWithHost(&changeUnitsPipelineArgs{
toHost: "localhost",
toAdd: map[string]*containersToAdd{"web": {Quantity: 2}},
app: appInstance,
version: version,
provisioner: p,
})
c.Assert(err, check.IsNil)
appStruct := s.newAppFromFake(appInstance)
appStruct.Pool = "test-default"
err = s.conn.Apps().Insert(appStruct)
c.Assert(err, check.IsNil)
buf := safe.NewBuffer(nil)
evt, err := event.New(&event.Opts{
Target: event.Target{Type: event.TargetTypeGlobal},
Kind: permission.PermNodeUpdateRebalance,
Owner: s.token,
Allowed: event.Allowed(permission.PermPoolReadEvents),
})
c.Assert(err, check.IsNil)
evt.SetLogWriter(buf)
toRebalance, err := p.RebalanceNodes(provision.RebalanceNodesOptions{
Event: evt,
Force: true,
MetadataFilter: map[string]string{"pool": "test-default"},
})
c.Assert(err, check.IsNil, check.Commentf("Log: %s", buf.String()))
c.Assert(toRebalance, check.Equals, true)
c.Assert(buf.String(), check.Matches, "(?s).*Rebalancing 4 units.*Moving unit.*Moved unit.*")
conts, err := p.ListContainers(nil)
c.Assert(err, check.IsNil)
c.Assert(conts, check.Not(check.DeepEquals), append(c1, c2...))
}
func (s *S) TestRebalanceNodesDry(c *check.C) {
p, err := s.startMultipleServersCluster()
c.Assert(err, check.IsNil)
mainDockerProvisioner = p
appInstance := provisiontest.NewFakeApp("myapp", "python", 0)
p.Provision(appInstance)
version, err := newSuccessfulVersionForApp(s.p, appInstance, nil)
c.Assert(err, check.IsNil)
_, err = addContainersWithHost(&changeUnitsPipelineArgs{
toHost: "127.0.0.1",
toAdd: map[string]*containersToAdd{"web": {Quantity: 4}},
app: appInstance,
version: version,
provisioner: p,
})
c.Assert(err, check.IsNil)
appStruct := s.newAppFromFake(appInstance)
appStruct.Pool = "test-default"
err = s.conn.Apps().Insert(appStruct)
c.Assert(err, check.IsNil)
buf := safe.NewBuffer(nil)
evt, err := event.New(&event.Opts{
Target: event.Target{Type: event.TargetTypeGlobal},
Kind: permission.PermNodeUpdateRebalance,
Owner: s.token,
Allowed: event.Allowed(permission.PermPoolReadEvents),
})
c.Assert(err, check.IsNil)
evt.SetLogWriter(buf)
toRebalance, err := p.RebalanceNodes(provision.RebalanceNodesOptions{
Event: evt,
Dry: true,
MetadataFilter: map[string]string{"pool": "test-default"},
})
c.Assert(err, check.IsNil, check.Commentf("Log: %s", buf.String()))
c.Assert(toRebalance, check.Equals, true)
c.Assert(buf.String(), check.Matches, "(?s).*Rebalancing as gap is 4, after rebalance gap will be 0.*Would move unit.*")
containers, err := p.listContainersByHost("localhost")
c.Assert(err, check.IsNil)
c.Assert(containers, check.HasLen, 0)
containers, err = p.listContainersByHost("127.0.0.1")
c.Assert(err, check.IsNil)
c.Assert(containers, check.HasLen, 4)
}
| {
fakeHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
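// Every request gets the same fixed body, standing in for real image bytes.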
fmt.Fprint(w, "fake image")
})
return httptest.NewServer(fakeHandler)
} |
tw_dlg.js | tinyMCE.addI18n('tw.advlink_dlg',{"target_name":"\u76ee\u6a19\u540d\u7a31",classes:"\u985e\u5225",style:"\u6a23\u5f0f",id:"ID","popup_position":"\u4f4d\u7f6e(X/Y)",langdir:"\u8a9e\u8a00\u66f8\u5beb\u65b9\u5411","popup_size":"\u5927\u5c0f","popup_dependent":"\u700f\u89bd\u5668\u9650\u5236(\u50c5\u652f\u63f4 Mozilla/Firefox)","popup_resizable":"\u5f48\u7a97\u53ef\u8abf\u6574\u5927\u5c0f","popup_location":"\u986f\u793a\u5730\u5740\u6b04","popup_menubar":"\u986f\u793a\u529f\u80fd\u9078\u55ae\u5217","popup_toolbar":"\u986f\u793a\u5de5\u5177\u5217","popup_statusbar":"\u986f\u793a\u72c0\u614b\u5217","popup_scrollbars":"\u986f\u793a\u6efe\u52d5\u689d","popup_return":"\u63d2\u5165 \"return false\"","popup_name":"\u8996\u7a97\u540d\u7a31","popup_url":"\u5f48\u51fa\u8996\u7a97URL",popup:"JavaScript \u5f48\u51fa\u8996\u7a97","target_blank":"\u5728\u65b0\u8996\u7a97\u6253\u958b\u8d85\u9023\u7d50","target_top":"\u5728\u9802\u90e8\u8996\u7a97\u6253\u958b\u8d85\u9023\u7d50","target_parent":"\u5728\u7236\u8996\u7a97\u6253\u958b\u8d85\u9023\u7d50","target_same":"\u5728\u76ee\u524d\u8996\u7a97\u6253\u958b\u8d85\u9023\u7d50","anchor_names":"\u66f8\u7c64","popup_opts":"\u9078\u9805","advanced_props":"\u9ad8\u7d1a\u5c6c\u6027","event_props":"\u4e8b\u4ef6","popup_props":"\u5f48\u51fa\u8996\u7a97\u5c6c\u6027","general_props":"\u4e00\u822c\u5c6c\u6027","advanced_tab":"\u9ad8\u7d1a\u5c6c\u6027","events_tab":"\u4e8b\u4ef6","popup_tab":"\u5f48\u51fa\u8996\u7a97","general_tab":"\u4e00\u822c",list:"\u8d85\u9023\u7d50\u6e05\u55ae","is_external":"\u60a8\u8f38\u5165\u7684 URL \u4f3c\u4e4e\u70ba\u5916\u90e8\u8d85\u9023\u7d50\uff0c\u662f\u5426\u8981\u52a0\u4e0a \"http://\" \uff1f","is_email":"\u60a8\u8f38\u5165\u7684\u4f3c\u4e4e\u662f\u96fb\u5b50\u90f5\u4ef6\u5730\u5740,\u662f\u5426\u9700\u8981\u52a0 mailto:\uff1f",titlefield:"\u6a19\u984c",target:"\u76ee\u6a19",url:"\u8d85\u9023\u7d50URL",title:"\u63d2\u5165/\u7de8\u8f2f\u8d85\u9023\u7d50","link_list":"\u8d85\u9023\u7d50\u6e05\u55ae",rtl:"\u5f9e\u53f3\u5230\u5de6",ltr:"\u5f9e\u5de6\u5230\u53f3",accesskey:"\u5feb\u901f\u9375",tabindex:"Tab\u7d22\u5f15",rev:"\u76ee\u6a19\u5230\u7db2\u9801\u7684\u95dc\u4fc2",rel:"\u7db2\u9801\u5230\u76ee\u6a19\u7684\u95dc\u4fc2",mime:"\u76ee\u6a19MIME\u985e\u578b",encoding:"\u76ee\u6a19\u8a9e\u8a00\u7de8\u78bc",langcode:"\u8a9e\u8a00\u7de8\u78bc","target_langcode":"\u76ee\u6a19\u8a9e\u8a00",width:"Width",height:"Height"}); | ||
testNumberField.js | var fieldTests = require('./commonFieldTestUtils.js');
module.exports = {
before: fieldTests.before,
after: fieldTests.after,
'Number field should show correctly in the initial modal': function (browser) {
browser.app.openFieldList('Number');
browser.listPage.createFirstItem();
browser.app.waitForInitialFormScreen();
browser.initialFormPage.assertUI({
listName: 'Number',
fields: ['name', 'fieldA']
});
},
'restoring test state': function(browser) {
browser.initialFormPage.cancel();
browser.app.waitForListScreen();
},
'Number field can be filled via the initial modal': function(browser) {
browser.app.openFieldList('Number');
browser.listPage.createFirstItem();
browser.app.waitForInitialFormScreen();
browser.initialFormPage.fillInputs({
listName: 'Number',
fields: {
'name': {value: 'Number Field Test 1'},
'fieldA': {value: '1'},
}
});
browser.initialFormPage.assertInputs({
listName: 'Number',
fields: {
'name': {value: 'Number Field Test 1'},
'fieldA': {value: '1'},
}
});
browser.initialFormPage.save();
browser.app.waitForItemScreen();
browser.itemPage.assertInputs({
listName: 'Number',
fields: {
'name': {value: 'Number Field Test 1'},
'fieldA': {value: '1'},
}
})
},
'Number field should show correctly in the edit form': function(browser) {
browser.itemPage.assertUI({
listName: 'Number',
fields: ['fieldA', 'fieldB']
});
},
'Number field can be filled via the edit form': function(browser) {
browser.itemPage.fillInputs({
listName: 'Number',
fields: {
'fieldB': {value: '2'}
}
});
browser.itemPage.save();
browser.app.waitForItemScreen(); | 'name': {value: 'Number Field Test 1'},
'fieldA': {value: '1'},
'fieldB': {value: '2'}
}
})
},
}; | browser.itemPage.assertFlashMessage('Your changes have been saved successfully');
browser.itemPage.assertInputs({
listName: 'Number',
fields: { |
metadata.go | package builder
import (
"github.com/puppetlabs/relay-core/pkg/manager/reject"
"github.com/puppetlabs/relay-core/pkg/model"
)
type metadataManagers struct {
actionMetadata model.ActionMetadataManager
connections model.ConnectionManager
conditions model.ConditionGetterManager
events model.EventManager
environment model.EnvironmentGetterManager
logs model.LogManager
parameters model.ParameterGetterManager
secrets model.SecretManager
spec model.SpecGetterManager
state model.StateGetterManager
stepDecorators model.StepDecoratorManager
stepOutputs model.StepOutputManager
workflowRuns model.WorkflowRunManager
timers model.TimerSetterManager
}
var _ model.MetadataManagers = &metadataManagers{}
func (mm *metadataManagers) ActionMetadata() model.ActionMetadataManager {
return mm.actionMetadata
}
func (mm *metadataManagers) Connections() model.ConnectionManager {
return mm.connections
}
func (mm *metadataManagers) Conditions() model.ConditionGetterManager {
return mm.conditions
}
func (mm *metadataManagers) Events() model.EventManager {
return mm.events
}
func (mm *metadataManagers) Environment() model.EnvironmentGetterManager {
return mm.environment
}
func (mm *metadataManagers) Logs() model.LogManager {
return mm.logs
}
func (mm *metadataManagers) Parameters() model.ParameterGetterManager {
return mm.parameters
}
func (mm *metadataManagers) Secrets() model.SecretManager {
return mm.secrets
}
func (mm *metadataManagers) Spec() model.SpecGetterManager {
return mm.spec
}
func (mm *metadataManagers) State() model.StateGetterManager {
return mm.state
}
func (mm *metadataManagers) StepDecorators() model.StepDecoratorManager {
return mm.stepDecorators
}
func (mm *metadataManagers) StepOutputs() model.StepOutputManager {
return mm.stepOutputs
}
func (mm *metadataManagers) WorkflowRuns() model.WorkflowRunManager {
return mm.workflowRuns
}
func (mm *metadataManagers) Timers() model.TimerSetterManager {
return mm.timers
}
type MetadataBuilder struct {
actionMetadata model.ActionMetadataManager
connections model.ConnectionManager
conditions model.ConditionGetterManager
events model.EventManager
environment model.EnvironmentGetterManager
logs model.LogManager
parameters model.ParameterGetterManager
secrets model.SecretManager
spec model.SpecGetterManager
state model.StateGetterManager
stepDecorators model.StepDecoratorManager
stepOutputs model.StepOutputManager
workflowRuns model.WorkflowRunManager
timers model.TimerSetterManager
}
func (mb *MetadataBuilder) SetActionMetadata(m model.ActionMetadataManager) *MetadataBuilder {
mb.actionMetadata = m
return mb
}
func (mb *MetadataBuilder) SetConnections(m model.ConnectionManager) *MetadataBuilder {
mb.connections = m
return mb
}
func (mb *MetadataBuilder) SetConditions(m model.ConditionGetterManager) *MetadataBuilder {
mb.conditions = m
return mb
}
func (mb *MetadataBuilder) SetEvents(m model.EventManager) *MetadataBuilder {
mb.events = m
return mb
}
func (mb *MetadataBuilder) SetEnvironment(m model.EnvironmentGetterManager) *MetadataBuilder {
mb.environment = m
return mb
}
func (mb *MetadataBuilder) SetLogs(m model.LogManager) *MetadataBuilder {
mb.logs = m
return mb
}
func (mb *MetadataBuilder) SetParameters(m model.ParameterGetterManager) *MetadataBuilder {
mb.parameters = m
return mb
}
func (mb *MetadataBuilder) SetSecrets(m model.SecretManager) *MetadataBuilder {
mb.secrets = m
return mb
}
func (mb *MetadataBuilder) SetSpec(m model.SpecGetterManager) *MetadataBuilder {
mb.spec = m
return mb
}
func (mb *MetadataBuilder) SetState(m model.StateGetterManager) *MetadataBuilder {
mb.state = m
return mb
}
func (mb *MetadataBuilder) SetStepDecorators(m model.StepDecoratorManager) *MetadataBuilder {
mb.stepDecorators = m
return mb
}
func (mb *MetadataBuilder) SetStepOutputs(m model.StepOutputManager) *MetadataBuilder {
mb.stepOutputs = m
return mb
}
func (mb *MetadataBuilder) SetWorkflowRuns(m model.WorkflowRunManager) *MetadataBuilder {
mb.workflowRuns = m
return mb
}
func (mb *MetadataBuilder) SetTimers(m model.TimerSetterManager) *MetadataBuilder {
mb.timers = m
return mb
}
func (mb *MetadataBuilder) Build() model.MetadataManagers {
return &metadataManagers{
actionMetadata: mb.actionMetadata,
connections: mb.connections,
conditions: mb.conditions,
events: mb.events,
environment: mb.environment,
logs: mb.logs,
parameters: mb.parameters,
secrets: mb.secrets,
spec: mb.spec,
state: mb.state,
stepDecorators: mb.stepDecorators,
stepOutputs: mb.stepOutputs,
workflowRuns: mb.workflowRuns,
timers: mb.timers,
}
}
func NewMetadataBuilder() *MetadataBuilder | {
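// Every manager defaults to its rejecting implementation, so any capability
// not explicitly configured via a Set* method fails closed.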
return &MetadataBuilder{
actionMetadata: reject.ActionMetadataManager,
connections: reject.ConnectionManager,
conditions: reject.ConditionManager,
events: reject.EventManager,
environment: reject.EnvironmentManager,
logs: reject.LogManager,
parameters: reject.ParameterManager,
secrets: reject.SecretManager,
spec: reject.SpecManager,
state: reject.StateManager,
stepDecorators: reject.StepDecoratorManager,
stepOutputs: reject.StepOutputManager,
workflowRuns: reject.WorkflowRunManager,
timers: reject.TimerManager,
}
} |
|
systemfacts.py | import errno
import functools
import grp
import json
import logging
import os
import pwd
import re
from six.moves import configparser
import six
from leapp.libraries.stdlib import CalledProcessError, api, run
from leapp.models import SysctlVariablesFacts, SysctlVariable, ActiveKernelModulesFacts, ActiveKernelModule, \
KernelModuleParameter, UsersFacts, User, GroupsFacts, Group, RepositoriesFacts, RepositoryFile, RepositoryData, \
SELinuxFacts, fields, FirewallStatus, FirewallsFacts
def aslist(f):
''' Decorator used to convert generator to list '''
@functools.wraps(f)
def inner(*args, **kwargs):
return list(f(*args, **kwargs))
return inner
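# For example, a generator function decorated with @aslist returns a plain list:
#
#     @aslist
#     def squares(n):
#         for i in range(n):
#             yield i * i
#
#     squares(3)  # -> [0, 1, 4]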
def anyendswith(value, ends):
''' Check if `value` ends with one of the possible `ends` '''
for end in ends:
if value.endswith(end):
return True
return False
def anyhasprefix(value, prefixes):
''' Check if `value` starts with one of the possible `prefixes` '''
for p in prefixes:
if value.startswith(p):
return True
return False
@aslist
def _get_system_users():
for p in pwd.getpwall():
yield User(
name=p.pw_name,
uid=p.pw_uid,
gid=p.pw_gid,
home=p.pw_dir
)
def get_system_users_status():
''' Get a list of users from `/etc/passwd` '''
return UsersFacts(users=_get_system_users())
@aslist
def _get_system_groups():
for g in grp.getgrall():
yield Group(
name=g.gr_name,
gid=g.gr_gid,
members=g.gr_mem
)
def get_system_groups_status():
''' Get a list of groups from `/etc/group` '''
return GroupsFacts(groups=_get_system_groups())
@aslist
def _get_active_kernel_modules(logger):
lines = run(['lsmod'], split=True)['stdout']
for l in lines[1:]: | # Read parameters of the given module as exposed by the
# `/sys` VFS; if there are no parameters exposed, we just
# take the name of the module
base_path = '/sys/module/{module}'.format(module=name)
parameters_path = os.path.join(base_path, 'parameters')
if not os.path.exists(parameters_path):
yield ActiveKernelModule(filename=name, parameters=[])
continue
# Use `modinfo` to probe for signature information
parameter_dict = {}
try:
signature = run(['modinfo', '-F', 'signature', name], split=False)['stdout']
except CalledProcessError:
signature = None
signature_string = None
if signature:
# Remove whitespace from the signature string
signature_string = re.sub(r"\s+", "", signature, flags=re.UNICODE)
# Since we're using the `/sys` VFS we need to use `os.listdir()` to get
# all the property names and then just read from all the listed paths
parameters = sorted(os.listdir(parameters_path))
for param in parameters:
try:
with open(os.path.join(parameters_path, param), mode='r') as fp:
parameter_dict[param] = fp.read().strip()
except IOError as exc:
# Some parameters are write-only; in that case we just log the name
# of the parameter and the module and continue
if exc.errno in (errno.EACCES, errno.EPERM):
msg = 'Unable to read parameter "{param}" of kernel module "{name}"'
logger.warning(msg.format(param=param, name=name))
else:
raise exc
# Project the dictionary as a list of key values
items = [
KernelModuleParameter(name=k, value=v)
for (k, v) in six.iteritems(parameter_dict)
]
yield ActiveKernelModule(
filename=name,
parameters=items,
signature=signature_string
)
def get_active_kernel_modules_status(logger):
''' Get a list of active kernel modules '''
return ActiveKernelModulesFacts(kernel_modules=_get_active_kernel_modules(logger))
@aslist
def _get_sysctls():
unstable = ('fs.dentry-state', 'fs.file-nr', 'fs.inode-nr',
'fs.inode-state', 'kernel.random.uuid', 'kernel.random.entropy_avail',
'kernel.ns_last_pid', 'net.netfilter.nf_conntrack_count',
'net.netfilter.nf_conntrack_events', 'kernel.sched_domain.',
'dev.cdrom.info', 'kernel.pty.nr')
variables = []
for sc in run(['sysctl', '-a'], split=True)['stdout']:
name = sc.split(' ', 1)[0]
# if the sysctl name has an unstable prefix, we skip
if anyhasprefix(name, unstable):
continue
variables.append(sc)
# sort our variables so they can be diffed directly when needed
for var in sorted(variables):
name, value = (part.strip() for part in var.split('=', 1))
yield SysctlVariable(
name=name,
value=value
)
def get_sysctls_status():
r''' Get a list of stable `sysctls` variables
Note that some variables are inherently unstable and we need to blacklist
them:
diff -u <(sysctl -a 2>/dev/null | sort) <(sysctl -a 2>/dev/null | sort)\
| grep -E '^\+[a-z]'\
| cut -d' ' -f1\
| cut -d+ -f2
'''
return SysctlVariablesFacts(sysctl_variables=_get_sysctls())
@aslist
def _get_repositories():
def asbool(x):
return x == '1'
@aslist
def _parse(r):
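# Parse one .repo file: keys known to RepositoryData map onto its fields
# (booleans coerced), everything else is preserved as JSON in additional_fields.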
with open(r, mode='r') as fp:
cp = configparser.ConfigParser()
cp.readfp(fp)
for section in cp.sections():
prepared = {'repoid': section, 'additional_fields': {}}
data = dict(cp.items(section))
for key in data.keys():
if key in RepositoryData.fields:
if isinstance(RepositoryData.fields[key], fields.Boolean):
data[key] = asbool(data[key])
prepared[key] = data[key]
else:
prepared['additional_fields'][key] = data[key]
prepared['additional_fields'] = json.dumps(prepared['additional_fields'])
yield RepositoryData(**prepared)
repos = run(
['find', '/etc/yum.repos.d/', '-type', 'f', '-name', '*.repo'],
split=True
)['stdout']
for repo in repos:
yield RepositoryFile(file=repo, data=_parse(repo))
def get_repositories_status():
''' Get basic information about the YUM repositories installed on the system '''
return RepositoriesFacts(repositories=_get_repositories())
def get_selinux_status():
''' Get SELinux status information '''
# will be None if something went wrong or contain SELinuxFacts otherwise
res = None
try:
import selinux
except ImportError:
api.report_error("SELinux Import Error", details="libselinux-python package must be installed.")
return res
outdata = dict({'enabled': selinux.is_selinux_enabled() == 1})
outdata['mls_enabled'] = selinux.is_selinux_mls_enabled() == 1
try:
outdata['runtime_mode'] = "enforcing" if selinux.security_getenforce() == 1 else "permissive"
# FIXME: check selinux_getenforcemode()[0] (that should be the return value of the underlying function)
enforce_mode = selinux.selinux_getenforcemode()[1]
if enforce_mode >= 0:
outdata['static_mode'] = "enforcing" if enforce_mode == 1 else "permissive"
else:
outdata['static_mode'] = "disabled"
outdata['policy'] = selinux.selinux_getpolicytype()[1]
except OSError:
# This happens when SELinux is disabled
# [Errno 2] No such file or directory
outdata['runtime_mode'] = 'permissive'
outdata['static_mode'] = 'disabled'
outdata['policy'] = 'targeted'
res = SELinuxFacts(**outdata)
return res
def get_firewalls_status():
''' Get firewalld status information '''
logger = logging.getLogger('get_firewalld_status')
def _get_firewall_status(service_name):
try:
ret_list = run(['systemctl', 'is-active', service_name], split=True)['stdout']
active = ret_list[0] == 'active'
except CalledProcessError:
active = False
logger.debug('The %s service is likely not active', service_name)
try:
ret_list = run(['systemctl', 'is-enabled', service_name], split=True)['stdout']
enabled = ret_list[0] == 'enabled'
except CalledProcessError:
enabled = False
logger.debug('The %s service is likely neither enabled nor running', service_name)
return FirewallStatus(
active=active,
enabled=enabled,
)
return FirewallsFacts(
firewalld=_get_firewall_status('firewalld'),
iptables=_get_firewall_status('iptables'),
ip6tables=_get_firewall_status('ip6tables'),
) | name = l.split(' ')[0]
|
factory.rs | // Copyright 2020 The Tink-Rust Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
////////////////////////////////////////////////////////////////////////////////
//! Provides an implementation of deterministic AEAD using a set of underlying implementations.
use std::sync::Arc;
use tink_core::utils::{wrap_err, TinkError};
/// Return a [`tink_core::DeterministicAead`] primitive from the given keyset handle.
pub fn new(
h: &tink_core::keyset::Handle,
) -> Result<Box<dyn tink_core::DeterministicAead>, TinkError> {
new_with_key_manager(h, None)
}
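// A minimal usage sketch (assuming this crate is exposed as `tink_daead` and
// that `kh` is a keyset handle created from a deterministic AEAD key template):
//
//     let d = tink_daead::new(&kh)?;
//     let ct = d.encrypt_deterministically(b"plaintext", b"associated data")?;
//     let pt = d.decrypt_deterministically(&ct, b"associated data")?;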
/// Return a [`tink_core::DeterministicAead`] primitive from the given keyset handle and custom key
/// manager.
fn new_with_key_manager(
h: &tink_core::keyset::Handle,
km: Option<Arc<dyn tink_core::registry::KeyManager>>,
) -> Result<Box<dyn tink_core::DeterministicAead>, TinkError> {
let ps = h
.primitives_with_key_manager(km)
.map_err(|e| wrap_err("daead::factory: cannot obtain primitive set", e))?;
let ret = WrappedDeterministicAead::new(ps)?;
Ok(Box::new(ret))
}
/// A [`tink_core::DeterministicAead`] implementation that uses the underlying primitive set
/// for deterministic encryption and decryption.
#[derive(Clone)]
struct WrappedDeterministicAead {
ps: tink_core::primitiveset::TypedPrimitiveSet<Box<dyn tink_core::DeterministicAead>>,
}
impl WrappedDeterministicAead {
fn new(
ps: tink_core::primitiveset::PrimitiveSet,
) -> Result<WrappedDeterministicAead, TinkError> {
let entry = match &ps.primary {
None => return Err("daead::factory: no primary primitive".into()),
Some(p) => p,
};
match entry.primitive {
tink_core::Primitive::DeterministicAead(_) => {}
_ => return Err("daead::factory: not a DeterministicAEAD primitive".into()),
};
for (_, primitives) in ps.entries.iter() {
for p in primitives {
match p.primitive {
tink_core::Primitive::DeterministicAead(_) => {}
_ => return Err("daead::factory: not a DeterministicAEAD primitive".into()),
};
}
}
// The `.into()` call is only safe because we've just checked that all entries have
// the right type of primitive
Ok(WrappedDeterministicAead { ps: ps.into() })
}
}
impl tink_core::DeterministicAead for WrappedDeterministicAead {
fn encrypt_deterministically(&self, pt: &[u8], aad: &[u8]) -> Result<Vec<u8>, TinkError> {
let primary = self
.ps
.primary
.as_ref()
.ok_or_else(|| TinkError::new("no primary"))?;
let ct = primary.primitive.encrypt_deterministically(pt, aad)?;
let mut ret = Vec::with_capacity(primary.prefix.len() + ct.len());
ret.extend_from_slice(&primary.prefix);
ret.extend_from_slice(&ct);
Ok(ret)
}
fn decrypt_deterministically(&self, ct: &[u8], aad: &[u8]) -> Result<Vec<u8>, TinkError> |
}
| {
// try non-raw keys
let prefix_size = tink_core::cryptofmt::NON_RAW_PREFIX_SIZE;
if ct.len() > prefix_size {
let prefix = &ct[..prefix_size];
let ct_no_prefix = &ct[prefix_size..];
if let Some(entries) = self.ps.entries_for_prefix(prefix) {
for entry in entries {
if let Ok(pt) = entry.primitive.decrypt_deterministically(ct_no_prefix, aad) {
return Ok(pt);
}
}
}
}
// try raw keys
if let Some(entries) = self.ps.raw_entries() {
for entry in entries {
if let Ok(pt) = entry.primitive.decrypt_deterministically(ct, aad) {
return Ok(pt);
}
}
}
// nothing worked
Err("daead::factory: decryption failed".into())
} |
backtesting.py | from collections import defaultdict
from datetime import date, datetime, timedelta
from typing import Dict, List, Set, Tuple
from functools import lru_cache
from copy import copy
import traceback
import numpy as np
import plotly.graph_objects as go
from plotly.subplots import make_subplots
from pandas import DataFrame
from vnpy.trader.constant import Direction, Offset, Interval, Status
from vnpy.trader.database import database_manager
from vnpy.trader.object import OrderData, TradeData, BarData
from vnpy.trader.utility import round_to, extract_vt_symbol
from .template import StrategyTemplate
INTERVAL_DELTA_MAP = {
Interval.MINUTE: timedelta(minutes=1),
Interval.HOUR: timedelta(hours=1),
Interval.DAILY: timedelta(days=1),
}
class BacktestingEngine:
""""""
gateway_name = "BACKTESTING"
def __init__(self):
""""""
self.vt_symbols: List[str] = []
self.start: datetime = None
self.end: datetime = None
self.rates: Dict[str, float] = {}
self.slippages: Dict[str, float] = {}
self.sizes: Dict[str, float] = {}
self.priceticks: Dict[str, float] = {}
self.capital: float = 1_000_000
self.risk_free: float = 0.02
self.strategy: StrategyTemplate = None
self.bars: Dict[str, BarData] = {}
self.datetime: datetime = None
self.interval: Interval = None
self.days: int = 0
self.history_data: Dict[Tuple, BarData] = {}
self.dts: Set[datetime] = set()
self.limit_order_count = 0
self.limit_orders = {}
self.active_limit_orders = {}
self.trade_count = 0
self.trades = {}
self.logs = []
self.daily_results = {}
self.daily_df = None
def clear_data(self) -> None:
"""
Clear all data of last backtesting.
"""
self.strategy = None
self.bars = {}
self.datetime = None
self.limit_order_count = 0
self.limit_orders.clear()
self.active_limit_orders.clear()
self.trade_count = 0
self.trades.clear()
self.logs.clear()
self.daily_results.clear()
self.daily_df = None
def set_parameters(
self,
vt_symbols: List[str],
interval: Interval,
start: datetime,
rates: Dict[str, float],
slippages: Dict[str, float],
sizes: Dict[str, float],
priceticks: Dict[str, float],
capital: int = 0,
end: datetime = None,
risk_free: float = 0
) -> None:
""""""
self.vt_symbols = vt_symbols
self.interval = interval
self.rates = rates
self.slippages = slippages
self.sizes = sizes
self.priceticks = priceticks
self.start = start
self.end = end
self.capital = capital
self.risk_free = risk_free
def add_strategy(self, strategy_class: type, setting: dict) -> None:
""""""
self.strategy = strategy_class(
self, strategy_class.__name__, copy(self.vt_symbols), setting
)
def load_data(self) -> None:
""""""
self.output("开始加载历史数据")
if not self.end:
self.end = datetime.now()
if self.start >= self.end:
self.output("起始日期必须小于结束日期")
return
# Clear previously loaded history data
self.history_data.clear()
self.dts.clear()
# Load 30 days of data each time and allow for progress update
progress_delta = timedelta(days=30)
total_delta = self.end - self.start
interval_delta = INTERVAL_DELTA_MAP[self.interval]
for vt_symbol in self.vt_symbols:
start = self.start
end = self.start + progress_delta
progress = 0
data_count = 0
while start < self.end:
end = min(end, self.end) # Make sure end time stays within set range
data = load_bar_data(
vt_symbol,
self.interval,
start,
end
)
for bar in data:
self.dts.add(bar.datetime)
self.history_data[(bar.datetime, vt_symbol)] = bar
data_count += 1
progress += progress_delta / total_delta
progress = min(progress, 1)
progress_bar = "#" * int(progress * 10)
self.output(f"{vt_symbol}加载进度:{progress_bar} [{progress:.0%}]")
start = end + interval_delta
end += (progress_delta + interval_delta)
self.output(f"{vt_symbol}历史数据加载完成,数据量:{data_count}")
self.output("所有历史数据加载完成")
def run_backtesting(self) -> None:
""""""
self.strategy.on_init()
# Generate sorted datetime list
dts = list(self.dts)
dts.sort()
# Use the first [self.days] days of history data to initialize the strategy
day_count = 0
ix = 0
for ix, dt in enumerate(dts):
if self.datetime and dt.day != self.datetime.day:
day_count += 1
if day_count >= self.days:
break
try:
self.new_bars(dt)
except Exception:
self.output("触发异常,回测终止")
self.output(traceback.format_exc())
return
self.strategy.inited = True
self.output("策略初始化完成")
self.strategy.on_start()
self.strategy.trading = True
self.output("开始回放历史数据")
# Use the rest of history data for running backtesting
for dt in dts[ix:]:
try:
self.new_bars(dt)
except Exception:
self.output("触发异常,回测终止")
self.output(traceback.format_exc())
return
self.output("历史数据回放结束")
def calculate_result(self) -> None:
""""""
self.output("开始计算逐日盯市盈亏")
if not self.trades:
self.output("成交记录为空,无法计算")
return
# Add trade data into daily result.
for trade in self.trades.values():
d = trade.datetime.date()
daily_result = self.daily_results[d]
daily_result.add_trade(trade)
# Calculate daily result by iteration.
pre_closes = {}
start_poses = {}
for daily_result in self.daily_results.values():
daily_result.calculate_pnl(
pre_closes,
start_poses,
self.sizes,
self.rates,
self.slippages,
)
pre_closes = daily_result.close_prices
start_poses = daily_result.end_poses
# Generate dataframe
results = defaultdict(list)
for daily_result in self.daily_results.values():
fields = [
"date", "trade_count", "turnover",
"commission", "slippage", "trading_pnl",
"holding_pnl", "total_pnl", "net_pnl"
]
for key in fields:
value = getattr(daily_result, key)
results[key].append(value)
self.daily_df = DataFrame.from_dict(results).set_index("date")
self.output("逐日盯市盈亏计算完成")
return self.daily_df
def calculate_statistics(self, df: DataFrame = None, output=True) -> None:
""""""
self.output("开始计算策略统计指标")
# Check for an externally supplied DataFrame
if df is None:
df = self.daily_df
# Check for ini | if df is None:
# Set all statistics to 0 if no trade.
start_date = ""
end_date = ""
total_days = 0
profit_days = 0
loss_days = 0
end_balance = 0
max_drawdown = 0
max_ddpercent = 0
max_drawdown_duration = 0
total_net_pnl = 0
daily_net_pnl = 0
total_commission = 0
daily_commission = 0
total_slippage = 0
daily_slippage = 0
total_turnover = 0
daily_turnover = 0
total_trade_count = 0
daily_trade_count = 0
total_return = 0
annual_return = 0
daily_return = 0
return_std = 0
sharpe_ratio = 0
return_drawdown_ratio = 0
else:
# Calculate balance related time series data
df["balance"] = df["net_pnl"].cumsum() + self.capital
df["return"] = np.log(df["balance"] / df["balance"].shift(1)).fillna(0)
df["highlevel"] = (
df["balance"].rolling(
min_periods=1, window=len(df), center=False).max()
)
df["drawdown"] = df["balance"] - df["highlevel"]
df["ddpercent"] = df["drawdown"] / df["highlevel"] * 100
# Calculate statistics value
start_date = df.index[0]
end_date = df.index[-1]
total_days = len(df)
profit_days = len(df[df["net_pnl"] > 0])
loss_days = len(df[df["net_pnl"] < 0])
end_balance = df["balance"].iloc[-1]
max_drawdown = df["drawdown"].min()
max_ddpercent = df["ddpercent"].min()
max_drawdown_end = df["drawdown"].idxmin()
if isinstance(max_drawdown_end, date):
max_drawdown_start = df["balance"][:max_drawdown_end].idxmax()
max_drawdown_duration = (max_drawdown_end - max_drawdown_start).days
else:
max_drawdown_duration = 0
total_net_pnl = df["net_pnl"].sum()
daily_net_pnl = total_net_pnl / total_days
total_commission = df["commission"].sum()
daily_commission = total_commission / total_days
total_slippage = df["slippage"].sum()
daily_slippage = total_slippage / total_days
total_turnover = df["turnover"].sum()
daily_turnover = total_turnover / total_days
total_trade_count = df["trade_count"].sum()
daily_trade_count = total_trade_count / total_days
total_return = (end_balance / self.capital - 1) * 100
annual_return = total_return / total_days * 240
daily_return = df["return"].mean() * 100
return_std = df["return"].std() * 100
if return_std:
daily_risk_free = self.risk_free / np.sqrt(240)
sharpe_ratio = (daily_return - daily_risk_free) / return_std * np.sqrt(240)
else:
sharpe_ratio = 0
return_drawdown_ratio = -total_net_pnl / max_drawdown if max_drawdown else 0
# Output
if output:
self.output("-" * 30)
self.output(f"首个交易日:\t{start_date}")
self.output(f"最后交易日:\t{end_date}")
self.output(f"总交易日:\t{total_days}")
self.output(f"盈利交易日:\t{profit_days}")
self.output(f"亏损交易日:\t{loss_days}")
self.output(f"起始资金:\t{self.capital:,.2f}")
self.output(f"结束资金:\t{end_balance:,.2f}")
self.output(f"总收益率:\t{total_return:,.2f}%")
self.output(f"年化收益:\t{annual_return:,.2f}%")
self.output(f"最大回撤: \t{max_drawdown:,.2f}")
self.output(f"百分比最大回撤: {max_ddpercent:,.2f}%")
self.output(f"最长回撤天数: \t{max_drawdown_duration}")
self.output(f"总盈亏:\t{total_net_pnl:,.2f}")
self.output(f"总手续费:\t{total_commission:,.2f}")
self.output(f"总滑点:\t{total_slippage:,.2f}")
self.output(f"总成交金额:\t{total_turnover:,.2f}")
self.output(f"总成交笔数:\t{total_trade_count}")
self.output(f"日均盈亏:\t{daily_net_pnl:,.2f}")
self.output(f"日均手续费:\t{daily_commission:,.2f}")
self.output(f"日均滑点:\t{daily_slippage:,.2f}")
self.output(f"日均成交金额:\t{daily_turnover:,.2f}")
self.output(f"日均成交笔数:\t{daily_trade_count}")
self.output(f"日均收益率:\t{daily_return:,.2f}%")
self.output(f"收益标准差:\t{return_std:,.2f}%")
self.output(f"Sharpe Ratio:\t{sharpe_ratio:,.2f}")
self.output(f"收益回撤比:\t{return_drawdown_ratio:,.2f}")
statistics = {
"start_date": start_date,
"end_date": end_date,
"total_days": total_days,
"profit_days": profit_days,
"loss_days": loss_days,
"capital": self.capital,
"end_balance": end_balance,
"max_drawdown": max_drawdown,
"max_ddpercent": max_ddpercent,
"max_drawdown_duration": max_drawdown_duration,
"total_net_pnl": total_net_pnl,
"daily_net_pnl": daily_net_pnl,
"total_commission": total_commission,
"daily_commission": daily_commission,
"total_slippage": total_slippage,
"daily_slippage": daily_slippage,
"total_turnover": total_turnover,
"daily_turnover": daily_turnover,
"total_trade_count": total_trade_count,
"daily_trade_count": daily_trade_count,
"total_return": total_return,
"annual_return": annual_return,
"daily_return": daily_return,
"return_std": return_std,
"sharpe_ratio": sharpe_ratio,
"return_drawdown_ratio": return_drawdown_ratio,
}
# Filter potential error infinite value
for key, value in statistics.items():
if value in (np.inf, -np.inf):
value = 0
statistics[key] = np.nan_to_num(value)
self.output("策略统计指标计算完成")
return statistics
def show_chart(self, df: DataFrame = None) -> None:
""""""
# Check for an externally supplied DataFrame
if df is None:
df = self.daily_df
# Check for init DataFrame
if df is None:
return
fig = make_subplots(
rows=4,
cols=1,
subplot_titles=["Balance", "Drawdown", "Daily Pnl", "Pnl Distribution"],
vertical_spacing=0.06
)
balance_line = go.Scatter(
x=df.index,
y=df["balance"],
mode="lines",
name="Balance"
)
drawdown_scatter = go.Scatter(
x=df.index,
y=df["drawdown"],
fillcolor="red",
fill='tozeroy',
mode="lines",
name="Drawdown"
)
pnl_bar = go.Bar(y=df["net_pnl"], name="Daily Pnl")
pnl_histogram = go.Histogram(x=df["net_pnl"], nbinsx=100, name="Days")
fig.add_trace(balance_line, row=1, col=1)
fig.add_trace(drawdown_scatter, row=2, col=1)
fig.add_trace(pnl_bar, row=3, col=1)
fig.add_trace(pnl_histogram, row=4, col=1)
fig.update_layout(height=1000, width=1000)
fig.show()
def update_daily_close(self, bars: Dict[str, BarData], dt: datetime) -> None:
""""""
d = dt.date()
close_prices = {}
for bar in bars.values():
close_prices[bar.vt_symbol] = bar.close_price
daily_result = self.daily_results.get(d, None)
if daily_result:
daily_result.update_close_prices(close_prices)
else:
self.daily_results[d] = PortfolioDailyResult(d, close_prices)
def new_bars(self, dt: datetime) -> None:
""""""
self.datetime = dt
bars: Dict[str, BarData] = {}
for vt_symbol in self.vt_symbols:
bar = self.history_data.get((dt, vt_symbol), None)
# If bar data of vt_symbol at dt exists
if bar:
# Update bar data for crossing order
self.bars[vt_symbol] = bar
# Put bar into dict for strategy.on_bars update
bars[vt_symbol] = bar
# Otherwise, use previous close to backfill
elif vt_symbol in self.bars:
old_bar = self.bars[vt_symbol]
bar = BarData(
symbol=old_bar.symbol,
exchange=old_bar.exchange,
datetime=dt,
open_price=old_bar.close_price,
high_price=old_bar.close_price,
low_price=old_bar.close_price,
close_price=old_bar.close_price,
gateway_name=old_bar.gateway_name
)
self.bars[vt_symbol] = bar
self.cross_limit_order()
self.strategy.on_bars(bars)
self.update_daily_close(self.bars, dt)
def cross_limit_order(self) -> None:
"""
Cross limit order with last bar/tick data.
"""
for order in list(self.active_limit_orders.values()):
bar = self.bars[order.vt_symbol]
long_cross_price = bar.low_price
short_cross_price = bar.high_price
long_best_price = bar.open_price
short_best_price = bar.open_price
# Push order update with status "not traded" (pending).
if order.status == Status.SUBMITTING:
order.status = Status.NOTTRADED
self.strategy.update_order(order)
# Check whether limit orders can be filled.
long_cross = (
order.direction == Direction.LONG
and order.price >= long_cross_price
and long_cross_price > 0
)
short_cross = (
order.direction == Direction.SHORT
and order.price <= short_cross_price
and short_cross_price > 0
)
if not long_cross and not short_cross:
continue
# Push order update with status "all traded" (filled).
order.traded = order.volume
order.status = Status.ALLTRADED
self.strategy.update_order(order)
self.active_limit_orders.pop(order.vt_orderid)
# Push trade update
self.trade_count += 1
if long_cross:
trade_price = min(order.price, long_best_price)
else:
trade_price = max(order.price, short_best_price)
trade = TradeData(
symbol=order.symbol,
exchange=order.exchange,
orderid=order.orderid,
tradeid=str(self.trade_count),
direction=order.direction,
offset=order.offset,
price=trade_price,
volume=order.volume,
datetime=self.datetime,
gateway_name=self.gateway_name,
)
self.strategy.update_trade(trade)
self.trades[trade.vt_tradeid] = trade
def load_bars(
self,
strategy: StrategyTemplate,
days: int,
interval: Interval
) -> None:
""""""
self.days = days
def send_order(
self,
strategy: StrategyTemplate,
vt_symbol: str,
direction: Direction,
offset: Offset,
price: float,
volume: float,
lock: bool,
net: bool
) -> List[str]:
""""""
price = round_to(price, self.priceticks[vt_symbol])
symbol, exchange = extract_vt_symbol(vt_symbol)
self.limit_order_count += 1
order = OrderData(
symbol=symbol,
exchange=exchange,
orderid=str(self.limit_order_count),
direction=direction,
offset=offset,
price=price,
volume=volume,
status=Status.SUBMITTING,
datetime=self.datetime,
gateway_name=self.gateway_name,
)
self.active_limit_orders[order.vt_orderid] = order
self.limit_orders[order.vt_orderid] = order
return [order.vt_orderid]
def cancel_order(self, strategy: StrategyTemplate, vt_orderid: str) -> None:
"""
Cancel order by vt_orderid.
"""
if vt_orderid not in self.active_limit_orders:
return
order = self.active_limit_orders.pop(vt_orderid)
order.status = Status.CANCELLED
self.strategy.update_order(order)
def write_log(self, msg: str, strategy: StrategyTemplate = None) -> None:
"""
Write log message.
"""
msg = f"{self.datetime}\t{msg}"
self.logs.append(msg)
def send_email(self, msg: str, strategy: StrategyTemplate = None) -> None:
"""
Send email to default receiver.
"""
pass
def sync_strategy_data(self, strategy: StrategyTemplate) -> None:
"""
Sync strategy data into json file.
"""
pass
def put_strategy_event(self, strategy: StrategyTemplate) -> None:
"""
Put an event to update strategy status.
"""
pass
def output(self, msg) -> None:
"""
Output message of backtesting engine.
"""
print(f"{datetime.now()}\t{msg}")
def get_all_trades(self) -> List[TradeData]:
"""
Return all trade data of current backtesting result.
"""
return list(self.trades.values())
def get_all_orders(self) -> List[OrderData]:
"""
Return all limit order data of current backtesting result.
"""
return list(self.limit_orders.values())
def get_all_daily_results(self) -> List["PortfolioDailyResult"]:
"""
Return all daily result data.
"""
return list(self.daily_results.values())
class ContractDailyResult:
""""""
def __init__(self, result_date: date, close_price: float):
""""""
self.date: date = result_date
self.close_price: float = close_price
self.pre_close: float = 0
self.trades: List[TradeData] = []
self.trade_count: int = 0
self.start_pos: float = 0
self.end_pos: float = 0
self.turnover: float = 0
self.commission: float = 0
self.slippage: float = 0
self.trading_pnl: float = 0
self.holding_pnl: float = 0
self.total_pnl: float = 0
self.net_pnl: float = 0
def add_trade(self, trade: TradeData) -> None:
""""""
self.trades.append(trade)
def calculate_pnl(
self,
pre_close: float,
start_pos: float,
size: int,
rate: float,
slippage: float
) -> None:
""""""
# If no pre_close provided on the first day,
# use value 1 to avoid zero division error
if pre_close:
self.pre_close = pre_close
else:
self.pre_close = 1
# Holding pnl is the pnl from holding position at day start
self.start_pos = start_pos
self.end_pos = start_pos
self.holding_pnl = self.start_pos * (self.close_price - self.pre_close) * size
# Trading pnl is the pnl from new trade during the day
self.trade_count = len(self.trades)
for trade in self.trades:
if trade.direction == Direction.LONG:
pos_change = trade.volume
else:
pos_change = -trade.volume
self.end_pos += pos_change
turnover = trade.volume * size * trade.price
self.trading_pnl += pos_change * (self.close_price - trade.price) * size
self.slippage += trade.volume * size * slippage
self.turnover += turnover
self.commission += turnover * rate
# Net pnl takes account of commission and slippage cost
self.total_pnl = self.trading_pnl + self.holding_pnl
self.net_pnl = self.total_pnl - self.commission - self.slippage
def update_close_price(self, close_price: float) -> None:
""""""
self.close_price = close_price
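# A minimal worked example of the daily PnL arithmetic above (a sketch; assumes
# the same vnpy TradeData/Direction/Exchange/Offset objects the engine uses):
#
#     from datetime import date, datetime
#     from vnpy.trader.constant import Direction, Exchange, Offset
#     from vnpy.trader.object import TradeData
#
#     result = ContractDailyResult(date(2021, 1, 4), close_price=105.0)
#     result.add_trade(TradeData(
#         symbol="IF2101", exchange=Exchange.CFFEX, orderid="1", tradeid="1",
#         direction=Direction.LONG, offset=Offset.OPEN, price=102.0, volume=1,
#         datetime=datetime(2021, 1, 4, 10, 0), gateway_name="BACKTESTING",
#     ))
#     result.calculate_pnl(pre_close=100.0, start_pos=2, size=10, rate=0.0001, slippage=0.2)
#     # holding_pnl = 2 * (105 - 100) * 10 = 100.0
#     # trading_pnl = 1 * (105 - 102) * 10 = 30.0
#     # net_pnl     = 130.0 - 0.102 (commission) - 2.0 (slippage) = 127.898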
class PortfolioDailyResult:
""""""
def __init__(self, result_date: date, close_prices: Dict[str, float]):
""""""
self.date: date = result_date
self.close_prices: Dict[str, float] = close_prices
self.pre_closes: Dict[str, float] = {}
self.start_poses: Dict[str, float] = {}
self.end_poses: Dict[str, float] = {}
self.contract_results: Dict[str, ContractDailyResult] = {}
for vt_symbol, close_price in close_prices.items():
self.contract_results[vt_symbol] = ContractDailyResult(result_date, close_price)
self.trade_count: int = 0
self.turnover: float = 0
self.commission: float = 0
self.slippage: float = 0
self.trading_pnl: float = 0
self.holding_pnl: float = 0
self.total_pnl: float = 0
self.net_pnl: float = 0
def add_trade(self, trade: TradeData) -> None:
""""""
contract_result = self.contract_results[trade.vt_symbol]
contract_result.add_trade(trade)
def calculate_pnl(
self,
pre_closes: Dict[str, float],
start_poses: Dict[str, float],
sizes: Dict[str, float],
rates: Dict[str, float],
slippages: Dict[str, float],
) -> None:
""""""
self.pre_closes = pre_closes
for vt_symbol, contract_result in self.contract_results.items():
contract_result.calculate_pnl(
pre_closes.get(vt_symbol, 0),
start_poses.get(vt_symbol, 0),
sizes[vt_symbol],
rates[vt_symbol],
slippages[vt_symbol]
)
self.trade_count += contract_result.trade_count
self.turnover += contract_result.turnover
self.commission += contract_result.commission
self.slippage += contract_result.slippage
self.trading_pnl += contract_result.trading_pnl
self.holding_pnl += contract_result.holding_pnl
self.total_pnl += contract_result.total_pnl
self.net_pnl += contract_result.net_pnl
self.end_poses[vt_symbol] = contract_result.end_pos
def update_close_prices(self, close_prices: Dict[str, float]) -> None:
""""""
self.close_prices = close_prices
for vt_symbol, close_price in close_prices.items():
contract_result = self.contract_results.get(vt_symbol, None)
if contract_result:
contract_result.update_close_price(close_price)
@lru_cache(maxsize=999)
def load_bar_data(
vt_symbol: str,
interval: Interval,
start: datetime,
end: datetime
):
""""""
symbol, exchange = extract_vt_symbol(vt_symbol)
return database_manager.load_bar_data(
symbol, exchange, interval, start, end
)
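# A minimal usage sketch (assumes vnpy's database_manager is configured and
# that the symbol and date range below exist in it; values are illustrative):
#
#     bars = load_bar_data(
#         "IF2101.CFFEX",
#         Interval.MINUTE,
#         start=datetime(2021, 1, 4),
#         end=datetime(2021, 1, 8),
#     )
#
# Because of the lru_cache decorator, repeated calls with the same arguments
# reuse the first query result instead of hitting the database again.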
events.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
### THIS FILE WAS GENERATED BY generate_classes.py - DO NOT EDIT ###
### (Generated on 2018-11-01 23:48:48.307368) ###
from . import base_classes
class SourceOrderChanged(base_classes.Baseevents):
"""Scene items have been reordered.
:Returns:
*name*
type: String
Name of the scene where items have been reordered.
*sources*
type: Array
Array of sources.
"""
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "SourceOrderChanged"
self.datain["name"] = None
self.datain["sources"] = None
def getName(self):
return self.datain["name"]
def getSources(self):
return self.datain["sources"]
class SceneItemTransformChanged(base_classes.Baseevents):
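    """An item's transform has been changed.
    :Returns:
        *scene_name*
            type: String
            Name of the scene.
        *item_name*
            type: String
            Name of the item in the scene.
        *item_id*
            type: int
            Scene item ID.
        *transform*
            type: SceneItemTransform
            Scene item transform properties.
    """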
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "SceneItemTransformChanged"
self.datain["scene-name"] = None
self.datain["item-name"] = None
self.datain["item-id"] = None
self.datain["transform"] = None
def getSceneName(self):
return self.datain["scene-name"]
def getItemName(self):
return self.datain["item-name"]
def getItemId(self):
return self.datain["item-id"]
def getItemTransform(self):
return self.datain["transform"]
class SceneItemAdded(base_classes.Baseevents):
"""An item has been added to the current scene.
:Returns:
*scene_name*
type: String
Name of the scene.
*item_name*
type: String
Name of the item added to the scene.
"""
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "SceneItemAdded"
self.datain["scene-name"] = None
self.datain["item-name"] = None
def getSceneName(self):
return self.datain["scene-name"]
def getItemName(self):
return self.datain["item-name"]
class SceneItemRemoved(base_classes.Baseevents):
"""An item has been removed from the current scene.
:Returns:
*scene_name*
type: String
Name of the scene.
*item_name*
type: String
Name of the item removed from the scene.
"""
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "SceneItemRemoved"
self.datain["scene-name"] = None
self.datain["item-name"] = None
def getSceneName(self):
return self.datain["scene-name"]
def getItemName(self):
return self.datain["item-name"]
class SceneItemVisibilityChanged(base_classes.Baseevents):
"""An item's visibility has been toggled.
:Returns:
*scene_name*
type: String
Name of the scene.
*item_name*
type: String
Name of the item in the scene.
*item_visible*
type: boolean
New visibility state of the item.
"""
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "SceneItemVisibilityChanged"
self.datain["scene-name"] = None
self.datain["item-name"] = None
self.datain["item-visible"] = None
def getSceneName(self):
return self.datain["scene-name"]
def getItemName(self):
return self.datain["item-name"]
def getItemVisible(self):
return self.datain["item-visible"]
class PreviewSceneChanged(base_classes.Baseevents):
"""The selected preview scene has changed (only available in Studio Mode).
:Returns:
*scene_name*
type: String
Name of the scene being previewed.
*sources*
type: Source|Array
List of sources composing the scene. Same specification as [`GetCurrentScene`](#getcurrentscene).
"""
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "PreviewSceneChanged"
self.datain["scene-name"] = None
self.datain["sources"] = None
def getSceneName(self):
return self.datain["scene-name"]
def getSources(self):
return self.datain["sources"]
class StudioModeSwitched(base_classes.Baseevents):
"""Studio Mode has been enabled or disabled.
:Returns:
*new_state*
type: boolean
The new enabled state of Studio Mode.
"""
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "StudioModeSwitched"
self.datain["new-state"] = None
def getNewState(self):
return self.datain["new-state"]
class ReplayStarting(base_classes.Baseevents):
"""A request to start the replay buffer has been issued.
"""
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "ReplayStarting"
class ReplayStarted(base_classes.Baseevents):
"""Replay Buffer started successfully
"""
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "ReplayStarted"
class ReplayStopping(base_classes.Baseevents):
"""A request to stop the replay buffer has been issued.
"""
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "ReplayStopping"
class ReplayStopped(base_classes.Baseevents):
"""Replay Buffer stopped successfully
"""
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "ReplayStopped"
class SwitchScenes(base_classes.Baseevents):
"""Indicates a scene change.
:Returns:
*scene_name*
type: String
The new scene.
*sources*
type: Array
List of sources in the new scene.
"""
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "SwitchScenes"
self.datain["scene-name"] = None
self.datain["sources"] = None
def getSceneName(self):
return self.datain["scene-name"]
def getSources(self):
return self.datain["sources"]
class ScenesChanged(base_classes.Baseevents):
"""The scene list has been modified.
Scenes have been added, removed, or renamed.
"""
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "ScenesChanged"
class SceneCollectionChanged(base_classes.Baseevents):
"""Triggered when switching to another scene collection or when renaming the current scene collection.
"""
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "SceneCollectionChanged"
class SceneCollectionListChanged(base_classes.Baseevents):
"""Triggered when a scene collection is created, added, renamed, or removed.
"""
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "SceneCollectionListChanged"
class ProfileChanged(base_classes.Baseevents):
"""Triggered when switching to another profile or when renaming the current profile.
"""
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "ProfileChanged"
class ProfileListChanged(base_classes.Baseevents):
"""Triggered when a profile is created, added, renamed, or removed.
"""
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "ProfileListChanged"
class Heartbeat(base_classes.Baseevents):
"""Emitted every 2 seconds after enabling it by calling SetHeartbeat.
:Returns:
*pulse*
type: boolean
Toggles between every JSON message as an "I am alive" indicator.
*current_profile*
type: string (optional)
Current active profile.
*current_scene*
type: string (optional)
Current active scene.
*streaming*
type: boolean (optional)
Current streaming state.
*total_stream_time*
type: int (optional)
Total time (in seconds) since the stream started.
*total_stream_bytes*
type: int (optional)
Total bytes sent since the stream started.
*total_stream_frames*
type: int (optional)
Total frames streamed since the stream started.
*recording*
type: boolean (optional)
Current recording state.
*total_record_time*
type: int (optional)
Total time (in seconds) since recording started.
*total_record_bytes*
type: int (optional)
Total bytes recorded since the recording started.
*total_record_frames*
type: int (optional)
Total frames recorded since the recording started.
"""
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "Heartbeat"
self.datain["pulse"] = None
self.datain["current-profile"] = None
self.datain["current-scene"] = None
self.datain["streaming"] = None
self.datain["total-stream-time"] = None
self.datain["total-stream-bytes"] = None
self.datain["total-stream-frames"] = None
self.datain["recording"] = None
self.datain["total-record-time"] = None
self.datain["total-record-bytes"] = None
self.datain["total-record-frames"] = None
def getPulse(self):
return self.datain["pulse"]
def getCurrentProfile(self):
return self.datain["current-profile"]
def getCurrentScene(self):
return self.datain["current-scene"]
def getStreaming(self):
return self.datain["streaming"]
def getTotalStreamTime(self):
return self.datain["total-stream-time"]
def getTotalStreamBytes(self):
return self.datain["total-stream-bytes"]
def getTotalStreamFrames(self):
return self.datain["total-stream-frames"]
def getRecording(self):
return self.datain["recording"]
def getTotalRecordTime(self):
return self.datain["total-record-time"]
def getTotalRecordBytes(self):
return self.datain["total-record-bytes"]
def getTotalRecordFrames(self):
return self.datain["total-record-frames"]
class RecordingStarting(base_classes.Baseevents):
"""A request to start recording has been issued.
"""
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "RecordingStarting"
class RecordingStarted(base_classes.Baseevents):
"""Recording started successfully.
"""
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "RecordingStarted"
class RecordingStopping(base_classes.Baseevents):
"""A request to stop recording has been issued.
"""
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "RecordingStopping"
class RecordingStopped(base_classes.Baseevents):
"""Recording stopped successfully.
"""
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "RecordingStopped"
class StreamStarting(base_classes.Baseevents):
"""A request to start streaming has been issued.
:Returns:
*preview_only*
type: boolean
Always false (retrocompatibility).
"""
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "StreamStarting"
self.datain["preview-only"] = None
def getPreviewOnly(self):
return self.datain["preview-only"]
class StreamStarted(base_classes.Baseevents):
"""Streaming started successfully.
"""
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "StreamStarted"
class StreamStopping(base_classes.Baseevents):
"""A request to stop streaming has been issued.
:Returns:
*preview_only*
type: boolean
Always false (retrocompatibility).
"""
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "StreamStopping"
self.datain["preview-only"] = None
def getPreviewOnly(self):
return self.datain["preview-only"]
class StreamStopped(base_classes.Baseevents):
"""Streaming stopped successfully.
"""
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "StreamStopped"
class StreamStatus(base_classes.Baseevents):
"""Emit every 2 seconds.
:Returns:
*streaming*
type: boolean
Current streaming state.
*recording*
type: boolean
Current recording state.
*preview_only*
type: boolean
Always false (retrocompatibility).
*bytes_per_sec*
type: int
Amount of data per second (in bytes) transmitted by the stream encoder.
*kbits_per_sec*
type: int
Amount of data per second (in kilobits) transmitted by the stream encoder.
*strain*
type: double
Percentage of dropped frames.
*total_stream_time*
type: int
Total time (in seconds) since the stream started.
*num_total_frames*
type: int
Total number of frames transmitted since the stream started.
*num_dropped_frames*
type: int
Number of frames dropped by the encoder since the stream started.
*fps*
type: double
Current framerate.
"""
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "StreamStatus"
self.datain["streaming"] = None
self.datain["recording"] = None
self.datain["preview-only"] = None
self.datain["bytes-per-sec"] = None
self.datain["kbits-per-sec"] = None
self.datain["strain"] = None
self.datain["total-stream-time"] = None
self.datain["num-total-frames"] = None
self.datain["num-dropped-frames"] = None
self.datain["fps"] = None
def getStreaming(self):
return self.datain["streaming"]
def getRecording(self):
return self.datain["recording"]
def getPreviewOnly(self):
return self.datain["preview-only"]
def getBytesPerSec(self):
return self.datain["bytes-per-sec"]
def getKbitsPerSec(self):
return self.datain["kbits-per-sec"]
def getStrain(self):
return self.datain["strain"]
def getTotalStreamTime(self):
return self.datain["total-stream-time"]
def getNumTotalFrames(self):
return self.datain["num-total-frames"]
def getNumDroppedFrames(self):
return self.datain["num-dropped-frames"]
def getFps(self):
return self.datain["fps"]
class Exiting(base_classes.Baseevents):
"""OBS is exiting.
"""
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "Exiting"
| """The active transition has been changed.
:Returns:
*transition_name*
type: String
The name of the new active transition.
"""
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "SwitchTransition"
self.datain["transition-name"] = None
def getTransitionName(self):
return self.datain["transition-name"]
class TransitionListChanged(base_classes.Baseevents):
"""The list of available transitions has been modified.
Transitions have been added, removed, or renamed.
"""
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "TransitionListChanged"
class TransitionDurationChanged(base_classes.Baseevents):
"""The active transition duration has been changed.
:Returns:
*new_duration*
type: int
New transition duration.
"""
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "TransitionDurationChanged"
self.datain["new-duration"] = None
def getNewDuration(self):
return self.datain["new-duration"]
class TransitionBegin(base_classes.Baseevents):
"""A transition (other than "cut") has begun.
:Returns:
*name*
type: String
Transition name.
*duration*
type: int
Transition duration (in milliseconds).
*from_scene*
type: String
Source scene of the transition
*to_scene*
type: String
Destination scene of the transition
"""
def __init__(self):
base_classes.Baseevents.__init__(self)
self.name = "TransitionBegin"
self.datain["name"] = None
self.datain["duration"] = None
self.datain["from-scene"] = None
self.datain["to-scene"] = None
def getName(self):
return self.datain["name"]
def getDuration(self):
return self.datain["duration"]
def getFromScene(self):
return self.datain["from-scene"]
def getToScene(self):
return self.datain["to-scene"] |
class SwitchTransition(base_classes.Baseevents): |
dictdiffer.py | # -*- coding: utf-8 -*-
"""
Calculate the difference between two dictionaries as:
(1) items added
(2) items removed
(3) keys same in both but changed values
(4) keys same in both and unchanged values
Originally posted at http://stackoverflow.com/questions/1165352/fast-comparison-between-two-python-dictionary/1165552#1165552
Available at repository: https://github.com/hughdbrown/dictdiffer
Added the ability to recursively compare dictionaries
"""
from __future__ import absolute_import, print_function, unicode_literals
import copy
from collections.abc import Mapping
from salt.ext import six
def diff(current_dict, past_dict):
return DictDiffer(current_dict, past_dict)
class DictDiffer(object):
"""
Calculate the difference between two dictionaries as:
(1) items added
(2) items removed
(3) keys same in both but changed values
(4) keys same in both and unchanged values
"""
def __init__(self, current_dict, past_dict):
self.current_dict, self.past_dict = current_dict, past_dict
self.set_current, self.set_past = set(list(current_dict)), set(list(past_dict))
self.intersect = self.set_current.intersection(self.set_past)
def added(self):
return self.set_current - self.intersect
def removed(self):
return self.set_past - self.intersect
def changed(self):
return set(
o for o in self.intersect if self.past_dict[o] != self.current_dict[o]
)
def unchanged(self):
return set(
o for o in self.intersect if self.past_dict[o] == self.current_dict[o]
)
def deep_diff(old, new, ignore=None):
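    """
    Return a dict describing the difference between ``old`` and ``new`` as
    {'old': ..., 'new': ...}, keeping only the keys whose values differ.
    Keys listed in ``ignore`` are excluded from the comparison.
    """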
ignore = ignore or []
res = {}
old = copy.deepcopy(old)
new = copy.deepcopy(new)
stack = [(old, new, False)]
while len(stack) > 0:
tmps = []
tmp_old, tmp_new, reentrant = stack.pop()
for key in set(list(tmp_old) + list(tmp_new)):
if key in tmp_old and key in tmp_new and tmp_old[key] == tmp_new[key]:
del tmp_old[key]
del tmp_new[key]
continue
if not reentrant:
if key in tmp_old and key in ignore:
del tmp_old[key]
if key in tmp_new and key in ignore:
del tmp_new[key]
if isinstance(tmp_old.get(key), Mapping) and isinstance(
tmp_new.get(key), Mapping
):
tmps.append((tmp_old[key], tmp_new[key], False))
if tmps:
stack.extend([(tmp_old, tmp_new, True)] + tmps)
if old:
res["old"] = old
if new:
res["new"] = new
return res
def recursive_diff(past_dict, current_dict, ignore_missing_keys=True):
"""
Returns a RecursiveDictDiffer object that computes the recursive diffs
between two dictionaries
past_dict
Past dictionary
current_dict
Current dictionary
ignore_missing_keys
Flag specifying whether to ignore keys that no longer exist in the
current_dict, but exist in the past_dict. If true, the diff will
not contain the missing keys.
Default is True.
"""
return RecursiveDictDiffer(past_dict, current_dict, ignore_missing_keys)
class RecursiveDictDiffer(DictDiffer):
"""
Calculates a recursive diff between the current_dict and the past_dict
creating a diff in the format
{'new': new_value, 'old': old_value}
It recursively searches differences in common keys whose values are
dictionaries creating a diff dict in the format
{'common_key' : {'new': new_value, 'old': old_value}
The class overrides all DictDiffer methods, returning lists of keys and
    subkeys using the . notation (i.e. 'common_key1.common_key2.changed_key')
The class provides access to:
    (1) the added, removed, changed keys and subkeys (using the . notation)
``added``, ``removed``, ``changed`` methods
(2) the diffs in the format above (diff property)
``diffs`` property
(3) a dict with the new changed values only (new_values property)
``new_values`` property
(4) a dict with the old changed values only (old_values property)
``old_values`` property
(5) a string representation of the changes in the format:
``changes_str`` property
Note:
The <_null_> value is a reserved value
.. code-block:: text
common_key1:
common_key2:
changed_key1 from '<old_str>' to '<new_str>'
changed_key2 from '[<old_elem1>, ..]' to '[<new_elem1>, ..]'
common_key3:
changed_key3 from <old_int> to <new_int>
"""
NONE_VALUE = "<_null_>"
def __init__(self, past_dict, current_dict, ignore_missing_keys):
"""
past_dict
Past dictionary.
current_dict
Current dictionary.
ignore_missing_keys
Flag specifying whether to ignore keys that no longer exist in the
current_dict, but exist in the past_dict. If true, the diff will
not contain the missing keys.
"""
super(RecursiveDictDiffer, self).__init__(current_dict, past_dict)
self._diffs = self._get_diffs(
self.current_dict, self.past_dict, ignore_missing_keys
)
        # Ignore unset values when assessing the changes
self.ignore_unset_values = True
@classmethod
def _get_diffs(cls, dict1, dict2, ignore_missing_keys):
"""
Returns a dict with the differences between dict1 and dict2
Notes:
Keys that only exist in dict2 are not included in the diff if
ignore_missing_keys is True, otherwise they are
Simple compares are done on lists
"""
ret_dict = {}
for p in dict1.keys():
if p not in dict2:
ret_dict.update({p: {"new": dict1[p], "old": cls.NONE_VALUE}})
elif dict1[p] != dict2[p]:
if isinstance(dict1[p], dict) and isinstance(dict2[p], dict):
sub_diff_dict = cls._get_diffs(
dict1[p], dict2[p], ignore_missing_keys
)
if sub_diff_dict:
ret_dict.update({p: sub_diff_dict})
else:
ret_dict.update({p: {"new": dict1[p], "old": dict2[p]}})
if not ignore_missing_keys:
for p in dict2.keys():
if p not in dict1.keys():
ret_dict.update({p: {"new": cls.NONE_VALUE, "old": dict2[p]}})
return ret_dict
@classmethod
def _get_values(cls, diff_dict, type="new"):
"""
        Returns a dictionary with the 'new' or 'old' values in a diff dict.
type
Which values to return, 'new' or 'old'
"""
ret_dict = {}
for p in diff_dict.keys():
if type in diff_dict[p].keys():
ret_dict.update({p: diff_dict[p][type]})
else:
ret_dict.update({p: cls._get_values(diff_dict[p], type=type)})
return ret_dict
@classmethod
def _get_changes(cls, diff_dict):
"""
        Returns a list of string messages with the differences in a diff dict.
        Each inner difference is indented two spaces deeper.
"""
changes_strings = []
for p in sorted(diff_dict.keys()):
if sorted(diff_dict[p].keys()) == ["new", "old"]:
# Some string formatting
old_value = diff_dict[p]["old"]
if diff_dict[p]["old"] == cls.NONE_VALUE:
old_value = "nothing"
elif isinstance(diff_dict[p]["old"], six.string_types):
old_value = "'{0}'".format(diff_dict[p]["old"])
elif isinstance(diff_dict[p]["old"], list):
old_value = "'{0}'".format(", ".join(diff_dict[p]["old"]))
new_value = diff_dict[p]["new"]
if diff_dict[p]["new"] == cls.NONE_VALUE:
new_value = "nothing"
elif isinstance(diff_dict[p]["new"], six.string_types):
new_value = "'{0}'".format(diff_dict[p]["new"])
elif isinstance(diff_dict[p]["new"], list):
new_value = "'{0}'".format(", ".join(diff_dict[p]["new"]))
changes_strings.append(
"{0} from {1} to {2}".format(p, old_value, new_value)
)
else:
sub_changes = cls._get_changes(diff_dict[p])
if sub_changes:
changes_strings.append("{0}:".format(p))
changes_strings.extend([" {0}".format(c) for c in sub_changes])
return changes_strings
def added(self):
"""
Returns all keys that have been added.
If the keys are in child dictionaries they will be represented with
. notation
"""
def _added(diffs, prefix):
keys = []
for key in diffs.keys():
if isinstance(diffs[key], dict) and "old" not in diffs[key]:
keys.extend(
_added(diffs[key], prefix="{0}{1}.".format(prefix, key))
)
elif diffs[key]["old"] == self.NONE_VALUE:
if isinstance(diffs[key]["new"], dict):
keys.extend(
_added(
diffs[key]["new"], prefix="{0}{1}.".format(prefix, key)
)
)
else:
keys.append("{0}{1}".format(prefix, key))
return keys
return sorted(_added(self._diffs, prefix=""))
def removed(self):
"""
Returns all keys that have been removed.
If the keys are in child dictionaries they will be represented with
. notation
"""
def _removed(diffs, prefix):
keys = []
for key in diffs.keys():
if isinstance(diffs[key], dict) and "old" not in diffs[key]:
keys.extend(
_removed(diffs[key], prefix="{0}{1}.".format(prefix, key))
)
elif diffs[key]["new"] == self.NONE_VALUE:
keys.append("{0}{1}".format(prefix, key))
elif isinstance(diffs[key]["new"], dict):
keys.extend(
_removed(
diffs[key]["new"], prefix="{0}{1}.".format(prefix, key)
)
)
return keys
return sorted(_removed(self._diffs, prefix=""))
def changed(self):
"""
Returns all keys that have been changed.
If the keys are in child dictionaries they will be represented with
. notation
"""
def _changed(diffs, prefix):
keys = []
for key in diffs.keys():
if not isinstance(diffs[key], dict):
continue
if isinstance(diffs[key], dict) and "old" not in diffs[key]:
keys.extend(
_changed(diffs[key], prefix="{0}{1}.".format(prefix, key))
)
continue
if self.ignore_unset_values:
if (
"old" in diffs[key]
and "new" in diffs[key]
and diffs[key]["old"] != self.NONE_VALUE
and diffs[key]["new"] != self.NONE_VALUE
):
if isinstance(diffs[key]["new"], dict):
keys.extend(
_changed(
diffs[key]["new"],
prefix="{0}{1}.".format(prefix, key),
)
)
else:
keys.append("{0}{1}".format(prefix, key))
elif isinstance(diffs[key], dict):
keys.extend(
_changed(diffs[key], prefix="{0}{1}.".format(prefix, key))
)
else:
if "old" in diffs[key] and "new" in diffs[key]:
if isinstance(diffs[key]["new"], dict):
keys.extend(
_changed(
diffs[key]["new"],
prefix="{0}{1}.".format(prefix, key),
)
)
else:
keys.append("{0}{1}".format(prefix, key))
elif isinstance(diffs[key], dict):
keys.extend(
_changed(diffs[key], prefix="{0}{1}.".format(prefix, key))
)
return keys
return sorted(_changed(self._diffs, prefix=""))
def unchanged(self):
"""
Returns all keys that have been unchanged.
If the keys are in child dictionaries they will be represented with
. notation
"""
def _unchanged(current_dict, diffs, prefix):
keys = []
for key in current_dict.keys():
if key not in diffs:
keys.append("{0}{1}".format(prefix, key))
elif isinstance(current_dict[key], dict):
if "new" in diffs[key]:
# There is a diff
continue
else:
                        keys.extend(
                            _unchanged(
current_dict[key],
diffs[key],
prefix="{0}{1}.".format(prefix, key),
)
)
return keys
return sorted(_unchanged(self.current_dict, self._diffs, prefix=""))
@property
def diffs(self):
"""Returns a dict with the recursive diffs current_dict - past_dict"""
return self._diffs
@property
def new_values(self):
"""Returns a dictionary with the new values"""
return self._get_values(self._diffs, type="new")
@property
def old_values(self):
"""Returns a dictionary with the old values"""
return self._get_values(self._diffs, type="old")
@property
def changes_str(self):
"""Returns a string describing the changes"""
return "\n".join(self._get_changes(self._diffs)) | |
network-listener.ts | import * as cxschema from '@aws-cdk/cloud-assembly-schema';
import { Duration, IResource, Resource } from '@aws-cdk/core';
import { Construct } from 'constructs';
import { BaseListener, BaseListenerLookupOptions } from '../shared/base-listener';
import { HealthCheck } from '../shared/base-target-group';
import { Protocol, SslPolicy } from '../shared/enums';
import { IListenerCertificate } from '../shared/listener-certificate';
import { validateNetworkProtocol } from '../shared/util';
import { NetworkListenerAction } from './network-listener-action';
import { INetworkLoadBalancer } from './network-load-balancer';
import { INetworkLoadBalancerTarget, INetworkTargetGroup, NetworkTargetGroup } from './network-target-group';
/**
* Basic properties for a Network Listener
*/
export interface BaseNetworkListenerProps {
/**
* The port on which the listener listens for requests.
*/
readonly port: number;
/**
* Default target groups to load balance to
*
* All target groups will be load balanced to with equal weight and without
* stickiness. For a more complex configuration than that, use
* either `defaultAction` or `addAction()`.
*
* Cannot be specified together with `defaultAction`.
*
* @default - None.
*/
readonly defaultTargetGroups?: INetworkTargetGroup[];
/**
* Default action to take for requests to this listener
*
* This allows full control of the default Action of the load balancer,
* including weighted forwarding. See the `NetworkListenerAction` class for
* all options.
*
* Cannot be specified together with `defaultTargetGroups`.
*
* @default - None.
*/
readonly defaultAction?: NetworkListenerAction;
/**
* Protocol for listener, expects TCP, TLS, UDP, or TCP_UDP.
*
* @default - TLS if certificates are provided. TCP otherwise.
*/
readonly protocol?: Protocol;
/**
* Certificate list of ACM cert ARNs
*
* @default - No certificates.
*/
readonly certificates?: IListenerCertificate[];
/**
* SSL Policy
*
* @default - Current predefined security policy.
*/
readonly sslPolicy?: SslPolicy;
}
/**
* Properties for adding a certificate to a listener
*
* This interface exists for backwards compatibility.
*
* @deprecated Use IListenerCertificate instead
*/
export interface INetworkListenerCertificateProps extends IListenerCertificate {
}
/**
* Properties for a Network Listener attached to a Load Balancer
*/
export interface NetworkListenerProps extends BaseNetworkListenerProps {
/**
* The load balancer to attach this listener to
*/
readonly loadBalancer: INetworkLoadBalancer;
}
/**
* Options for looking up a network listener.
*/
export interface NetworkListenerLookupOptions extends BaseListenerLookupOptions {
/**
* Protocol of the listener port
* @default - listener is not filtered by protocol
*/
readonly listenerProtocol?: Protocol;
}
/**
* Define a Network Listener
*
* @resource AWS::ElasticLoadBalancingV2::Listener
*/
export class NetworkListener extends BaseListener implements INetworkListener {
/**
* Looks up a network listener
*/
public static fromLookup(scope: Construct, id: string, options: NetworkListenerLookupOptions): INetworkListener {
let listenerProtocol: cxschema.LoadBalancerListenerProtocol | undefined;
if (options.listenerProtocol) {
validateNetworkProtocol(options.listenerProtocol);
switch (options.listenerProtocol) {
case Protocol.TCP: listenerProtocol = cxschema.LoadBalancerListenerProtocol.TCP; break;
case Protocol.UDP: listenerProtocol = cxschema.LoadBalancerListenerProtocol.UDP; break;
case Protocol.TCP_UDP: listenerProtocol = cxschema.LoadBalancerListenerProtocol.TCP_UDP; break;
case Protocol.TLS: listenerProtocol = cxschema.LoadBalancerListenerProtocol.TLS; break;
}
}
const props = BaseListener._queryContextProvider(scope, {
userOptions: options,
listenerProtocol: listenerProtocol,
loadBalancerType: cxschema.LoadBalancerType.NETWORK,
});
class LookedUp extends Resource implements INetworkListener {
public listenerArn = props.listenerArn;
}
return new LookedUp(scope, id);
}
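  // A minimal lookup sketch (a context-provider query; the tag values and port
  // below are illustrative, not from this file):
  //
  //   const listener = NetworkListener.fromLookup(this, 'Listener', {
  //     loadBalancerTags: { Environment: 'prod' },
  //     listenerPort: 443,
  //     listenerProtocol: Protocol.TLS,
  //   });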
/**
* Import an existing listener
*/
public static fromNetworkListenerArn(scope: Construct, id: string, networkListenerArn: string): INetworkListener {
    class Import extends Resource implements INetworkListener {
public listenerArn = networkListenerArn;
}
return new Import(scope, id);
}
/**
* The load balancer this listener is attached to
*/
public readonly loadBalancer: INetworkLoadBalancer;
/**
* the protocol of the listener
*/
private readonly protocol: Protocol;
constructor(scope: Construct, id: string, props: NetworkListenerProps) {
const certs = props.certificates || [];
const proto = props.protocol || (certs.length > 0 ? Protocol.TLS : Protocol.TCP);
validateNetworkProtocol(proto);
if (proto === Protocol.TLS && certs.filter(v => v != null).length === 0) {
throw new Error('When the protocol is set to TLS, you must specify certificates');
}
if (proto !== Protocol.TLS && certs.length > 0) {
throw new Error('Protocol must be TLS when certificates have been specified');
}
super(scope, id, {
loadBalancerArn: props.loadBalancer.loadBalancerArn,
protocol: proto,
port: props.port,
sslPolicy: props.sslPolicy,
certificates: props.certificates,
});
this.loadBalancer = props.loadBalancer;
this.protocol = proto;
if (props.defaultAction && props.defaultTargetGroups) {
throw new Error('Specify at most one of \'defaultAction\' and \'defaultTargetGroups\'');
}
if (props.defaultAction) {
this.setDefaultAction(props.defaultAction);
}
if (props.defaultTargetGroups) {
this.setDefaultAction(NetworkListenerAction.forward(props.defaultTargetGroups));
}
}
/**
* Load balance incoming requests to the given target groups.
*
* All target groups will be load balanced to with equal weight and without
* stickiness. For a more complex configuration than that, use `addAction()`.
*/
public addTargetGroups(_id: string, ...targetGroups: INetworkTargetGroup[]): void {
this.setDefaultAction(NetworkListenerAction.forward(targetGroups));
}
/**
* Perform the given Action on incoming requests
*
* This allows full control of the default Action of the load balancer,
* including weighted forwarding. See the `NetworkListenerAction` class for
* all options.
*/
public addAction(_id: string, props: AddNetworkActionProps): void {
this.setDefaultAction(props.action);
}
/**
* Load balance incoming requests to the given load balancing targets.
*
* This method implicitly creates a NetworkTargetGroup for the targets
* involved, and a 'forward' action to route traffic to the given TargetGroup.
*
* If you want more control over the precise setup, create the TargetGroup
* and use `addAction` yourself.
*
* It's possible to add conditions to the targets added in this way. At least
* one set of targets must be added without conditions.
*
* @returns The newly created target group
*/
public addTargets(id: string, props: AddNetworkTargetsProps): NetworkTargetGroup {
if (!this.loadBalancer.vpc) {
// eslint-disable-next-line max-len
throw new Error('Can only call addTargets() when using a constructed Load Balancer or imported Load Balancer with specified VPC; construct a new TargetGroup and use addTargetGroup');
}
const group = new NetworkTargetGroup(this, id + 'Group', {
deregistrationDelay: props.deregistrationDelay,
healthCheck: props.healthCheck,
port: props.port,
protocol: props.protocol ?? this.protocol,
proxyProtocolV2: props.proxyProtocolV2,
preserveClientIp: props.preserveClientIp,
targetGroupName: props.targetGroupName,
targets: props.targets,
vpc: this.loadBalancer.vpc,
});
this.addTargetGroups(id, group);
return group;
}
/**
* Wrapper for _setDefaultAction which does a type-safe bind
*/
private setDefaultAction(action: NetworkListenerAction) {
action.bind(this, this);
this._setDefaultAction(action);
}
}
/**
* Properties to reference an existing listener
*/
export interface INetworkListener extends IResource {
/**
* ARN of the listener
* @attribute
*/
readonly listenerArn: string;
}
/**
* Properties for adding a new action to a listener
*/
export interface AddNetworkActionProps {
/**
* Action to perform
*/
readonly action: NetworkListenerAction;
}
/**
* Properties for adding new network targets to a listener
*/
export interface AddNetworkTargetsProps {
/**
* The port on which the listener listens for requests.
*
* @default Determined from protocol if known
*/
readonly port: number;
/**
* Protocol for target group, expects TCP, TLS, UDP, or TCP_UDP.
*
* @default - inherits the protocol of the listener
*/
readonly protocol?: Protocol;
/**
* The targets to add to this target group.
*
* Can be `Instance`, `IPAddress`, or any self-registering load balancing
* target. If you use either `Instance` or `IPAddress` as targets, all
   * targets must be of the same type.
*/
readonly targets?: INetworkLoadBalancerTarget[];
/**
* The name of the target group.
*
* This name must be unique per region per account, can have a maximum of
* 32 characters, must contain only alphanumeric characters or hyphens, and
* must not begin or end with a hyphen.
*
* @default Automatically generated
*/
readonly targetGroupName?: string;
/**
* The amount of time for Elastic Load Balancing to wait before deregistering a target.
*
* The range is 0-3600 seconds.
*
* @default Duration.minutes(5)
*/
readonly deregistrationDelay?: Duration;
/**
* Indicates whether Proxy Protocol version 2 is enabled.
*
* @default false
*/
readonly proxyProtocolV2?: boolean;
/**
* Indicates whether client IP preservation is enabled.
*
* @default false if the target group type is IP address and the
* target group protocol is TCP or TLS. Otherwise, true.
*/
readonly preserveClientIp?: boolean;
/**
* Health check configuration
*
* @default No health check
*/
readonly healthCheck?: HealthCheck;
}
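// A minimal end-to-end sketch (assumes an existing CDK `stack`, a `vpc`, a
// NetworkLoadBalancer from ./network-load-balancer, a `cert` implementing
// IListenerCertificate, and an `asg` load-balancing target; all names are
// illustrative):
//
//   const lb = new NetworkLoadBalancer(stack, 'NLB', { vpc });
//   const listener = lb.addListener('Tls', {
//     port: 443,
//     certificates: [cert],   // protocol defaults to TLS when certs are given
//   });
//   listener.addTargets('Fleet', {
//     port: 8080,
//     targets: [asg],
//     deregistrationDelay: Duration.seconds(30),
//   });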
operators_test.go | // Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-2020 Datadog, Inc.
package eval
import (
"testing"
)
func TestPatternValue(t *testing.T) {
re, err := patternToRegexp("^$[]{}+?/etc/?+*.conf")
	if err != nil {
		t.Fatal(err)
	}
if re.String() != "^\\^\\$\\[\\]\\{\\}\\+\\?/etc/\\?\\+.*\\.conf$" {
t.Fatalf("expected regexp not found: %s", re.String())
}
if _, err = patternToRegexp("*/passwd"); err == nil {
t.Fatal("only suffix wildcard are accepted")
}
if _, err = patternToRegexp("/etc/*/passwd"); err == nil {
t.Fatal("only suffix wildcard are accepted")
}
}
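// A minimal sketch of the suffix-wildcard rule asserted above (illustrative
// pattern, assuming patternToRegexp accepts a wildcard in the trailing path
// segment as in the test case):
//
//	re, _ := patternToRegexp("/etc/httpd/*.conf")
//	re.MatchString("/etc/httpd/vhost.conf") // true
//	re.MatchString("/etc/nginx/vhost.conf") // false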
operations.rs | #![doc = "generated by AutoRust 0.1.0"]
#![allow(unused_mut)]
#![allow(unused_variables)]
#![allow(unused_imports)]
use super::{models, API_VERSION};
#[derive(Clone)]
pub struct Client {
endpoint: String,
credential: std::sync::Arc<dyn azure_core::TokenCredential>,
scopes: Vec<String>,
pipeline: azure_core::pipeline::Pipeline,
}
#[derive(Clone)]
pub struct ClientBuilder {
credential: std::sync::Arc<dyn azure_core::TokenCredential>,
endpoint: Option<String>,
scopes: Option<Vec<String>>,
}
pub const DEFAULT_ENDPOINT: &str = azure_core::resource_manager_endpoint::AZURE_PUBLIC_CLOUD;
impl ClientBuilder {
pub fn new(credential: std::sync::Arc<dyn azure_core::TokenCredential>) -> Self {
Self {
credential,
endpoint: None,
scopes: None,
}
}
pub fn endpoint(mut self, endpoint: impl Into<String>) -> Self {
self.endpoint = Some(endpoint.into());
self
}
pub fn scopes(mut self, scopes: &[&str]) -> Self {
self.scopes = Some(scopes.iter().map(|scope| (*scope).to_owned()).collect());
self
}
pub fn build(self) -> Client {
let endpoint = self.endpoint.unwrap_or_else(|| DEFAULT_ENDPOINT.to_owned());
let scopes = self.scopes.unwrap_or_else(|| vec![format!("{}/", endpoint)]);
Client::new(endpoint, self.credential, scopes)
}
}
impl Client {
pub(crate) fn endpoint(&self) -> &str {
self.endpoint.as_str()
}
pub(crate) fn token_credential(&self) -> &dyn azure_core::TokenCredential {
self.credential.as_ref()
}
pub(crate) fn scopes(&self) -> Vec<&str> {
self.scopes.iter().map(String::as_str).collect()
}
pub(crate) async fn send(&self, request: impl Into<azure_core::Request>) -> Result<azure_core::Response, azure_core::Error> {
let mut context = azure_core::Context::default();
let mut request = request.into();
self.pipeline.send(&mut context, &mut request).await
}
pub fn new(endpoint: impl Into<String>, credential: std::sync::Arc<dyn azure_core::TokenCredential>, scopes: Vec<String>) -> Self {
let endpoint = endpoint.into();
let pipeline = azure_core::pipeline::Pipeline::new(
option_env!("CARGO_PKG_NAME"),
option_env!("CARGO_PKG_VERSION"),
azure_core::ClientOptions::default(),
Vec::new(),
Vec::new(),
);
Self {
endpoint,
credential,
scopes,
pipeline,
}
}
pub fn guest_configuration_assignment_reports(&self) -> guest_configuration_assignment_reports::Client {
guest_configuration_assignment_reports::Client(self.clone())
}
pub fn guest_configuration_assignments(&self) -> guest_configuration_assignments::Client {
guest_configuration_assignments::Client(self.clone())
}
pub fn operations(&self) -> operations::Client {
operations::Client(self.clone())
}
}
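// A minimal usage sketch (assumes a credential type implementing
// azure_core::TokenCredential, e.g. azure_identity's DefaultAzureCredential;
// resource names are illustrative):
//
//     let credential = std::sync::Arc::new(azure_identity::DefaultAzureCredential::default());
//     let client = ClientBuilder::new(credential).build();
//     let assignment = client
//         .guest_configuration_assignments()
//         .get("my-rg", "my-assignment", "my-subscription-id", "my-vm")
//         .into_future()
//         .await?;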
#[non_exhaustive]
#[derive(Debug, thiserror :: Error)]
#[allow(non_camel_case_types)]
pub enum Error {
#[error(transparent)]
GuestConfigurationAssignments_Get(#[from] guest_configuration_assignments::get::Error),
#[error(transparent)]
GuestConfigurationAssignments_CreateOrUpdate(#[from] guest_configuration_assignments::create_or_update::Error),
#[error(transparent)]
GuestConfigurationAssignmentReports_List(#[from] guest_configuration_assignment_reports::list::Error),
#[error(transparent)]
GuestConfigurationAssignmentReports_Get(#[from] guest_configuration_assignment_reports::get::Error),
#[error(transparent)]
Operations_List(#[from] operations::list::Error),
}
pub mod guest_configuration_assignments {
use super::{models, API_VERSION};
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn get(
&self,
resource_group_name: impl Into<String>,
guest_configuration_assignment_name: impl Into<String>,
subscription_id: impl Into<String>,
vm_name: impl Into<String>,
) -> get::Builder {
get::Builder {
client: self.0.clone(),
resource_group_name: resource_group_name.into(),
guest_configuration_assignment_name: guest_configuration_assignment_name.into(),
subscription_id: subscription_id.into(),
vm_name: vm_name.into(),
}
}
pub fn create_or_update(
&self,
guest_configuration_assignment_name: impl Into<String>,
parameters: impl Into<models::GuestConfigurationAssignment>,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
vm_name: impl Into<String>,
) -> create_or_update::Builder {
create_or_update::Builder {
client: self.0.clone(),
guest_configuration_assignment_name: guest_configuration_assignment_name.into(),
parameters: parameters.into(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
vm_name: vm_name.into(),
}
}
}
pub mod get {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrl(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequest(http::Error),
#[error("Failed to serialize request body: {0}")]
Serialize(serde_json::Error),
#[error("Failed to get access token: {0}")]
GetToken(azure_core::Error),
#[error("Failed to execute request: {0}")]
SendRequest(azure_core::Error),
#[error("Failed to get response bytes: {0}")]
ResponseBytes(azure_core::StreamError),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
Deserialize(serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) guest_configuration_assignment_name: String,
pub(crate) subscription_id: String,
pub(crate) vm_name: String,
}
impl Builder {
pub fn into_future(
self,
) -> futures::future::BoxFuture<'static, std::result::Result<models::GuestConfigurationAssignment, Error>> {
Box::pin(async move {
let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachines/{}/providers/Microsoft.GuestConfiguration/guestConfigurationAssignments/{}" , self . client . endpoint () , & self . subscription_id , & self . resource_group_name , & self . vm_name , & self . guest_configuration_assignment_name) ;
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::GuestConfigurationAssignment =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ErrorResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod create_or_update {
use super::{models, API_VERSION};
#[derive(Debug)]
pub enum Response {
Created201(models::GuestConfigurationAssignment),
Ok200(models::GuestConfigurationAssignment),
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrl(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequest(http::Error),
#[error("Failed to serialize request body: {0}")]
Serialize(serde_json::Error),
#[error("Failed to get access token: {0}")]
GetToken(azure_core::Error),
#[error("Failed to execute request: {0}")]
SendRequest(azure_core::Error),
#[error("Failed to get response bytes: {0}")]
ResponseBytes(azure_core::StreamError),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
Deserialize(serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) guest_configuration_assignment_name: String,
pub(crate) parameters: models::GuestConfigurationAssignment,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) vm_name: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachines/{}/providers/Microsoft.GuestConfiguration/guestConfigurationAssignments/{}" , self . client . endpoint () , & self . subscription_id , & self . resource_group_name , & self . vm_name , & self . guest_configuration_assignment_name) ;
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.parameters).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::CREATED => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::GuestConfigurationAssignment =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(Response::Created201(rsp_value))
}
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::GuestConfigurationAssignment =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(Response::Ok200(rsp_value))
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ErrorResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
}
pub mod guest_configuration_assignment_reports {
use super::{models, API_VERSION};
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn list(
&self,
resource_group_name: impl Into<String>,
guest_configuration_assignment_name: impl Into<String>,
subscription_id: impl Into<String>,
vm_name: impl Into<String>,
) -> list::Builder {
list::Builder {
client: self.0.clone(),
resource_group_name: resource_group_name.into(),
guest_configuration_assignment_name: guest_configuration_assignment_name.into(),
subscription_id: subscription_id.into(),
vm_name: vm_name.into(),
}
}
pub fn get(
&self,
resource_group_name: impl Into<String>,
guest_configuration_assignment_name: impl Into<String>,
report_id: impl Into<String>,
subscription_id: impl Into<String>,
vm_name: impl Into<String>,
) -> get::Builder {
get::Builder {
client: self.0.clone(),
resource_group_name: resource_group_name.into(),
guest_configuration_assignment_name: guest_configuration_assignment_name.into(),
report_id: report_id.into(),
subscription_id: subscription_id.into(),
vm_name: vm_name.into(),
}
}
}
pub mod list {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrl(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequest(http::Error),
#[error("Failed to serialize request body: {0}")]
Serialize(serde_json::Error),
#[error("Failed to get access token: {0}")]
GetToken(azure_core::Error),
#[error("Failed to execute request: {0}")]
SendRequest(azure_core::Error),
#[error("Failed to get response bytes: {0}")]
ResponseBytes(azure_core::StreamError),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
Deserialize(serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) guest_configuration_assignment_name: String,
pub(crate) subscription_id: String,
pub(crate) vm_name: String,
}
impl Builder {
pub fn into_future(
self,
) -> futures::future::BoxFuture<'static, std::result::Result<models::GuestConfigurationAssignmentReportList, Error>>
{
Box::pin(async move {
let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachines/{}/providers/Microsoft.GuestConfiguration/guestConfigurationAssignments/{}/reports" , self . client . endpoint () , & self . subscription_id , & self . resource_group_name , & self . vm_name , & self . guest_configuration_assignment_name) ;
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::GuestConfigurationAssignmentReportList =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ErrorResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod get {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrl(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequest(http::Error),
#[error("Failed to serialize request body: {0}")]
Serialize(serde_json::Error),
#[error("Failed to get access token: {0}")]
GetToken(azure_core::Error),
#[error("Failed to execute request: {0}")]
SendRequest(azure_core::Error),
#[error("Failed to get response bytes: {0}")]
ResponseBytes(azure_core::StreamError),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
Deserialize(serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) guest_configuration_assignment_name: String,
pub(crate) report_id: String,
pub(crate) subscription_id: String,
pub(crate) vm_name: String,
}
impl Builder {
pub fn into_future(
self,
) -> futures::future::BoxFuture<'static, std::result::Result<models::GuestConfigurationAssignmentReport, Error>> {
                Box::pin(async move {
                    let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachines/{}/providers/Microsoft.GuestConfiguration/guestConfigurationAssignments/{}/reports/{}" , self . client . endpoint () , & self . subscription_id , & self . resource_group_name , & self . vm_name , & self . guest_configuration_assignment_name , & self . report_id) ;
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::GuestConfigurationAssignmentReport =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ErrorResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
}
pub mod operations {
use super::{models, API_VERSION};
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn list(&self) -> list::Builder {
list::Builder { client: self.0.clone() }
}
}
pub mod list {
use super::{models, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrl(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequest(http::Error),
#[error("Failed to serialize request body: {0}")]
Serialize(serde_json::Error),
#[error("Failed to get access token: {0}")]
GetToken(azure_core::Error),
#[error("Failed to execute request: {0}")]
SendRequest(azure_core::Error),
#[error("Failed to get response bytes: {0}")]
ResponseBytes(azure_core::StreamError),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
Deserialize(serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::OperationList, Error>> {
Box::pin(async move {
let url_str = &format!("{}/providers/Microsoft.GuestConfiguration/operations", self.client.endpoint(),);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::OperationList =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ErrorResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
} | |
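// Hypothetical call-site sketch (not part of the generated code; assumes a
// configured `Client` with credentials and scopes): each builder above is
// consumed via `into_future()` and awaited, e.g.
//
//     let ops = client.operations().list().into_future().await?;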
run.go | package cli
import (
"os"
"path/filepath"
"time"
"github.com/sirupsen/logrus"
"github.com/suzuki-shunsuke/buildflow/pkg/config"
"github.com/suzuki-shunsuke/buildflow/pkg/controller"
"github.com/suzuki-shunsuke/buildflow/pkg/execute"
"github.com/suzuki-shunsuke/buildflow/pkg/file"
"github.com/suzuki-shunsuke/buildflow/pkg/github"
"github.com/suzuki-shunsuke/go-findconfig/findconfig"
"github.com/urfave/cli/v2"
"gopkg.in/yaml.v2"
)
func (runner Runner) setCLIArg(c *cli.Context, cfg config.Config) config.Config {
if owner := c.String("owner"); owner != "" {
cfg.Owner = owner
}
if repo := c.String("repo"); repo != "" {
cfg.Repo = repo
}
if token := c.String("github-token"); token != "" {
cfg.GitHubToken = token
}
if logLevel := c.String("log-level"); logLevel != "" {
cfg.LogLevel = logLevel
}
return cfg
}
func (runner Runner) importPhaseConfig(cfgPhases []config.Phase, wd string) ([]config.Phase, error) { //nolint:dupl
phases := []config.Phase{}
for _, phase := range cfgPhases {
if phase.Import == "" {
phases = append(phases, phase)
continue
}
p := phase.Import
if !filepath.IsAbs(p) {
p = filepath.Join(wd, p)
}
arr, err := func() ([]config.Phase, error) {
arr := []config.Phase{}
file, err := os.Open(p)
if err != nil {
return nil, err
}
defer file.Close()
if err := yaml.NewDecoder(file).Decode(&arr); err != nil {
return nil, err
}
return arr, nil
}()
if err != nil {
return phases, err
}
phases = append(phases, arr...)
}
return phases, nil
}
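// Illustrative config fragment (hypothetical file layout): gopkg.in/yaml.v2
// matches lowercased field names, so a phase whose definition lives in another
// file is written as:
//
//   phases:
//     - import: phases/build.yaml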
func (runner Runner) importTaskConfig(cfgTasks []config.Task, wd string) ([]config.Task, error) { //nolint:dupl
tasks := []config.Task{}
for _, task := range cfgTasks {
if task.Import == "" {
tasks = append(tasks, task)
continue
}
p := task.Import
if !filepath.IsAbs(p) {
p = filepath.Join(wd, p)
}
arr, err := func() ([]config.Task, error) {
arr := []config.Task{}
file, err := os.Open(p)
if err != nil {
return nil, err
}
defer file.Close()
if err := yaml.NewDecoder(file).Decode(&arr); err != nil {
return nil, err
}
return arr, nil
}()
if err != nil {
return nil, err
}
tasks = append(tasks, arr...)
}
return tasks, nil
}
func (runner Runner) importConfig(cfg config.Config, wd string) (config.Config, error) {
phases, err := runner.importPhaseConfig(cfg.Phases, wd)
if err != nil {
return cfg, err
}
for i, phase := range phases {
tasks, err := runner.importTaskConfig(phase.Tasks, wd)
if err != nil {
return cfg, err
}
phase.Tasks = tasks
phases[i] = phase
}
cfg.Phases = phases
return cfg, nil
}
func (runner Runner) action(c *cli.Context) error {
wd, err := os.Getwd()
if err != nil {
return err
}
reader := config.Reader{
		ExistFile: findconfig.Exist,
	}
	cfg, cfgPath, err := reader.FindAndRead(c.String("config"), wd)
	if err != nil {
return err
}
if c, err := runner.importConfig(cfg, wd); err != nil {
return err
} else {
cfg = c
}
cfg = runner.setCLIArg(c, cfg)
cfg, err = config.Set(cfg)
if err != nil {
return err
}
ghClient := github.New(c.Context, github.ParamsNew{
Token: cfg.GitHubToken,
})
if cfg.LogLevel != "" {
lvl, err := logrus.ParseLevel(cfg.LogLevel)
if err != nil {
logrus.WithFields(logrus.Fields{
"log_level": cfg.LogLevel,
}).WithError(err).Error("the log level is invalid")
}
logrus.SetLevel(lvl)
}
logrus.WithFields(logrus.Fields{
"owner": cfg.Owner,
"repo": cfg.Repo,
"log_level": cfg.LogLevel,
}).Debug("config")
ctrl := controller.Controller{
Config: cfg,
GitHub: ghClient,
Executor: execute.New(),
Stdout: os.Stdout,
Stderr: os.Stdout,
Timer: timer{},
FileReader: file.Reader{},
FileWriter: file.Writer{},
}
return ctrl.Run(c.Context, filepath.Dir(cfgPath))
}
type timer struct{}
func (t timer) Now() time.Time {
return time.Now()
}
node.rs | // Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
use anyhow::Result;
use diem_types::on_chain_config::VMPublishingOption;
use std::path::Path;
pub fn handle(project_path: &Path) -> Result<()> {
let publishing_option = VMPublishingOption::open();
diem_node::load_test_environment(
Some(project_path.join("nodeconfig")),
false,
Some(publishing_option),
diem_framework_releases::current_module_blobs().to_vec(),
rand::rngs::OsRng,
);
Ok(())
}
extension.rs | fn main() {
let q = requestty::questions![Password { extension: todo!() }];
} |
|
setup-jsdom.js | import { JSDOM } from 'jsdom';
import Enzyme from 'enzyme';
import Adapter from 'enzyme-adapter-react-16.2';
import { createElementMock } from './setup-canvas-jsdom';
Enzyme.configure({ adapter: new Adapter() });
const dom = new JSDOM('<!DOCTYPE html><html><head></head><body></body></html>', {
  userAgent: 'node.js',
});
global.window = dom.window;
global.document = dom.window.document;
global.navigator = dom.window.navigator;
// needed by elements/functions using canvas:
// <Select />, <Axis />, filterTickValuesByWidth
global.document.createElement = createElementMock.bind(
null,
document.createElement.bind(document),
);
// needed by interact.js
global.Element = dom.window.Element;
// needed by <Autosizer />
global.getComputedStyle = dom.window.getComputedStyle.bind(dom.window);
global.HTMLElement = () => {}; |
|
HumanReadableDurationFormat.js | function formatDuration(seconds) {
let time = { year: 31536000, day: 86400, hour: 3600, minute: 60, second: 1 },
res = [];
if (seconds === 0) {
return 'now';
}
for (let key in time) {
if (seconds >= time[key]) {
let val = Math.floor(seconds/time[key]);
res.push(val += val > 1 ? ' ' + key + 's' : ' ' + key);
seconds = seconds % time[key];
}
}
    return res.length > 1 ? res.join(', ').replace(/,([^,]*)$/, ' and$1') : res[0];
}
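// Worked example: formatDuration(3662) -> '1 hour, 1 minute and 2 seconds'
// (3600s -> '1 hour', 60s -> '1 minute', 2s -> '2 seconds'; the last comma is
// replaced by ' and').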
02.js | 'use strict';
// Create a constructor for creating Rectangles.
// it should take two parameters: the sides of the rectangle
// Every rectangle should have a method called getArea() that returns its area
// Every rectangle should have a method called getCircumference() that returns its circumference
function Rectangles(width, height) {
this.width = width;
    this.height = height;
}
Rectangles.prototype.getArea = function () {
    return this.width * this.height;
};
Rectangles.prototype.getCircumference = function () {
    return (this.width + this.height) * 2;
};
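// Usage example:
// var r = new Rectangles(3, 4);
// r.getArea(); // 12
// r.getCircumference(); // 14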
|
index.ts | // Copyright (c) Jupyter Development Team.
// Distributed under the terms of the Modified BSD License.
import { IRenderMime } from '@jupyterlab/rendermime-interfaces';
import { RenderedJavaScript } from '@jupyterlab/rendermime';
export const TEXT_JAVASCRIPT_MIMETYPE = 'text/javascript';
export const APPLICATION_JAVASCRIPT_MIMETYPE = 'application/javascript';
function evalInContext(
code: string,
element: Element,
document: Document,
window: Window
) {
// tslint:disable-next-line
return eval(code);
}
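// Note: `element`, `document`, and `window` are not used in the body above, but
// they are deliberately parameters so that the eval'd code can reference them as
// locals in its scope.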
export class ExperimentalRenderedJavascript extends RenderedJavaScript {
render(model: IRenderMime.IMimeModel): Promise<void> {
const renderJavascript = () => {
try {
const data = model.data[this.mimeType] as string;
evalInContext(data, this.node, document, window);
return Promise.resolve();
} catch (error) {
return Promise.reject(error);
}
};
if (!model.trusted) {
      // If the output is not trusted, render a confirmation prompt instead;
      // the code only runs after the user clicks "Run".
this.node.innerHTML = `<pre>Are you sure that you want to run arbitrary Javascript within your JupyterLab session?</pre>
<button>Run</button>`;
this.node.querySelector('button').onclick = event => {
this.node.innerHTML = '';
void renderJavascript();
};
return Promise.resolve();
}
return renderJavascript();
}
}
/**
* A mime renderer factory for text/javascript data.
*/
export const rendererFactory: IRenderMime.IRendererFactory = {
safe: false,
  mimeTypes: [TEXT_JAVASCRIPT_MIMETYPE, APPLICATION_JAVASCRIPT_MIMETYPE],
  createRenderer: options => new ExperimentalRenderedJavascript(options)
};
const extension: IRenderMime.IExtension = {
id: '@jupyterlab/javascript-extension:factory',
rendererFactory,
rank: 0,
dataType: 'string'
};
export default extension; | |
cisco_ucs_ldap_provider_group.py | #!/usr/bin/env python
from ansible.module_utils.basic import *
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cisco_ucs_ldap_provider_group
short_description: configures ldap provider group on a cisco ucs server
version_added: 0.9.0.0
description:
- configures ldap provider group on a cisco ucs server
options:
state:
description:
- if C(present), will perform create/add/enable operation
- if C(absent), will perform delete/remove/disable operation
required: false
choices: ['present', 'absent']
default: "present"
name:
version_added: "1.4(1i)"
description: ldap provider group name
required: true
descr:
version_added: "1.4(1i)"
description: ldap provider group description
required: false
requirements: ['ucsmsdk', 'ucsm_apis']
author: "Cisco Systems Inc([email protected])"
'''
EXAMPLES = '''
- name:
cisco_ucs_ldap_provider_group:
name: "test_ldap_provider_group"
descr: "description"
state: "present"
ucs_ip: "192.168.1.1"
ucs_username: "admin"
ucs_password: "password"
'''
def _argument_mo():
return dict(
name=dict(required=True, type='str'),
descr=dict(type='str'),
)
def _argument_custom():
return dict(
state=dict(default="present",
choices=['present', 'absent'],
type='str'),
)
def _argument_connection():
return dict(
# UcsHandle
ucs_server=dict(type='dict'),
# Ucs server credentials
ucs_ip=dict(type='str'),
ucs_username=dict(default="admin", type='str'),
ucs_password=dict(type='str', no_log=True),
ucs_port=dict(default=None),
ucs_secure=dict(default=None),
ucs_proxy=dict(default=None)
)
def _ansible_module_create():
argument_spec = dict()
argument_spec.update(_argument_mo())
argument_spec.update(_argument_custom())
argument_spec.update(_argument_connection())
return AnsibleModule(argument_spec,
supports_check_mode=True)
def _get_mo_params(params):
from ansible.module_utils.cisco_ucs import UcsConnection
args = {}
for key in _argument_mo():
if params.get(key) is None:
            continue
        args[key] = params.get(key)
    return args
def setup_ldap_provider_group(server, module):
from ucsm_apis.admin.ldap import ldap_provider_group_create
from ucsm_apis.admin.ldap import ldap_provider_group_delete
from ucsm_apis.admin.ldap import ldap_provider_group_exists
ansible = module.params
args_mo = _get_mo_params(ansible)
exists, mo = ldap_provider_group_exists(handle=server, **args_mo)
if ansible["state"] == "present":
if module.check_mode or exists:
return not exists
ldap_provider_group_create(handle=server, **args_mo)
else:
if module.check_mode or not exists:
return exists
ldap_provider_group_delete(server, mo.name)
return True
def setup(server, module):
result = {}
err = False
try:
result["changed"] = setup_ldap_provider_group(server, module)
except Exception as e:
err = True
result["msg"] = "setup error: %s " % str(e)
result["changed"] = False
return result, err
def main():
from ansible.module_utils.cisco_ucs import UcsConnection
module = _ansible_module_create()
conn = UcsConnection(module)
server = conn.login()
result, err = setup(server, module)
conn.logout()
if err:
module.fail_json(**result)
module.exit_json(**result)
if __name__ == '__main__':
    main()
|
service.go | package service
import (
"context"
"encoding/json"
"fmt"
"html/template"
"os"
"os/exec"
"path"
"regexp"
"strings"
"github.com/cockroachdb/errors"
"github.com/hashicorp/go-multierror"
batcheslib "github.com/sourcegraph/sourcegraph/lib/batches"
"github.com/sourcegraph/src-cli/internal/api"
"github.com/sourcegraph/src-cli/internal/batches"
"github.com/sourcegraph/src-cli/internal/batches/docker"
"github.com/sourcegraph/src-cli/internal/batches/executor"
"github.com/sourcegraph/src-cli/internal/batches/graphql"
"github.com/sourcegraph/src-cli/internal/batches/repozip"
)
type Service struct {
allowUnsupported bool
allowIgnored bool
allowFiles bool
client api.Client
features batches.FeatureFlags
imageCache *docker.ImageCache
}
type Opts struct {
AllowUnsupported bool
AllowIgnored bool
AllowFiles bool
Client api.Client
}
var (
ErrMalformedOnQueryOrRepository = errors.New("malformed 'on' field; missing either a repository name or a query")
)
func New(opts *Opts) *Service {
return &Service{
allowUnsupported: opts.AllowUnsupported,
allowIgnored: opts.AllowIgnored,
allowFiles: opts.AllowFiles,
client: opts.Client,
imageCache: docker.NewImageCache(),
}
}
const sourcegraphVersionQuery = `query SourcegraphVersion {
site {
productVersion
}
}
`
// getSourcegraphVersion queries the Sourcegraph GraphQL API to get the
// current version of the Sourcegraph instance.
func (svc *Service) getSourcegraphVersion(ctx context.Context) (string, error) {
var result struct {
Site struct {
ProductVersion string
}
}
ok, err := svc.client.NewQuery(sourcegraphVersionQuery).Do(ctx, &result)
if err != nil || !ok {
return "", err
}
return result.Site.ProductVersion, err
}
// DetermineFeatureFlags fetches the version of the configured Sourcegraph
// instance and then sets flags on the Service itself to use features available
// in that version, e.g. gzip compression.
func (svc *Service) DetermineFeatureFlags(ctx context.Context) error {
version, err := svc.getSourcegraphVersion(ctx)
if err != nil {
return errors.Wrap(err, "failed to query Sourcegraph version to check for available features")
}
return svc.features.SetFromVersion(version)
}
// TODO(campaigns-deprecation): this shim can be removed in Sourcegraph 4.0.
func (svc *Service) newOperations() graphql.Operations {
return graphql.NewOperations(
svc.client,
svc.features.BatchChanges,
svc.features.UseGzipCompression,
)
}
func (svc *Service) newRequest(query string, vars map[string]interface{}) api.Request {
if svc.features.UseGzipCompression {
return svc.client.NewGzippedRequest(query, vars)
}
return svc.client.NewRequest(query, vars)
}
func (svc *Service) ApplyBatchChange(ctx context.Context, spec graphql.BatchSpecID) (*graphql.BatchChange, error) {
return svc.newOperations().ApplyBatchChange(ctx, spec)
}
func (svc *Service) CreateBatchSpec(ctx context.Context, namespace, spec string, ids []graphql.ChangesetSpecID) (graphql.BatchSpecID, string, error) {
result, err := svc.newOperations().CreateBatchSpec(ctx, namespace, spec, ids)
if err != nil {
return "", "", err
}
return result.ID, result.ApplyURL, nil
}
const createChangesetSpecMutation = `
mutation CreateChangesetSpec($spec: String!) {
createChangesetSpec(changesetSpec: $spec) {
... on HiddenChangesetSpec {
id
}
... on VisibleChangesetSpec {
id
}
}
}
`
func (svc *Service) CreateChangesetSpec(ctx context.Context, spec *batcheslib.ChangesetSpec) (graphql.ChangesetSpecID, error) {
raw, err := json.Marshal(spec)
if err != nil {
return "", errors.Wrap(err, "marshalling changeset spec JSON")
}
var result struct {
CreateChangesetSpec struct {
ID string
}
}
if ok, err := svc.newRequest(createChangesetSpecMutation, map[string]interface{}{
"spec": string(raw),
}).Do(ctx, &result); err != nil || !ok {
return "", err
}
return graphql.ChangesetSpecID(result.CreateChangesetSpec.ID), nil
}
// EnsureDockerImages iterates over the steps within the batch spec to ensure the
// images exist and to determine the exact content digest to be used when running
// each step, including any required by the service itself.
//
// Progress information is reported back to the given progress function as the
// number of images processed so far (done) out of the total number of steps (total).
func (svc *Service) EnsureDockerImages(ctx context.Context, spec *batcheslib.BatchSpec, progress func(done, total int)) (map[string]docker.Image, error) {
total := len(spec.Steps)
progress(0, total)
// TODO: this _really_ should be parallelised, since the image cache takes
// care to only pull the same image once.
images := make(map[string]docker.Image)
for i := range spec.Steps {
img, err := svc.EnsureImage(ctx, spec.Steps[i].Container)
if err != nil {
return nil, err
}
images[spec.Steps[i].Container] = img
progress(i+1, total)
}
return images, nil
}
func (svc *Service) HasDockerImages(spec *batcheslib.BatchSpec) bool {
return len(spec.Steps) > 0
}
func (svc *Service) EnsureImage(ctx context.Context, name string) (docker.Image, error) {
img := svc.imageCache.Get(name)
if err := img.Ensure(ctx); err != nil {
return nil, errors.Wrapf(err, "pulling image %q", name)
}
return img, nil
}
func (svc *Service) DetermineWorkspaces(ctx context.Context, repos []*graphql.Repository, spec *batcheslib.BatchSpec) ([]RepoWorkspace, error) {
return findWorkspaces(ctx, spec, svc, repos)
}
func (svc *Service) BuildTasks(ctx context.Context, spec *batcheslib.BatchSpec, workspaces []RepoWorkspace) []*executor.Task {
return buildTasks(ctx, spec, workspaces)
}
func (svc *Service) NewCoordinator(opts executor.NewCoordinatorOpts) *executor.Coordinator {
opts.RepoArchiveRegistry = repozip.NewArchiveRegistry(svc.client, opts.CacheDir, opts.CleanArchives)
opts.Features = svc.features
opts.EnsureImage = svc.EnsureImage
return executor.NewCoordinator(opts)
}
func (svc *Service) CreateImportChangesetSpecs(ctx context.Context, batchSpec *batcheslib.BatchSpec) ([]*batcheslib.ChangesetSpec, error) {
return batcheslib.BuildImportChangesetSpecs(ctx, batchSpec.ImportChangesets, func(ctx context.Context, repoNames []string) (_ map[string]string, errs error) {
repoNameIDs := map[string]string{}
for _, name := range repoNames {
repo, err := svc.resolveRepositoryName(ctx, name)
if err != nil {
wrapped := errors.Wrapf(err, "resolving repository name %q", name)
errs = multierror.Append(errs, wrapped)
continue
}
repoNameIDs[name] = repo.ID
}
return repoNameIDs, errs
})
}
// ValidateChangesetSpecs validates that among all branch changesets there are no
// duplicates in branch names in a single repo.
func (svc *Service) ValidateChangesetSpecs(repos []*graphql.Repository, specs []*batcheslib.ChangesetSpec) error {
repoByID := make(map[string]*graphql.Repository, len(repos))
for _, repo := range repos {
repoByID[repo.ID] = repo
}
byRepoAndBranch := make(map[string]map[string][]*batcheslib.ChangesetSpec)
for _, spec := range specs {
// We don't need to validate imported changesets, as they can
// never have a critical branch name overlap.
if spec.Type() == batcheslib.ChangesetSpecDescriptionTypeExisting {
continue
}
if _, ok := byRepoAndBranch[spec.HeadRepository]; !ok {
byRepoAndBranch[spec.HeadRepository] = make(map[string][]*batcheslib.ChangesetSpec)
}
byRepoAndBranch[spec.HeadRepository][spec.HeadRef] = append(byRepoAndBranch[spec.HeadRepository][spec.HeadRef], spec)
}
duplicates := make(map[*graphql.Repository]map[string]int)
for repoID, specsByBranch := range byRepoAndBranch {
for branch, specs := range specsByBranch {
if len(specs) < 2 {
continue
}
r := repoByID[repoID]
if _, ok := duplicates[r]; !ok {
duplicates[r] = make(map[string]int)
}
duplicates[r][branch] = len(specs)
}
}
if len(duplicates) > 0 {
return &duplicateBranchesErr{duplicates: duplicates}
}
return nil
}
type duplicateBranchesErr struct {
duplicates map[*graphql.Repository]map[string]int
}
func (e *duplicateBranchesErr) Error() string {
var out strings.Builder
fmt.Fprintf(&out, "Multiple changeset specs have the same branch:\n\n")
for repo, branches := range e.duplicates {
for branch, duplicates := range branches {
branch = strings.TrimPrefix(branch, "refs/heads/")
fmt.Fprintf(&out, "\t* %s: %d changeset specs have the branch %q\n", repo.Name, duplicates, branch)
}
}
fmt.Fprint(&out, "\nMake sure that the changesetTemplate.branch field in the batch spec produces unique values for each changeset in a single repository and rerun this command.")
return out.String()
}
func (svc *Service) ParseBatchSpec(data []byte) (*batcheslib.BatchSpec, error) {
spec, err := batcheslib.ParseBatchSpec(data, batcheslib.ParseBatchSpecOptions{
AllowArrayEnvironments: svc.features.AllowArrayEnvironments,
AllowTransformChanges: svc.features.AllowTransformChanges,
AllowConditionalExec: svc.features.AllowConditionalExec,
AllowFiles: svc.allowFiles,
})
if err != nil {
return nil, errors.Wrap(err, "parsing batch spec")
}
return spec, nil
}
const exampleSpecTmpl = `name: NAME-OF-YOUR-BATCH-CHANGE
description: DESCRIPTION-OF-YOUR-BATCH-CHANGE
# "on" specifies on which repositories to execute the "steps".
on:
# Example: find all repositories that contain a README.md file.
- repositoriesMatchingQuery: file:README.md
# "steps" are run in each repository. Each step is run in a Docker container
# with the repository as the working directory. Once complete, each
# repository's resulting diff is captured.
steps:
# Example: append "Hello World" to every README.md
- run: echo "Hello World" | tee -a $(find -name README.md)
container: alpine:3
# "changesetTemplate" describes the changeset (e.g., GitHub pull request) that
# will be created for each repository.
changesetTemplate:
title: Hello World
body: This adds Hello World to the README
branch: BRANCH-NAME-IN-EACH-REPOSITORY # Push the commit to this branch.
commit:
author:
name: {{ .Author.Name }}
email: {{ .Author.Email }}
message: Append Hello World to all README.md files
`
const exampleSpecPublishFlagTmpl = `
# Change published to true once you're ready to create changesets on the code host.
published: false
`
func (svc *Service) GenerateExampleSpec(ctx context.Context, fileName string) error {
// Try to create file. Bail out, if it already exists.
f, err := os.OpenFile(fileName, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0644)
if err != nil {
if os.IsExist(err) {
return fmt.Errorf("file %s already exists", fileName)
}
return errors.Wrapf(err, "failed to create file %s", fileName)
}
defer f.Close()
t := exampleSpecTmpl
if !svc.features.AllowOptionalPublished {
t += exampleSpecPublishFlagTmpl
}
tmpl, err := template.New("").Parse(t)
if err != nil {
return err
}
author := batcheslib.GitCommitAuthor{
Name: "Sourcegraph",
Email: "[email protected]",
}
// Try to get better default values from git, ignore any errors.
gitAuthorName, err1 := getGitConfig("user.name")
gitAuthorEmail, err2 := getGitConfig("user.email")
if err1 == nil && err2 == nil && gitAuthorName != "" && gitAuthorEmail != "" {
author.Name = gitAuthorName
author.Email = gitAuthorEmail
}
err = tmpl.Execute(f, map[string]interface{}{"Author": author})
if err != nil {
return errors.Wrap(err, "failed to write batch spec to file")
}
return nil
}
const namespaceQuery = `
query NamespaceQuery($name: String!) {
user(username: $name) {
id
}
organization(name: $name) {
id
}
}
`
const usernameQuery = `
query GetCurrentUserID {
currentUser {
id
}
}
`
func (svc *Service) ResolveNamespace(ctx context.Context, namespace string) (string, error) {
if namespace == "" {
// if no namespace is provided, default to logged in user as namespace
var resp struct {
Data struct {
CurrentUser struct {
ID string `json:"id"`
} `json:"currentUser"`
} `json:"data"`
}
if ok, err := svc.client.NewRequest(usernameQuery, nil).DoRaw(ctx, &resp); err != nil || !ok {
return "", errors.WithMessage(err, "failed to resolve namespace: no user logged in")
}
if resp.Data.CurrentUser.ID == "" {
return "", errors.New("cannot resolve current user")
}
return resp.Data.CurrentUser.ID, nil
}
var result struct {
Data struct {
User *struct{ ID string }
Organization *struct{ ID string }
}
Errors []interface{}
}
if ok, err := svc.client.NewRequest(namespaceQuery, map[string]interface{}{
"name": namespace,
}).DoRaw(ctx, &result); err != nil || !ok {
return "", err
}
if result.Data.User != nil {
return result.Data.User.ID, nil
}
if result.Data.Organization != nil {
return result.Data.Organization.ID, nil
}
return "", fmt.Errorf("failed to resolve namespace %q: no user or organization found", namespace)
}
func (svc *Service) ResolveRepositories(ctx context.Context, spec *batcheslib.BatchSpec) ([]*graphql.Repository, error) {
seen := map[string]*graphql.Repository{}
unsupported := batches.UnsupportedRepoSet{}
ignored := batches.IgnoredRepoSet{}
// TODO: this could be trivially parallelised in the future.
for _, on := range spec.On {
repos, err := svc.ResolveRepositoriesOn(ctx, &on)
if err != nil {
return nil, errors.Wrapf(err, "resolving %q", on.String())
}
reposWithBranch := make([]*graphql.Repository, 0, len(repos))
for _, repo := range repos {
if !repo.HasBranch() {
continue
}
reposWithBranch = append(reposWithBranch, repo)
}
var repoBatchIgnores map[*graphql.Repository][]string
if !svc.allowIgnored {
repoBatchIgnores, err = svc.FindDirectoriesInRepos(ctx, ".batchignore", reposWithBranch...)
if err != nil {
return nil, err
}
}
for _, repo := range reposWithBranch {
if other, ok := seen[repo.ID]; !ok {
seen[repo.ID] = repo
switch st := strings.ToLower(repo.ExternalRepository.ServiceType); st {
case "github", "gitlab", "bitbucketserver":
default:
if !svc.allowUnsupported {
unsupported.Append(repo)
}
}
if !svc.allowIgnored {
if locations, ok := repoBatchIgnores[repo]; ok && len(locations) > 0 {
ignored.Append(repo)
}
}
} else {
// If we've already seen this repository, we overwrite the
// Commit/Branch fields with the latest value we have
other.Commit = repo.Commit
other.Branch = repo.Branch
}
}
}
final := make([]*graphql.Repository, 0, len(seen))
for _, repo := range seen {
if !unsupported.Includes(repo) && !ignored.Includes(repo) {
final = append(final, repo)
}
}
if unsupported.HasUnsupported() {
return final, unsupported
}
if ignored.HasIgnored() {
return final, ignored
}
return final, nil
}
func (svc *Service) ResolveRepositoriesOn(ctx context.Context, on *batcheslib.OnQueryOrRepository) ([]*graphql.Repository, error) {
if on.RepositoriesMatchingQuery != "" {
return svc.resolveRepositorySearch(ctx, on.RepositoriesMatchingQuery)
} else if on.Repository != "" && on.Branch != "" {
repo, err := svc.resolveRepositoryNameAndBranch(ctx, on.Repository, on.Branch)
if err != nil {
return nil, err
}
return []*graphql.Repository{repo}, nil
} else if on.Repository != "" {
repo, err := svc.resolveRepositoryName(ctx, on.Repository)
if err != nil {
return nil, err
}
return []*graphql.Repository{repo}, nil
}
// This shouldn't happen on any batch spec that has passed validation, but,
// alas, software.
return nil, ErrMalformedOnQueryOrRepository
}
const repositoryNameQuery = `
query Repository($name: String!, $queryCommit: Boolean!, $rev: String!) {
repository(name: $name) {
...repositoryFields
}
}
` + graphql.RepositoryFieldsFragment
func (svc *Service) resolveRepositoryName(ctx context.Context, name string) (*graphql.Repository, error) {
var result struct{ Repository *graphql.Repository }
if ok, err := svc.client.NewRequest(repositoryNameQuery, map[string]interface{}{
"name": name,
"queryCommit": false,
"rev": "",
}).Do(ctx, &result); err != nil || !ok {
return nil, err
}
if result.Repository == nil {
return nil, errors.New("no repository found")
}
return result.Repository, nil
}
func (svc *Service) resolveRepositoryNameAndBranch(ctx context.Context, name, branch string) (*graphql.Repository, error) {
var result struct{ Repository *graphql.Repository }
if ok, err := svc.client.NewRequest(repositoryNameQuery, map[string]interface{}{
"name": name,
"queryCommit": true,
"rev": branch,
}).Do(ctx, &result); err != nil || !ok {
return nil, err
}
if result.Repository == nil {
return nil, errors.New("no repository found")
}
if result.Repository.Commit.OID == "" {
return nil, fmt.Errorf("no branch matching %q found for repository %s", branch, name)
}
result.Repository.Branch = graphql.Branch{
Name: branch,
Target: result.Repository.Commit,
}
return result.Repository, nil
}
// TODO: search result alerts.
const repositorySearchQuery = `
query ChangesetRepos(
$query: String!,
$queryCommit: Boolean!,
$rev: String!,
) {
search(query: $query, version: V2) {
results {
results {
__typename
... on Repository {
...repositoryFields
}
... on FileMatch {
file { path }
repository {
...repositoryFields
}
}
}
}
}
}
` + graphql.RepositoryFieldsFragment
func (svc *Service) resolveRepositorySearch(ctx context.Context, query string) ([]*graphql.Repository, error) {
var result struct {
Search struct {
Results struct {
Results []searchResult
}
}
}
if ok, err := svc.client.NewRequest(repositorySearchQuery, map[string]interface{}{
"query": setDefaultQueryCount(query),
"queryCommit": false,
"rev": "",
}).Do(ctx, &result); err != nil || !ok {
return nil, err
}
ids := map[string]*graphql.Repository{}
var repos []*graphql.Repository
for _, r := range result.Search.Results.Results {
existing, ok := ids[r.ID]
if !ok {
repo := r.Repository
repos = append(repos, &repo)
ids[r.ID] = &repo
} else {
for file := range r.FileMatches {
existing.FileMatches[file] = true
}
}
}
return repos, nil
}
// findDirectoriesResult maps the alias of each GraphQL search query to its
// results. The alias is the generated per-repository query ID (e.g. "repo_0").
type findDirectoriesResult map[string]struct {
Results struct{ Results []searchResult }
}
const searchQueryTmpl = `%s: search(query: %q, version: V2) {
results {
results {
__typename
... on FileMatch {
file { path }
}
}
}
}
`
const findDirectoriesInReposBatchSize = 50
// FindDirectoriesInRepos returns a map of repositories and the locations of
// files matching the given file name in the repository.
// The locations are paths relative to the repository root, with no leading "/".
// The root directory itself is represented by the empty string (path.Dir's "."
// is mapped to "" below, matching what the executor expects).
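// For example, a match at "dir/sub/.batchignore" yields "dir/sub".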
func (svc *Service) FindDirectoriesInRepos(ctx context.Context, fileName string, repos ...*graphql.Repository) (map[*graphql.Repository][]string, error) {
// Build up unique identifiers that are safe to use as GraphQL query aliases.
reposByQueryID := map[string]*graphql.Repository{}
queryIDByRepo := map[*graphql.Repository]string{}
for i, repo := range repos {
queryID := fmt.Sprintf("repo_%d", i)
reposByQueryID[queryID] = repo
queryIDByRepo[repo] = queryID
}
findInBatch := func(batch []*graphql.Repository, results map[*graphql.Repository][]string) error {
var a strings.Builder
a.WriteString("query DirectoriesContainingFile {\n")
for _, repo := range batch {
query := fmt.Sprintf(`file:(^|/)%s$ repo:^%s$@%s type:path count:99999`, regexp.QuoteMeta(fileName), regexp.QuoteMeta(repo.Name), repo.Rev())
a.WriteString(fmt.Sprintf(searchQueryTmpl, queryIDByRepo[repo], query))
}
a.WriteString("}")
var result findDirectoriesResult
if ok, err := svc.client.NewQuery(a.String()).Do(ctx, &result); err != nil || !ok {
return err
}
for queryID, search := range result {
repo, ok := reposByQueryID[queryID]
if !ok {
return fmt.Errorf("result for query %q did not match any repository", queryID)
}
files := map[string]struct{}{}
for _, r := range search.Results.Results {
for file := range r.FileMatches {
files[file] = struct{}{}
}
}
var dirs []string
for f := range files {
dir := path.Dir(f)
// "." means the path is root, but in the executor we use "" to signify root.
if dir == "." {
dir = ""
}
// We use path.Dir and not filepath.Dir here, because while
// src-cli might be executed on Windows, we need the paths to
// be Unix paths, since they will be used inside Docker
// containers.
dirs = append(dirs, dir)
}
results[repo] = dirs
}
return nil
}
results := make(map[*graphql.Repository][]string)
for start := 0; start < len(repos); start += findDirectoriesInReposBatchSize {
if err := ctx.Err(); err != nil {
return nil, err
}
end := start + findDirectoriesInReposBatchSize
if end > len(repos) {
end = len(repos)
}
batch := repos[start:end]
err := findInBatch(batch, results)
if err != nil {
return results, err
}
}
return results, nil
}
var defaultQueryCountRegex = regexp.MustCompile(`\bcount:(\d+|all)\b`)
const hardCodedCount = " count:999999"
func setDefaultQueryCount(query string) string {
if defaultQueryCountRegex.MatchString(query) {
return query
}
return query + hardCodedCount
}
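// e.g. setDefaultQueryCount("repo:foo") returns "repo:foo count:999999", while a
// query that already pins a count, such as "repo:foo count:10", is left unchanged.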
type searchResult struct {
graphql.Repository
}
func (sr *searchResult) UnmarshalJSON(data []byte) error {
var tn struct {
Typename string `json:"__typename"`
}
if err := json.Unmarshal(data, &tn); err != nil {
return err
}
switch tn.Typename {
case "FileMatch":
var result struct {
Repository graphql.Repository
File struct {
Path string
}
}
if err := json.Unmarshal(data, &result); err != nil {
return err
}
sr.Repository = result.Repository
sr.Repository.FileMatches = map[string]bool{result.File.Path: true}
return nil
case "Repository":
if err := json.Unmarshal(data, &sr.Repository); err != nil {
return err
}
sr.Repository.FileMatches = map[string]bool{}
return nil
default:
return errors.Errorf("unknown GraphQL type %q", tn.Typename)
}
}
func getGitConfig(attribute string) (string, error) {
cmd := exec.Command("git", "config", "--get", attribute)
out, err := cmd.CombinedOutput()
if err != nil {
return "", err
}
return strings.TrimSpace(string(out)), nil
}
plan_insert_into.rs | // Copyright 2021 Datafuse Labs.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use common_datavalues::DataSchemaRef;
use common_meta_types::MetaId;
use crate::Expression;
use crate::PlanNode;
#[derive(serde::Serialize, serde::Deserialize, Clone)]
pub struct InsertIntoPlan {
pub db_name: String,
pub tbl_name: String,
pub tbl_id: MetaId,
pub schema: DataSchemaRef,
pub overwrite: bool,
pub select_plan: Option<Box<PlanNode>>,
pub value_exprs_opt: Option<Vec<Vec<Expression>>>,
pub format: Option<String>,
}
impl PartialEq for InsertIntoPlan {
fn eq(&self, other: &Self) -> bool {
self.db_name == other.db_name
&& self.tbl_name == other.tbl_name
&& self.schema == other.schema
}
}
impl InsertIntoPlan {
pub fn schema(&self) -> DataSchemaRef {
self.schema.clone()
}
pub fn insert_select(
db: String,
table: String,
table_meta_id: MetaId,
schema: DataSchemaRef,
overwrite: bool,
select_plan: PlanNode,
) -> InsertIntoPlan {
InsertIntoPlan {
db_name: db,
tbl_name: table,
tbl_id: table_meta_id,
schema,
overwrite,
select_plan: Some(Box::new(select_plan)),
value_exprs_opt: None,
format: None,
}
}
pub fn insert_values(
db: String,
table: String,
table_meta_id: MetaId,
schema: DataSchemaRef,
overwrite: bool,
values: Vec<Vec<Expression>>,
) -> InsertIntoPlan {
InsertIntoPlan {
db_name: db,
tbl_name: table,
tbl_id: table_meta_id,
schema,
overwrite,
select_plan: None,
value_exprs_opt: Some(values),
format: None,
}
}
pub fn insert_without_source(
db: String,
table: String,
table_meta_id: MetaId,
schema: DataSchemaRef,
overwrite: bool,
format: Option<String>,
) -> InsertIntoPlan {
InsertIntoPlan {
db_name: db,
tbl_name: table,
            tbl_id: table_meta_id,
            schema,
            overwrite,
            select_plan: None,
            value_exprs_opt: None,
            format,
}
}
}
sum-of-multiples.rs | pub fn sum_of_multiples(limit: u32, factors: &[u32]) -> u32 {
let mut sum = 0;
for num in 1..limit {
for factor in factors {
if *factor == 0 {
continue;
}
if num % factor == 0 {
sum += num;
break;
}
}
}
    sum
} |
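// e.g. sum_of_multiples(20, &[3, 5]) == 78 (3 + 5 + 6 + 9 + 10 + 12 + 15 + 18).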
|
modules_core.py | """This module contains base classes and types for creating new Modules and using module trees.
"""
import abc
from collections import deque
import numpy as np
class ModuleBase(object):
"""The base interface for all modules. Modules must inherit from this interface.
"""
__metaclass__ = abc.ABCMeta
def __init__(self):
self._inputs = []
self._outputs = []
def register_inputs(self, *args):
"""Registers inputs to this module.
Parameters
----------
inputs : Variable number of inputs to register.
"""
for arg in args:
if not isinstance(arg, InputPort):
raise ValueError('All inputs must be InputPort type')
self._inputs.append(arg)
def register_outputs(self, *args):
"""Registers outputs to this module.
Parameters
----------
outputs : Variable number of outputs to register.
"""
for arg in args:
if not isinstance(arg, OutputPort):
raise ValueError('All outputs must be OutputPort type')
self._outputs.append(arg)
def foreprop_ready(self):
"""Returns if the module is ready to forward-propagate.
Default implementation returns true when all inputs are ready and
not all outputs are set.
Returns
-------
ready : Boolean denoting if the module is ready to foreprop
"""
return all(self._inputs) and not all(self._outputs)
@abc.abstractmethod
def foreprop(self):
"""Perform forward-propagation for this module.
Returns
-------
ready : The aggregated return list from all forepropped output ports.
"""
return []
def backprop_ready(self):
"""Returns if the module is ready to backward-propagate.
Typically this is when all outputs have received all backprops.
Default implementation checks to see if all outputs are ready to backprop.
Returns
-------
ready : Boolean denoting if the module is ready to backprop
"""
return all([o.backprop_ready() for o in self._outputs])
@abc.abstractmethod
def backprop(self):
"""Perform backward-propagation for this module.
Returns
-------
ready : The aggregated return list from all backpropped input ports.
"""
return []
def is_invalid(self):
"""Returns if the module is fully invalidated.
Typically this is when all ports are invalidated.
Default implementation checks to see if all ports are invalidated.
Returns
-------
invalid : Boolean denoting if this module is fully invalid
"""
return not any(self._inputs) and not any(self._outputs)
def invalidate(self):
"""Invalidate this modules' inputs and outputs.
Default implementation first checks to see if the module is already invalid.
If it is not, it calls invalidate on all inputs and outputs.
Returns
-------
ready : List of modules to invalidate next.
"""
if self.is_invalid():
return []
ready = []
for i in self._inputs:
ready += i.invalidate()
for o in self._outputs:
ready += o.invalidate()
return ready
# TODO Ways to unregister port connections
class InputPort(object):
"""An input to a module. Ideally instantiated as a member of the module.
Parameters
----------
module : The owning module. Must implement the ModuleBase interface.
"""
def __init__(self, module):
if not isinstance(module, ModuleBase):
raise ValueError('module must implement ModuleBase')
self._module = module
self._value = None
self._source = None
def __nonzero__(self):
"""Override of Python boolean test operator to return if the port has a value.
Returns
-------
ready : Boolean denoting if the port has a valid value.
"""
return self._value is not None
def invalidate(self):
"""Invalidate this input port and propagate to the module and source.
Returns
-------
valid : List of modules to invalidate next.
"""
# If we're already invalidated, there's nothing for us to do here
if not self:
return []
self._value = None
valid = []
# If the owning module is not invalid, return it
if not self._module.is_invalid():
            valid.append(self._module)
# Propagate invalidation to source
if self._source is not None:
valid += self._source.invalidate()
return valid
def foreprop(self, v):
"""Set this port's value and forward-propagate.
Typically only called by OutputPorts.
Parameters
----------
v : The value to set the port to.
Returns
-------
ready : List of modules to foreprop next.
"""
self._value = v
if self._module.foreprop_ready():
return [self._module]
else:
return []
def backprop(self, do_dx):
"""Give this port a backpropagation accumulator to pass on.
Typically called by the owning module.
Parameters
----------
do_dx : Numpy 2D array Jacobian[i,j] of tree outputs[i] w.r.t. this input port elements[j].
Returns
-------
ready : List of modules to backprop next.
"""
if self._source is not None:
return self._source.backprop(do_dx)
else:
return []
def register_source(self, src):
"""Register an OutputPort source for this port.
Parameters
----------
src : OutputPort to take as the source of this port.
"""
if not isinstance(src, OutputPort):
raise ValueError('src must be OutputPort')
self._source = src
@property
def value(self):
return self._value
class OutputPort(object):
"""An output from a module. Typically instantiated as a module member.
Parameters
----------
module : The owning module. Must implement the ModuleBase interface.
"""
def __init__(self, module):
if not isinstance(module, ModuleBase):
raise ValueError('module must implement ModuleBase')
self._module = module
self._backprop_acc = None
self._num_backs = 0
self._value = None
self._consumers = []
def __nonzero__(self):
"""Override of Python boolean test operator to return whether this port has a value.
"""
return self.value is not None
@property
def num_consumers(self):
"""Return the number of registered consumers.
"""
return len(self._consumers)
@property
def value(self):
return self._value
def register_consumer(self, con):
"""Register an InputPort consumer to this port.
"""
if not isinstance(con, InputPort):
raise ValueError('Consumer must be InputPort')
self._consumers.append(con)
def invalidate(self):
"""Invalidate this port and propagate.
Returns
-------
valid : List of modules to invalidate next
"""
# If we're already invalid, there's nothing to do
if not self:
return []
self._backprop_acc = None
self._num_backs = 0
self._value = None
valid = []
if not self._module.is_invalid():
valid.append(self._module)
for con in self._consumers:
valid += con.invalidate()
return valid
def foreprop(self, v):
"""Perform forward-propagation through this output.
Typically called by the owning module.
Parameters
----------
v : The value to set this port to.
Returns
-------
ready : List of modules to foreprop next.
"""
self._value = v
ready = []
for con in self._consumers:
ready += con.foreprop(self._value)
return ready
def backprop(self, do_dx):
"""Perform backward-propagation through this output.
Typically called by a connected InputPort.
Only propagates when data from all registered consumers is received.
Parameters
----------
do_dx : Numpy 2D array Jacobian[i,j] of tree outputs[i] w.r.t. this input port elements[j]
Returns
-------
ready : List of modules to backprop next
"""
if do_dx is None:
raise RuntimeError('OutputPort received None backprop value.')
do_dx.tick_descent()
if self._backprop_acc is None:
self._backprop_acc = do_dx
else:
self._backprop_acc += do_dx
self._num_backs += 1
# Check for backprop errors
if self._num_backs > len(self._consumers):
errstr = 'Received %d backprops for %d consumers!' % (self._num_backs, len(self._consumers))
raise RuntimeError(errstr)
# If we've heard from every consumer and our module is ready
if self.backprop_ready() and self._module.backprop_ready():
return [self._module]
else:
return []
def backprop_ready(self):
"""Returns if this port has heard from all its consumers.
"""
return self._num_backs == self.num_consumers
def chain_backprop(self, dy_dx=None):
"""Returns a copy of this port's backprop accumulator right-multiplied by the
given gradient. If the port has not received a backprop, returns None.
"""
if self._backprop_acc is None:
return None
#raise RuntimeError('Cannot chain backprop! Port has not received do_dx.')
out_acc = self._backprop_acc.copy()
if dy_dx is not None:
out_acc = out_acc * dy_dx
return out_acc
@property
def backprop_accumulator(self):
"""Returns the port's backprop accumulator.
"""
return self._backprop_acc
@property
def backprop_value(self):
if self._backprop_acc is None:
return 0
else:
return self._backprop_acc.retrieve()
def link_ports(in_port, out_port):
"""Join an input and output port together.
Parameters
----------
in_port : InputPort to join
out_port : OutputPort to join
"""
if not isinstance(in_port, InputPort):
raise ValueError('in_port must be an InputPort.')
if not isinstance(out_port, OutputPort):
raise ValueError('out_port must be an OutputPort.')
in_port.register_source(out_port)
out_port.register_consumer(in_port)
# @profile
def iterative_operation(init_module, op):
# TODO Allow taking list of initial modules
"""Iteratively perform an operation on a module tree.
This function should be used instead of recursive calls, which do not scale
to deep trees very well.
Parameters
----------
init_module : Module to begin iteration on
op : Function that takes a module and returns a list of modules to operate on next
"""
to_prop = deque()
to_prop.append(init_module)
while len(to_prop) > 0:
current = to_prop.popleft()
ready_children = op(current)
for c in ready_children:
to_prop.append(c)
def iterative_foreprop(init_module):
"""Iterative forward-pass propagation on a module tree.
"""
op = lambda x: x.foreprop()
iterative_operation(init_module, op)
def iterative_backprop(init_module):
"""Iterative backward-pass propagation on a module tree.
"""
op = lambda x: x.backprop()
iterative_operation(init_module, op)
def iterative_invalidate(init_module):
"""Iterative invalidation on a module tree.
"""
op = lambda x: x.invalidate()
iterative_operation(init_module, op)
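# Minimal usage sketch (illustrative only; _Passthrough is not part of this
# module): a module that forwards its input unchanged, wired into a two-node
# tree and forward-propagated iteratively.
class _Passthrough(ModuleBase):
    def __init__(self):
        super(_Passthrough, self).__init__()
        self.x_in = InputPort(self)
        self.y_out = OutputPort(self)
        self.register_inputs(self.x_in)
        self.register_outputs(self.y_out)

    def foreprop(self):
        # Forward the input value straight to the output port.
        return self.y_out.foreprop(self.x_in.value)

    def backprop(self):
        # Pass the accumulated backprop straight back to the input port.
        return self.x_in.backprop(self.y_out.chain_backprop())

def _passthrough_example():
    a = _Passthrough()
    b = _Passthrough()
    link_ports(in_port=b.x_in, out_port=a.y_out)
    a.x_in.foreprop(np.array([1.0, 2.0]))  # seed the tree's input
    iterative_foreprop(a)
    return b.y_out.value  # -> array([1., 2.])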
api.py | # coding: utf-8
import json
import datetime
import hashlib
import webapp2
from google.appengine.api import users, memcache
from google.appengine.ext import ndb
import sil_model
import split_pgn
CHUNK_SIZE = 10
"""
Make sample opening PGN
parse_stm does not seem to handle comments (with åäö?)
--- LATER ---
Memorize games:
find worthy games
maybe import from chessgames.com?
choose source type
if source is "game", choose moves
"forgive me"-knapp som säger att draget inte ska räknas som fel
Instant assessment:
Autogenerate such FENs + score from DB
Way to input and score assessment
Semi-blind tactics:
find games with tactic (crawl or calculate, preferably crawl)
Show position X moves before
Promote to other than queen
Make design nicer with semantic UI instead of jquery ui
More fact management:
keep fact times for same ID
inaktivera fact
list facts for source (so can reactivate)
delete facts when source deleted
Remove immediate stat, after create
Create-spinner
Custom CSS for mobile
Fix open new window, board bug
"""
def add_success(fact):
memdb = memcache.Client()
i = memdb.incr('nranswer_%s' % fact.userid, initial_value=0)
return i
def add_fail(fact):
memdb = memcache.Client()
i = memdb.incr('nranswer_%s' % fact.userid, initial_value=0)
fails = memdb.get(fact.userid)
if not fails:
fails = []
fails = [f for f in fails if f != fact]
fails.append((fact, i - 1))
fails = fails[-CHUNK_SIZE:]
memdb.set(fact.userid, fails)
def get_fail(user_id):
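    # Re-serve a failed fact only once at least CHUNK_SIZE answers have been
    # recorded since the failure, so retries are spaced out.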
memdb = memcache.Client()
fails = memdb.get(user_id)
if not fails:
return None
i = memdb.get('nranswer_%s' % user_id)
if i is None:
i = 0
if fails[0][1] + CHUNK_SIZE > i:
return None
fact = fails.pop(0)[0]
memdb.set(user_id, fails)
return fact
def get_fact(source_id, fact_id):
fact = ndb.Key(sil_model.Factlet,
long(fact_id),
parent=ndb.Key(sil_model.Source, long(source_id))).get()
return fact
class RestHandler(webapp2.RequestHandler):
def jsonify(self, d):
self.response.headers['Content-Type'] = 'application/json'
self.response.out.write(json.dumps(d))
class MainPage(webapp2.RequestHandler):
def get(self):
self.response.headers['Content-Type'] = 'text/plain'
self.response.write('Hello, World!')
class CreateSource(RestHandler):
def get(self):
user = users.get_current_user()
query = sil_model.Source.query(
sil_model.Source.userid == user.user_id())
self.jsonify(
{'sources': [source.to_jdict() for source in query.iter()]})
def post(self):
user = users.get_current_user()
source = sil_model.Source(
userid=user.user_id(),
name=self.request.get('name'),
fact_type=self.request.get('fact_type'))
source.put()
self.jsonify({'key': source.key.id()})
class SingleSource(RestHandler):
def get(self, source_id):
source = ndb.Key(sil_model.Source, long(source_id)).get()
d = source.to_jdict()
d['facts'] = [f.to_jdict()
for f in sil_model.Factlet.query_source(source_id)]
self.jsonify(d)
def delete(self, source_id):
ndb.Key(sil_model.Source, long(source_id)).delete()
class CreateFact(RestHandler):
def get(self):
user = users.get_current_user()
query = sil_model.Factlet.query(
sil_model.Factlet.userid == user.user_id())
self.jsonify(
{'keys': [key.id() for key in query.iter(keys_only=True)]})
def post(self):
user = users.get_current_user()
fact = self.request.get('fact').encode('utf8')
fact_obj = sil_model.Factlet(
parent=ndb.Key(sil_model.Source,
long(self.request.get('source_id'))),
userid=user.user_id(),
fact=fact, )
fact_obj.put()
self.jsonify({'key': fact_obj.key.id()})
class SingleFact(RestHandler):
def get(self, source_id, fact_id):
fact = get_fact(source_id, fact_id)
self.jsonify(fact.to_jdict())
def delete(self, source_id, fact_id):
parent = ndb.Key(sil_model.Source, long(source_id))
ndb.Key(sil_model.Factlet, long(fact_id), parent=parent).delete()
class SourceLearner(RestHandler):
def get(self, source_id):
user = users.get_current_user()
fact = get_fail(user.user_id())
if not fact or int(fact.key.parent().get().key.id()) != int(source_id):
fact = sil_model.Factlet.get_next(user.user_id(), source_id)
self.jsonify(fact.to_jdict())
class Answer(SourceLearner):
def post(self, source_id, fact_id, result):
fact = get_fact(source_id, fact_id)
if result == 'success':
fact.success()
add_success(fact)
else:
fact.fail()
add_fail(fact)
fact.put()
self.get(source_id)
class SourceStat(RestHandler):
def get(self, source_id):
user = users.get_current_user()
tot = sil_model.Factlet.count(user.user_id(), source_id)
        left = sil_model.Factlet.count_left(user.user_id(), source_id)
nextfact = sil_model.Factlet.get_next(user.user_id(), source_id)
if nextfact:
next = (nextfact.next_scheduled - datetime.datetime(1970, 1, 1)
).total_seconds()
else:
next = 0
self.jsonify({'total': tot,
'left': left,
'key': source_id,
'next': next})
class AddOpening(RestHandler):
def post(self, source_id):
user = users.get_current_user()
source = ndb.Key(sil_model.Source, long(source_id))
color = self.request.get('color')
def make_fact(pgn, headers):
hid = hashlib.md5(user.user_id() + ''.join(x['move'] for x in
pgn)).hexdigest()
hid = int(hid[:14], 16)
fd = {'moves': pgn, 'orientation': color}
if 'FEN' in headers:
fd['fen'] = headers['FEN']
fd['orientation'] = 'b' if ' w ' in fd['fen'] else 'w'
fact = sil_model.Factlet(
parent=source,
id=hid,
userid=user.user_id(),
# use 'fen' for start positions
fact=json.dumps(fd), )
return fact
pgns = split_pgn.split_pgns(self.request.get('pgn'), color == 'w')
keys = ndb.put_multi(
[make_fact(pgn, headers) for pgn, headers in pgns])
self.jsonify({'keys': [key.id() for key in keys]})
class StageData(RestHandler):
def get(self):
user = users.get_current_user()
source = sil_model.Source(
userid=user.user_id(), name='stage', fact_type='opening')
source.put()
color = 'b'
def make_fact(pgn):
fact = sil_model.Factlet(
parent=source.key,
userid=user.user_id(),
# use 'fen' for start positions
fact=json.dumps({'moves': pgn,
'orientation': color}), )
return fact
pgns = split_pgn.split_pgn(open('data/black.pgn').read(), color == 'w')
keys = ndb.put_multi([make_fact(pgn) for pgn in pgns])
self.jsonify(source.key.id())
app = webapp2.WSGIApplication(
[
('/', MainPage), ('/fact', CreateFact), ('/source', CreateSource),
('/source/(\d+)', SingleSource), ('/source/(\d+)/(\d+)', SingleFact),
('/source/(\d+)/(\d+)/(success|fail)',
Answer), ('/source/(\d+)/next', SourceLearner),
('/source/(\d+)/stat', SourceStat),
('/source/(\d+)/opening', AddOpening), ('/stagedata', StageData)
],
debug=True) | |
0002_tag.py | # Generated by Django 3.1.4 on 2020-12-12 06:48
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
        ('core', '0001_initial'),
    ]
    operations = [
migrations.CreateModel(
name='Tag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
    ]
|
bhcache.go | package dman
import (
"strconv"
"sync"
"github.com/MadBase/MadNet/consensus/objs"
"github.com/MadBase/MadNet/utils"
)
type bHCache struct {
sync.Mutex
cache map[uint32]string
}
func (bhc *bHCache) Init() error {
bhc.cache = make(map[uint32]string)
return nil
}
func (bhc *bHCache) Add(bh *objs.BlockHeader) error {
bhc.Lock()
defer bhc.Unlock()
bhBytes, err := bh.MarshalBinary()
if err != nil {
return err
}
bhc.cache[bh.BClaims.Height] = string(bhBytes)
return nil
}
func (bhc *bHCache) Contains(height uint32) bool {
bhc.Lock()
defer bhc.Unlock()
if _, ok := bhc.getInternal(height); ok {
return true
}
return false
}
func (bhc *bHCache) Get(height uint32) (*objs.BlockHeader, bool) {
bhc.Lock()
defer bhc.Unlock()
return bhc.getInternal(height)
}
func (bhc *bHCache) getInternal(height uint32) (*objs.BlockHeader, bool) {
bhIf, ok := bhc.cache[height]
if ok {
bhString := bhIf
bhBytes := []byte(bhString)
bhCopy := utils.CopySlice(bhBytes)
bh := &objs.BlockHeader{}
err := bh.UnmarshalBinary(bhCopy)
if err != nil {
bhc.delInternal(height)
return nil, false
}
return bh, true
}
return nil, false
}
func (bhc *bHCache) Del(height uint32) {
bhc.Lock()
defer bhc.Unlock()
bhc.delInternal(height)
}
func (bhc *bHCache) delInternal(height uint32) {
delete(bhc.cache, height)
}
func (bhc *bHCache) DropBeforeHeight(dropHeight uint32) []string {
out := []string{}
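	// uint32 underflow guard: when dropHeight < 256, dropHeight-256 wraps around,
	// so bail out and leave the cache untouched.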
if dropHeight-256 > dropHeight {
return out
}
bhc.Lock()
defer bhc.Unlock()
for height := range bhc.cache {
height := height
		if height <= uint32(dropHeight) {
			out = append(out, strconv.Itoa(int(height)))
			bhc.delInternal(height)
		}
}
return out
}
test_example.py | from handball.core.test import TestCase
from handball.users.factories import UserFactory
class | (TestCase):
def test_example(self):
UserFactory()
resp = self.client.get('/')
self.assertEqual(resp.status_code, 200)
| TestExample |
compiler_value_location_test.go | package compiler
import (
"testing"
"unsafe"
"github.com/tetratelabs/wazero/internal/asm"
"github.com/tetratelabs/wazero/internal/testing/require"
) | for _, r := range unreservedGeneralPurposeRegisters {
require.True(t, isIntRegister(r))
}
}
func Test_isVectorRegister(t *testing.T) {
for _, r := range unreservedVectorRegisters {
require.True(t, isVectorRegister(r))
}
}
func TestRuntimeValueLocationStack_basic(t *testing.T) {
s := newRuntimeValueLocationStack()
// Push stack value.
loc := s.pushRuntimeValueLocationOnStack()
require.Equal(t, uint64(1), s.sp)
require.Equal(t, uint64(0), loc.stackPointer)
// Push the register value.
tmpReg := unreservedGeneralPurposeRegisters[0]
loc = s.pushRuntimeValueLocationOnRegister(tmpReg, runtimeValueTypeI64)
require.Equal(t, uint64(2), s.sp)
require.Equal(t, uint64(1), loc.stackPointer)
require.Equal(t, tmpReg, loc.register)
require.Equal(t, loc.valueType, runtimeValueTypeI64)
// markRegisterUsed.
tmpReg2 := unreservedGeneralPurposeRegisters[1]
s.markRegisterUsed(tmpReg2)
require.NotNil(t, s.usedRegisters[tmpReg2], tmpReg2)
// releaseRegister.
s.releaseRegister(loc)
require.Equal(t, s.usedRegisters[loc.register], struct{}{}, "expected %v to not contain %v", s.usedRegisters, loc.register)
require.Equal(t, asm.NilRegister, loc.register)
// Clone.
cloned := s.clone()
require.Equal(t, s.usedRegisters, cloned.usedRegisters)
require.Equal(t, len(s.stack), len(cloned.stack))
require.Equal(t, s.sp, cloned.sp)
for i := 0; i < int(s.sp); i++ {
actual, exp := s.stack[i], cloned.stack[i]
require.NotEqual(t, uintptr(unsafe.Pointer(exp)), uintptr(unsafe.Pointer(actual)))
}
// Check the max stack pointer.
for i := 0; i < 1000; i++ {
s.pushRuntimeValueLocationOnStack()
}
for i := 0; i < 1000; i++ {
s.pop()
}
require.Equal(t, uint64(1001), s.stackPointerCeil)
}
func TestRuntimeValueLocationStack_takeFreeRegister(t *testing.T) {
s := newRuntimeValueLocationStack()
// For int registers.
r, ok := s.takeFreeRegister(registerTypeGeneralPurpose)
require.True(t, ok)
require.True(t, isIntRegister(r))
// Mark all the int registers used.
for _, r := range unreservedGeneralPurposeRegisters {
s.markRegisterUsed(r)
}
// Now we cannot take free ones for int.
_, ok = s.takeFreeRegister(registerTypeGeneralPurpose)
require.False(t, ok)
// But we still should be able to take float regs.
r, ok = s.takeFreeRegister(registerTypeVector)
require.True(t, ok)
require.True(t, isVectorRegister(r))
// Mark all the float registers used.
for _, r := range unreservedVectorRegisters {
s.markRegisterUsed(r)
}
// Now we cannot take free ones for floats.
_, ok = s.takeFreeRegister(registerTypeVector)
require.False(t, ok)
}
func TestRuntimeValueLocationStack_takeStealTargetFromUsedRegister(t *testing.T) {
s := newRuntimeValueLocationStack()
intReg := unreservedGeneralPurposeRegisters[0]
intLocation := &runtimeValueLocation{register: intReg}
floatReg := unreservedVectorRegisters[0]
floatLocation := &runtimeValueLocation{register: floatReg}
s.push(intLocation)
s.push(floatLocation)
// Take for float.
target, ok := s.takeStealTargetFromUsedRegister(registerTypeVector)
require.True(t, ok)
require.Equal(t, floatLocation, target)
// Take for ints.
target, ok = s.takeStealTargetFromUsedRegister(registerTypeGeneralPurpose)
require.True(t, ok)
require.Equal(t, intLocation, target)
// Pop float value.
popped := s.pop()
require.Equal(t, floatLocation, popped)
// Now we cannot find the steal target.
target, ok = s.takeStealTargetFromUsedRegister(registerTypeVector)
require.False(t, ok)
require.Nil(t, target)
// Pop int value.
popped = s.pop()
require.Equal(t, intLocation, popped)
// Now we cannot find the steal target.
target, ok = s.takeStealTargetFromUsedRegister(registerTypeGeneralPurpose)
require.False(t, ok)
require.Nil(t, target)
} |
func Test_isIntRegister(t *testing.T) { |
Exercises1.py | def hello():
""" prints hello, world """
print("Hello, world!")
def areacircle(radius):
""" Computes the area of a circle of the given radius """
area = 3.14*radius**2
print("The area of a circle of radius",radius,"is", area)
def areatriangle(b,h):
area = 0.5*b*h
print("The area of a triangle is", area)
def fahrenheit_to_celsius(temp):
""" Converts Fahrenheit temperature to Celsius.
Formula is 5/9 of temp minus 32 """
# Note: comment lines like these are not executed.
# end='' keeps print from starting a new line.
newTemp = 5*(temp-32)/9
print("The Fahrenheit temperature",temp,"is equivalent to",newTemp,end='')
print(" degrees Celsius")
def celsius_to_fahrenheit(temp):
newTemp = (9/5*(temp))+32
print("The Celsius temperature",temp,"is equivalent to",newTemp,end='')
print(" degrees Fahrenheit")
def name():
""" Input first and last name, combine to one string and print """
fname = input("Enter your first name: ")
lname = input("Enter your last name: ")
city = input("Enter the city you live in: ")
state = input("Enter the state you live in: ")
fullname = fname + " " + lname
print("Your name is:", fullname, "City: ", city, " state", state)
def absolutevalue(num):
if num>0:
print(num)
elif num<0:
print(-1*num)
else:
print(0)
def fahrenheit_to_celsius3():
""" Input from keyboard, which is always a string and must often be
converted to an int or float.
Converts Fahrenheit temp to Celsius.
Uses if to check whether input is a number and then uses .isdigit() method
of strings to check whether input is made of digits.
"""
temp_str = input("Enter a Fahrentheit temperature: ")
if temp_str:
if temp_str.isdigit():
temp = int(temp_str)
newTemp = 5*(temp-32)/9
print("The Fahrenheit temperature",temp,"is equivalent to ",end='')
print(newTemp,"degrees Celsius")
else:
print("You must enter a number. Bye")
def inches_to_feet1(inches):
""" converts inches to feet and inches """
feet = inches//12 # division by integer with fraction thrown away
extra_inches = inches%12
print(inches,"inches is",feet,"feet and",extra_inches,"inches")
def cheer2():
""" Same as cheer, but uses a for loop and range()
range uses a start number, a stop number and a step size. """
for ct in range(2,9,2):
print(ct,end=' ')
print()
print("Who do we appreciate?")
print("COURSERA!")
def | ():
for ct in range(10,0,-1):
print(ct,end=' ')
print("BlastOff!")
print("Who do we appreciate?")
print("COURSERA!")
def favorite():
my_toy = input("What is my favorite toy? ")
print("Your favorite toy is", my_toy)
| countdown1 |
sprite.rs | pub use crate::texture::texture::{TextureData2d, TextureKind};
use fna3h::Color;
use crate::geom2d::*;
/// Conversion
impl TextureData2d {
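/// Converts a pixel-space rectangle into a sub-texture with normalized UV coordinates.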
pub fn trim_px(&self, rect: impl Into<[u32; 4]>) -> SubTextureData2d {
let rect = rect.into();
let uv_rect = [
rect[0] as f32 / self.w as f32,
rect[1] as f32 / self.h as f32,
rect[2] as f32 / self.w as f32,
rect[3] as f32 / self.h as f32,
];
SubTextureData2d {
texture: self.clone(),
uv_rect,
}
}
pub fn trim_uv(&self, uv_rect: impl Into<Rect2f>) -> SubTextureData2d {
SubTextureData2d {
texture: self.clone(),
// TODO: change this: T -> Rect2f -> [f32; 4]
uv_rect: uv_rect.into().into(),
}
}
}
/// 2D texture handle with region (uv values)
#[derive(Debug, PartialEq, Clone)]
pub struct SubTextureData2d {
pub(crate) texture: TextureData2d,
pub(crate) uv_rect: [f32; 4],
}
impl SubTextureData2d {
pub fn new(texture: TextureData2d, uv_rect: impl Into<[f32; 4]>) -> Self {
Self {
texture,
uv_rect: uv_rect.into(),
}
}
pub fn size(&self) -> [f32; 2] {
self.texture.size()
}
pub fn size_uv(&self) -> [f32; 2] {
let size = self.texture.size();
let parent = [self.uv_rect[2], self.uv_rect[3]];
[size[0] / parent[0], size[1] / parent[1]]
}
}
impl AsRef<TextureData2d> for SubTextureData2d {
fn as_ref(&self) -> &TextureData2d {
&self.texture
}
}
/// 2D texture handle with region (uv values), origin, color, scale, rotation and flips
#[derive(Debug, Clone)]
pub struct SpriteData {
pub texture: TextureData2d,
pub uv_rect: Rect2f,
/// [0.0, 0.0] is left up (default value), [1.0, 1.0] is right down
pub origin: Vec2f,
pub color: Color,
pub scale: Vec2f,
pub rot: f32,
pub flips: Flips,
}
impl AsRef<TextureData2d> for SpriteData {
fn as_ref(&self) -> &TextureData2d {
&self.texture
}
}
impl SpriteData {
/// Alternative to [`Default`]
pub fn | (texture: TextureData2d) -> Self {
Self {
texture,
uv_rect: Rect2f::unit(),
origin: Vec2f::zero(),
color: Color::white(),
scale: Vec2f::one(),
rot: 0.0,
flips: Flips::NONE,
}
}
pub fn texture_w(&self) -> u32 {
self.texture.w
}
pub fn texture_h(&self) -> u32 {
self.texture.h
}
pub fn texture_size_px(&self) -> [u32; 2] {
[self.texture.w, self.texture.h]
}
pub fn size_px(&self) -> Vec2f {
self.uv_rect.size() * Vec2f::new(self.texture.w as f32, self.texture.h as f32)
}
pub fn size_uv(&self) -> Vec2f {
self.uv_rect.size()
}
}
| from_texture |
STOOGE_SORT.py | # Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
def f_gold ( arr , l , h ) :
if l >= h :
return
if arr [ l ] > arr [ h ] :
t = arr [ l ]
arr [ l ] = arr [ h ]
arr [ h ] = t
if h - l + 1 > 2 :
t = ( int ) ( ( h - l + 1 ) / 3 ) | f_gold ( arr , l , ( h - t ) )
#TOFILL
if __name__ == '__main__':
param = [
([6, 25, 42, 52, 53, 54, 58, 66, 67, 70],6,6,),
([-13, -98, 50, -63, 48, 3, -76, 12, -35, 93, 29, 17, 16, 5, -97, -54, -45, -25],16,14,),
([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],28,24,),
([7, 49, 26, 33, 48, 79, 2, 71, 32, 4, 20, 36],9,10,),
([88],0,0,),
([1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0],20,28,),
([2, 2, 4, 5, 7, 12, 12, 14, 14, 16, 17, 29, 29, 31, 32, 39, 41, 47, 48, 49, 51, 54, 58, 58, 59, 60, 73, 78, 80, 81, 82, 83, 84, 85, 90, 95, 97, 99, 99],28,29,),
([-31, -55, 6, 37, 77, 61, 0, 46, -91, -38, 85, -71, 25, 14, 53, 43, 34],15,11,),
([0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],12,17,),
([77, 68, 78, 97, 92, 52, 37, 8, 44, 98, 5, 69, 31, 45, 9, 32, 33, 67, 30, 76, 29, 3, 90, 57, 30, 9, 26, 2, 62, 3, 46, 68, 25, 51, 13, 44, 35, 55],27,20,)
]
filled_function_param = [
([6, 25, 42, 52, 53, 54, 58, 66, 67, 70],6,6,),
([-13, -98, 50, -63, 48, 3, -76, 12, -35, 93, 29, 17, 16, 5, -97, -54, -45, -25],16,14,),
([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],28,24,),
([7, 49, 26, 33, 48, 79, 2, 71, 32, 4, 20, 36],9,10,),
([88],0,0,),
([1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0],20,28,),
([2, 2, 4, 5, 7, 12, 12, 14, 14, 16, 17, 29, 29, 31, 32, 39, 41, 47, 48, 49, 51, 54, 58, 58, 59, 60, 73, 78, 80, 81, 82, 83, 84, 85, 90, 95, 97, 99, 99],28,29,),
([-31, -55, 6, 37, 77, 61, 0, 46, -91, -38, 85, -71, 25, 14, 53, 43, 34],15,11,),
([0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],12,17,),
([77, 68, 78, 97, 92, 52, 37, 8, 44, 98, 5, 69, 31, 45, 9, 32, 33, 67, 30, 76, 29, 3, 90, 57, 30, 9, 26, 2, 62, 3, 46, 68, 25, 51, 13, 44, 35, 55],27,20,)
]
n_success = 0
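# f_gold sorts its list argument in place; if f_filled applies the same sort to its copy, the two parameter sets stay equal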
for i, parameters_set in enumerate(param):
f_filled(*(filled_function_param[i]))
f_gold(*parameters_set)
if parameters_set == filled_function_param[i]:
n_success+=1
print("#Results: %i, %i" % (n_success, len(param))) | f_gold ( arr , l , ( h - t ) )
f_gold ( arr , l + t , ( h ) ) |
dyn.rs | use dynasm::dynasm;
use dynasmrt::{DynasmApi, DynasmLabelApi};
use std::io::{stdout, Write};
// unsafe extern "sysv64" fn print(buf: *const u8, len: u64) -> u8 {
// let buf = std::slice::from_raw_parts(buf, len as usize);
// stdout().write_all(buf).is_err() as u8
// }
// 避免 panic
unsafe extern "sysv64" fn print(buf: *const u8, len: u64) -> u8 {
| main() {
let mut ops = dynasmrt::x64::Assembler::new().unwrap();
let s = b"Hello, JIT\n";
dynasm!(ops
; .arch x64
; ->hello: // 字符串label名为 hello
; .bytes s
);
let oft = ops.offset(); // 字符串地址偏移
dynasm!(ops
; lea rdi, [->hello] // 将字符串地址存储在 rdi 中
; mov rsi, QWORD s.len() as _ // 将字符串长度存储在 rsi 中
; mov rax, QWORD print as _ // 将 print 函数地址放入 rax
; call rax // 调用函数
; ret // 返回
);
let asm = ops.finalize().unwrap();
let hello_fn: unsafe extern "sysv64" fn() -> u8 = unsafe {
// 得到调用函数的汇编便宜地址,并将其作为函数地址返回
std::mem::transmute(asm.ptr(oft))
};
let ret = unsafe { hello_fn() };
assert_eq!(ret, 0);
}
| let ret = std::panic::catch_unwind(|| {
let buf = std::slice::from_raw_parts(buf, len as usize);
stdout().write_all(buf).is_err()
});
match ret {
Ok(false) => 0,
Ok(true) => 1,
Err(_) => 2,
}
}
fn |
oci.rs | // Copyright (c) 2021 Quark Container Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![allow(non_camel_case_types)]
use alloc::string::String;
use alloc::collections::btree_map::BTreeMap;
use alloc::vec::Vec;
fn is_false(b: &bool) -> bool {
!b
}
#[derive(Debug)]
pub struct Platform {
pub os: String,
pub arch: String,
}
#[derive(Default, PartialEq, Debug)]
pub struct Box {
pub height: u64,
pub width: u64,
}
fn is_default<T: Default + PartialEq>(b: &T) -> bool {
*b == T::default()
}
#[derive(Debug)]
pub struct User {
pub uid: u32,
pub gid: u32,
pub additional_gids: Vec<u32>,
pub username: String,
}
// this converts directly to the correct int
#[derive(Debug, Clone, Copy)]
pub enum LinuxRlimitType {
RLIMIT_CPU,
// CPU time in sec
RLIMIT_FSIZE,
// Maximum filesize
RLIMIT_DATA,
// max data size
RLIMIT_STACK,
// max stack size
RLIMIT_CORE,
// max core file size
RLIMIT_RSS,
// max resident set size
RLIMIT_NPROC,
// max number of processes
RLIMIT_NOFILE,
// max number of open files
RLIMIT_MEMLOCK,
// max locked-in-memory address space
RLIMIT_AS,
// address space limit
RLIMIT_LOCKS,
// maximum file locks held
RLIMIT_SIGPENDING,
// max number of pending signals
RLIMIT_MSGQUEUE,
// maximum bytes in POSIX mqueues
RLIMIT_NICE,
// max nice prio allowed to raise to
RLIMIT_RTPRIO,
// maximum realtime priority
RLIMIT_RTTIME,
// timeout for RT tasks in us
}
#[derive(Debug)]
pub struct LinuxRlimit {
pub typ: LinuxRlimitType,
pub hard: u64,
pub soft: u64,
}
#[derive(Debug, Clone, Copy)]
#[repr(u8)]
pub enum LinuxCapabilityType {
CAP_CHOWN,
CAP_DAC_OVERRIDE,
CAP_DAC_READ_SEARCH,
CAP_FOWNER,
CAP_FSETID,
CAP_KILL,
CAP_SETGID,
CAP_SETUID,
CAP_SETPCAP,
CAP_LINUX_IMMUTABLE,
CAP_NET_BIND_SERVICE,
CAP_NET_BROADCAST,
CAP_NET_ADMIN,
CAP_NET_RAW,
CAP_IPC_LOCK,
CAP_IPC_OWNER,
CAP_SYS_MODULE,
CAP_SYS_RAWIO,
CAP_SYS_CHROOT,
CAP_SYS_PTRACE,
CAP_SYS_PACCT,
CAP_SYS_ADMIN,
CAP_SYS_BOOT,
CAP_SYS_NICE,
CAP_SYS_RESOURCE,
CAP_SYS_TIME,
CAP_SYS_TTY_CONFIG,
CAP_MKNOD,
CAP_LEASE,
CAP_AUDIT_WRITE,
CAP_AUDIT_CONTROL,
CAP_SETFCAP,
CAP_MAC_OVERRIDE,
CAP_MAC_ADMIN,
CAP_SYSLOG,
CAP_WAKE_ALARM,
CAP_BLOCK_SUSPEND,
CAP_AUDIT_READ,
}
#[derive(Debug)]
pub struct LinuxCapabilities {
pub bounding: Vec<LinuxCapabilityType>,
pub effective: Vec<LinuxCapabilityType>,
pub inheritable: Vec<LinuxCapabilityType>,
pub permitted: Vec<LinuxCapabilityType>,
pub ambient: Vec<LinuxCapabilityType>,
}
#[derive(Debug)]
pub struct Process {
pub terminal: bool,
pub console_size: Box,
pub user: User,
pub args: Vec<String>,
pub env: Vec<String>,
pub cwd: String,
pub capabilities: Option<LinuxCapabilities>,
pub rlimits: Vec<LinuxRlimit>,
pub no_new_privileges: bool,
pub apparmor_profile: String,
pub selinux_label: String,
}
#[derive(Debug)]
pub struct Root {
pub path: String,
pub readonly: bool,
}
#[derive(Debug, Clone)]
pub struct Mount {
pub destination: String,
pub typ: String,
pub source: String,
pub options: Vec<String>,
}
#[derive(Debug)]
pub struct Hook {
pub path: String,
pub args: Vec<String>,
pub env: Vec<String>,
pub timeout: Option<i64>,
}
#[derive(Debug)]
pub struct Hooks {
pub prestart: Vec<Hook>,
pub poststart: Vec<Hook>,
pub poststop: Vec<Hook>,
}
#[derive(Debug, Clone)]
pub struct LinuxIDMapping {
pub host_id: u32,
pub container_id: u32,
pub size: u32,
}
// the catch-all variant `a` is only meaningful for LinuxDeviceCgroup
#[derive(Debug, Clone, Copy)]
pub enum LinuxDeviceType {
b,
c,
u,
p,
a,
}
impl Default for LinuxDeviceType {
fn default() -> LinuxDeviceType {
LinuxDeviceType::a
}
}
#[derive(Debug)]
pub struct LinuxDeviceCgroup {
pub allow: bool,
pub typ: LinuxDeviceType,
pub major: Option<i64>,
pub minor: Option<i64>,
pub access: String,
}
#[derive(Debug)]
pub struct LinuxMemory {
pub limit: Option<i64>,
pub reservation: Option<i64>,
pub swap: Option<i64>,
pub kernel: Option<i64>,
pub kernel_tcp: Option<i64>,
pub swappiness: Option<u64>,
}
#[derive(Debug)]
pub struct LinuxCPU {
pub shares: Option<u64>,
pub quota: Option<i64>,
pub period: Option<u64>,
pub realtime_runtime: Option<i64>,
pub realtime_period: Option<u64>,
pub cpus: String,
pub mems: String,
}
#[derive(Debug)]
pub struct LinuxPids {
pub limit: i64,
}
#[derive(Debug)]
pub struct LinuxWeightDevice {
pub major: i64,
pub minor: i64,
pub weight: Option<u16>,
pub leaf_weight: Option<u16>,
}
#[derive(Debug)]
pub struct LinuxThrottleDevice {
pub major: i64,
pub minor: i64,
pub rate: u64,
}
#[derive(Debug)]
pub struct LinuxBlockIO {
pub weight: Option<u16>,
pub leaf_weight: Option<u16>,
pub weight_device: Vec<LinuxWeightDevice>,
pub throttle_read_bps_device: Vec<LinuxThrottleDevice>,
pub throttle_write_bps_device: Vec<LinuxThrottleDevice>,
pub throttle_read_iops_device: Vec<LinuxThrottleDevice>,
pub throttle_write_iops_device: Vec<LinuxThrottleDevice>,
}
#[derive(Debug)]
pub struct LinuxHugepageLimit {
pub page_size: String,
pub limit: i64,
}
#[derive(Debug)]
pub struct LinuxInterfacePriority {
pub name: String,
pub priority: u32,
}
#[derive(Debug)]
pub struct LinuxNetwork {
pub class_id: Option<u32>,
pub priorities: Vec<LinuxInterfacePriority>,
}
#[derive(Default, Debug)]
pub struct LinuxResources {
pub devices: Vec<LinuxDeviceCgroup>,
// NOTE: spec uses a pointer here, so perhaps this should be an Option, but
// false == unset so we don't bother.
pub disable_oom_killer: bool,
// NOTE: spec refers to this as an isize but the range is -1000 to 1000, so
// an i32 seems just fine
pub oom_score_adj: Option<i32>,
pub memory: Option<LinuxMemory>,
pub cpu: Option<LinuxCPU>,
pub pids: Option<LinuxPids>,
pub block_io: Option<LinuxBlockIO>,
pub hugepage_limits: Vec<LinuxHugepageLimit>,
pub network: Option<LinuxNetwork>,
}
#[derive(Debug, Clone, Copy)]
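// discriminants match the Linux CLONE_NEW* namespace flags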
pub enum LinuxNamespaceType {
mount = 0x00020000,
/* New mount namespace group */
cgroup = 0x02000000,
/* New cgroup namespace */
uts = 0x04000000,
/* New utsname namespace */
ipc = 0x08000000,
/* New ipc namespace */
user = 0x10000000,
/* New user namespace */
pid = 0x20000000,
/* New pid namespace */
network = 0x40000000,
/* New network namespace */
}
#[derive(Debug)]
pub struct LinuxNamespace {
pub typ: LinuxNamespaceType,
pub path: String,
}
#[derive(Debug)]
pub struct LinuxDevice {
pub path: String,
pub typ: LinuxDeviceType,
pub major: u64,
pub minor: u64,
pub file_mode: Option<u32>,
pub uid: Option<u32>,
pub gid: Option<u32>,
}
#[derive(Debug, Clone, Copy)]
#[repr(u32)]
pub enum LinuxSeccompAction {
SCMP_ACT_KILL = 0x00000000,
SCMP_ACT_TRAP = 0x00030000,
SCMP_ACT_ERRNO = 0x00050001,
/* ERRNO + EPERM */
SCMP_ACT_TRACE = 0x7ff00001,
/* TRACE + EPERM */
SCMP_ACT_ALLOW = 0x7fff0000,
}
#[derive(Debug, Clone, Copy)]
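// libseccomp SCMP_ARCH_* audit-architecture constants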
pub enum | {
SCMP_ARCH_NATIVE = 0x00000000,
SCMP_ARCH_X86 = 0x40000003,
SCMP_ARCH_X86_64 = 0xc000003e,
SCMP_ARCH_X32 = 0x4000003e,
SCMP_ARCH_ARM = 0x40000028,
SCMP_ARCH_AARCH64 = 0xc00000b7,
SCMP_ARCH_MIPS = 0x00000008,
SCMP_ARCH_MIPS64 = 0x80000008,
SCMP_ARCH_MIPS64N32 = 0xa0000008,
SCMP_ARCH_MIPSEL = 0x40000008,
SCMP_ARCH_MIPSEL64 = 0xc0000008,
SCMP_ARCH_MIPSEL64N32 = 0xe0000008,
SCMP_ARCH_PPC = 0x00000014,
SCMP_ARCH_PPC64 = 0x80000015,
SCMP_ARCH_PPC64LE = 0xc0000015,
SCMP_ARCH_S390 = 0x00000016,
SCMP_ARCH_S390X = 0x80000016,
}
#[derive(Debug, Clone, Copy)]
#[repr(u32)]
pub enum LinuxSeccompOperator {
SCMP_CMP_NE = 1,
/* not equal */
SCMP_CMP_LT = 2,
/* less than */
SCMP_CMP_LE = 3,
/* less than or equal */
SCMP_CMP_EQ = 4,
/* equal */
SCMP_CMP_GE = 5,
/* greater than or equal */
SCMP_CMP_GT = 6,
/* greater than */
SCMP_CMP_MASKED_EQ = 7,
/* masked equality */
}
#[derive(Debug)]
pub struct LinuxSeccompArg {
pub index: usize,
pub value: u64,
pub value_two: u64,
pub op: LinuxSeccompOperator,
}
#[derive(Debug)]
pub struct LinuxSyscall {
// old version used name
pub name: String,
pub names: Vec<String>,
pub action: LinuxSeccompAction,
pub args: Vec<LinuxSeccompArg>,
}
#[derive(Debug)]
pub struct LinuxSeccomp {
pub default_action: LinuxSeccompAction,
pub architectures: Vec<Arch>,
pub syscalls: Vec<LinuxSyscall>,
}
#[derive(Debug)]
pub struct Linux {
pub uid_mappings: Vec<LinuxIDMapping>,
pub gid_mappings: Vec<LinuxIDMapping>,
pub sysctl: BTreeMap<String, String>,
pub resources: Option<LinuxResources>,
pub cgroups_path: String,
pub namespaces: Vec<LinuxNamespace>,
pub devices: Vec<LinuxDevice>,
pub seccomp: Option<LinuxSeccomp>,
pub rootfs_propagation: String,
pub masked_paths: Vec<String>,
pub readonly_paths: Vec<String>,
pub mount_label: String,
}
// NOTE: Solaris and Windows are ignored for the moment
pub type Solaris = Value;
pub type Windows = Value;
pub type Value = i32;
#[derive(Debug)]
pub struct Spec {
pub version: String,
// NOTE: Platform was removed, but keeping it as an option
// to support older docker versions
pub platform: Option<Platform>,
//pub process: Process,
pub root: Root,
pub hostname: String,
pub mounts: Vec<Mount>,
pub hooks: Option<Hooks>,
pub annotations: BTreeMap<String, String>,
pub linux: Option<Linux>,
pub solaris: Option<Solaris>,
pub windows: Option<Windows>,
}
#[derive(Debug)]
pub struct State {
pub version: String,
pub id: String,
pub status: String,
pub pid: i32,
pub bundle: String,
pub annotations: BTreeMap<String, String>,
}
| Arch |
expect-package-json.ts | import { PackageJsonFile } from './package-json-file';
/**
* Expect package.json file
*/
export function | ( filePath: string, checks: {
packageName: string,
} ): void {
let file: PackageJsonFile;
const fileName: string = checks.packageName.split( '/' ).pop();
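// the entry points checked below follow the Angular Package Format layout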
it( 'should exist and be valid JSON', () => {
file = new PackageJsonFile( filePath );
} );
it( 'should not be empty', () => {
expect( file.isEmpty() ).toBe( false );
} );
it( 'should have the correct package name', () => {
expect( file.getPackageName() ).toBe( checks.packageName );
} );
it( 'should reference the "es2015" entry', () => {
expect( file.getEntries().es2015 ).toBe( `esm2015/${ fileName }.js` );
} );
it( 'should reference the "esm2015" entry', () => {
expect( file.getEntries().esm2015 ).toBe( `esm2015/${ fileName }.js` );
} );
it( 'should reference the "esm5" entry', () => {
expect( file.getEntries().esm5 ).toBe( `esm5/${ fileName }.js` );
} );
it( 'should reference the "fesm2015" entry', () => {
expect( file.getEntries().fesm2015 ).toBe( `fesm2015/${ fileName }.js` );
} );
it( 'should reference the "fesm5" entry', () => {
expect( file.getEntries().fesm5 ).toBe( `fesm5/${ fileName }.js` );
} );
it( 'should reference the "module" entry (ESM5 build entry)', () => {
expect( file.getEntries().module ).toBe( `esm5/${ fileName }.js` );
} );
it( 'should reference the "main" entry (UMD bundle)', () => {
expect( file.getEntries().main ).toBe( `bundles/${ fileName }.umd.js` );
} );
it( 'should reference the "typings" entry', () => {
expect( file.getEntries().typings ).toBe( `${ fileName }.d.ts` );
} );
}
| expectPackageJson |
ObjectUtils.js | /**
* author: Samuel Gratzl
* email: [email protected]
* created: 2016-10-28T11:19:52.797Z
*/
export class | {
/**
* merges the remaining objects into the first one
* @param target
* @param others
* @internal
* @returns {T}
*/
static merge(target, ...others) {
others = others.filter((o) => !!o); //is defined
if (others.length === 0) {
return target;
}
others.forEach((other) => Object.keys(other).forEach((key) => {
const v = other[key];
if (Object.prototype.toString.call(v) === '[object Object]') {
//nested
target[key] = (target[key] != null) ? ObjectUtils.merge(target[key], v) : v;
}
else {
target[key] = v;
}
}));
return target;
}
}
//# sourceMappingURL=ObjectUtils.js.map | ObjectUtils |
mocks_test.go | // No mocks are tested by this file.
//
// It supplies some mock implementations to other unit tests, and is
// named "...test.go" so it is only compiled under `go test`.
package mesh
import (
"fmt"
"testing"
"github.com/stretchr/testify/require"
)
// Add to peers a connection from peers.ourself to p
func (peers *Peers) AddTestConnection(p *Peer) {
summary := p.peerSummary
summary.Version = 0
toPeer := newPeerFromSummary(summary)
toPeer = peers.fetchWithDefault(toPeer) // Has side-effect of incrementing refcount
conn := newMockConnection(peers.ourself.Peer, toPeer)
peers.ourself.addConnection(conn)
peers.ourself.connectionEstablished(conn)
}
// Add to peers a connection from p1 to p2
func (peers *Peers) AddTestRemoteConnection(p1, p2 *Peer) {
fromPeer := newPeerFrom(p1)
fromPeer = peers.fetchWithDefault(fromPeer)
toPeer := newPeerFrom(p2)
toPeer = peers.fetchWithDefault(toPeer)
peers.ourself.addConnection(newRemoteConnection(fromPeer, toPeer, "", false, false))
}
func (peers *Peers) DeleteTestConnection(p *Peer) {
toName := p.Name
toPeer := peers.Fetch(toName)
peers.dereference(toPeer)
conn, _ := peers.ourself.ConnectionTo(toName)
peers.ourself.deleteConnection(conn)
}
// mockConnection used in testing is very similar to a
// RemoteConnection, without the RemoteTCPAddr(). We are making it a
// separate type in order to distinguish what is created by the test
// from what is created by the real code.
func newMockConnection(from, to *Peer) Connection {
type mockConnection struct{ *remoteConnection }
return &mockConnection{newRemoteConnection(from, to, "", false, false)}
}
func checkEqualConns(t *testing.T, ourName PeerName, got, wanted map[PeerName]Connection) {
checkConns := make(peerNameSet)
for _, conn := range wanted {
checkConns[conn.Remote().Name] = struct{}{}
}
for _, conn := range got {
remoteName := conn.Remote().Name
if _, found := checkConns[remoteName]; found {
delete(checkConns, remoteName)
} else {
require.FailNow(t, fmt.Sprintf("Unexpected connection from %s to %s", ourName, remoteName))
}
}
if len(checkConns) > 0 {
require.FailNow(t, fmt.Sprintf("Expected connections not found: from %s to %v", ourName, checkConns))
}
}
// Get all the peers from a Peers in a slice
func (peers *Peers) allPeers() []*Peer {
var res []*Peer
for _, peer := range peers.byName {
res = append(res, peer)
}
return res
}
func (peers *Peers) allPeersExcept(excludeName PeerName) []*Peer {
res := peers.allPeers()
for i, peer := range res {
if peer.Name == excludeName {
return append(res[:i], res[i+1:]...)
}
}
return res
}
// Check that the peers slice matches the wanted peers
func checkPeerArray(t *testing.T, peers []*Peer, wantedPeers ...*Peer) |
// Check that the peers slice matches the wanted peers and optionally
// all of their connections
func checkTopologyPeers(t *testing.T, checkConns bool, peers []*Peer, wantedPeers ...*Peer) {
check := make(map[PeerName]*Peer)
for _, peer := range wantedPeers {
check[peer.Name] = peer
}
for _, peer := range peers {
name := peer.Name
if wantedPeer, found := check[name]; found {
if checkConns {
checkEqualConns(t, name, peer.connections, wantedPeer.connections)
}
delete(check, name)
} else {
require.FailNow(t, fmt.Sprintf("Unexpected peer: %s", name))
}
}
if len(check) > 0 {
require.FailNow(t, fmt.Sprintf("Expected peers not found: %v", check))
}
}
| {
checkTopologyPeers(t, false, peers, wantedPeers...)
} |
main.rs | #![feature(test)]
// learn from https://medium.com/@james_32022/unit-tests-and-benchmarks-in-rust-f5de0a0ea19a
extern crate test;
use rand::{thread_rng, Rng};
use test::Bencher;
pub fn random_vector(i: i32) -> Vec<i32> {
let mut numbers: Vec<i32> = Vec::new();
let mut rng = thread_rng();
for _ in 0..i {
numbers.push(rng.gen());
}
return numbers;
}
pub fn swap(numbers: &mut Vec<i32>, i: usize, j: usize) {
let temp = numbers[i];
numbers[i] = numbers[j];
numbers[j] = temp;
}
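/// In-place insertion sort: grow a sorted prefix, sinking each new element leftward.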
pub fn | (numbers: &mut Vec<i32>) {
for i in 1..numbers.len() {
let mut j = i;
while j > 0 && numbers[j - 1] > numbers[j] {
swap(numbers, j, j - 1);
j = j - 1;
}
}
}
#[bench]
fn bench_insertion_sort_100_ints(b: &mut Bencher) {
b.iter(|| {
let mut numbers: Vec<i32> = random_vector(100);
insertion_sorter(&mut numbers)
});
}
fn main() {}
| insertion_sorter |
test_plugin.py | # coding=utf-8
# flake8: noqa E302
"""
Test plugin infrastructure and hooks.
"""
import sys
import pytest
# Python 3.5 had some regressions in the unittest.mock module, so use 3rd party mock if available
try:
import mock
except ImportError:
from unittest import mock
import cmd2
from cmd2 import plugin
class Plugin:
"""A mixin class for testing hook registration and calling"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.reset_counters()
def reset_counters(self):
self.called_preparse = 0
self.called_postparsing = 0
self.called_precmd = 0
self.called_postcmd = 0
self.called_cmdfinalization = 0
###
#
# preloop and postloop hooks
# which share the same signature and are thus interchangeable
#
###
def prepost_hook_one(self) -> None:
"""Method used for preloop or postloop hooks"""
self.poutput("one")
def prepost_hook_two(self) -> None:
"""Another method used for preloop or postloop hooks"""
self.poutput("two")
def prepost_hook_too_many_parameters(self, param) -> None:
"""A preloop or postloop hook with too many parameters"""
pass
def prepost_hook_with_wrong_return_annotation(self) -> bool:
"""A preloop or postloop hook with incorrect return type"""
pass
###
#
# preparse hook
#
###
def preparse(self, data: cmd2.plugin.PostparsingData) -> cmd2.plugin.PostparsingData:
"""Preparsing hook"""
self.called_preparse += 1
return data
###
#
# Postparsing hooks
#
###
def postparse_hook(self, data: cmd2.plugin.PostparsingData) -> cmd2.plugin.PostparsingData:
"""A postparsing hook"""
self.called_postparsing += 1
return data
def postparse_hook_stop(self, data: cmd2.plugin.PostparsingData) -> cmd2.plugin.PostparsingData:
"""A postparsing hook with requests application exit"""
self.called_postparsing += 1
data.stop = True
return data
def postparse_hook_emptystatement(self, data: cmd2.plugin.PostparsingData) -> cmd2.plugin.PostparsingData:
"""A postparsing hook with raises an EmptyStatement exception"""
self.called_postparsing += 1
raise cmd2.EmptyStatement
def postparse_hook_exception(self, data: cmd2.plugin.PostparsingData) -> cmd2.plugin.PostparsingData:
"""A postparsing hook which raises an exception"""
self.called_postparsing += 1
raise ValueError
def postparse_hook_too_many_parameters(self, data1, data2) -> cmd2.plugin.PostparsingData:
"""A postparsing hook with too many parameters"""
pass
def postparse_hook_undeclared_parameter_annotation(self, data) -> cmd2.plugin.PostparsingData:
"""A postparsing hook with an undeclared parameter type"""
pass
def postparse_hook_wrong_parameter_annotation(self, data: str) -> cmd2.plugin.PostparsingData:
"""A postparsing hook with the wrong parameter type"""
pass
def postparse_hook_undeclared_return_annotation(self, data: cmd2.plugin.PostparsingData):
"""A postparsing hook with an undeclared return type"""
pass
def postparse_hook_wrong_return_annotation(self, data: cmd2.plugin.PostparsingData) -> str:
"""A postparsing hook with the wrong return type"""
pass
###
#
# precommand hooks, some valid, some invalid
#
###
def precmd(self, statement: cmd2.Statement) -> cmd2.Statement:
"""Override cmd.Cmd method"""
self.called_precmd += 1
return statement
def precmd_hook(self, data: plugin.PrecommandData) -> plugin.PrecommandData:
"""A precommand hook"""
self.called_precmd += 1
return data
def precmd_hook_emptystatement(self, data: plugin.PrecommandData) -> plugin.PrecommandData:
"""A precommand hook which raises an EmptyStatement exception"""
self.called_precmd += 1
raise cmd2.EmptyStatement
def precmd_hook_exception(self, data: plugin.PrecommandData) -> plugin.PrecommandData:
"""A precommand hook which raises an exception"""
self.called_precmd += 1
raise ValueError
def precmd_hook_not_enough_parameters(self) -> plugin.PrecommandData:
"""A precommand hook with no parameters"""
pass
def precmd_hook_too_many_parameters(self, one: plugin.PrecommandData, two: str) -> plugin.PrecommandData:
"""A precommand hook with too many parameters"""
return one
def precmd_hook_no_parameter_annotation(self, data) -> plugin.PrecommandData:
"""A precommand hook with no type annotation on the parameter"""
return data
def precmd_hook_wrong_parameter_annotation(self, data: str) -> plugin.PrecommandData:
"""A precommand hook with the incorrect type annotation on the parameter"""
return data
def precmd_hook_no_return_annotation(self, data: plugin.PrecommandData):
"""A precommand hook with no type annotation on the return value"""
return data
def precmd_hook_wrong_return_annotation(self, data: plugin.PrecommandData) -> cmd2.Statement:
"""A precommand hook with the wrong return type annotation"""
return self.statement_parser.parse('hi there')
###
#
# postcommand hooks, some valid, some invalid
#
###
def postcmd(self, stop: bool, statement: cmd2.Statement) -> bool:
"""Override cmd.Cmd method"""
self.called_postcmd += 1
return stop
def postcmd_hook(self, data: plugin.PostcommandData) -> plugin.PostcommandData:
"""A postcommand hook"""
self.called_postcmd += 1
return data
def postcmd_hook_exception(self, data: plugin.PostcommandData) -> plugin.PostcommandData:
"""A postcommand hook with raises an exception"""
self.called_postcmd += 1
raise ZeroDivisionError
def postcmd_hook_not_enough_parameters(self) -> plugin.PostcommandData:
"""A postcommand hook with no parameters"""
pass
def postcmd_hook_too_many_parameters(self, one: plugin.PostcommandData, two: str) -> plugin.PostcommandData:
"""A postcommand hook with too many parameters"""
return one
def postcmd_hook_no_parameter_annotation(self, data) -> plugin.PostcommandData:
"""A postcommand hook with no type annotation on the parameter"""
return data
def postcmd_hook_wrong_parameter_annotation(self, data: str) -> plugin.PostcommandData:
"""A postcommand hook with the incorrect type annotation on the parameter"""
return data
def postcmd_hook_no_return_annotation(self, data: plugin.PostcommandData):
"""A postcommand hook with no type annotation on the return value"""
return data
def postcmd_hook_wrong_return_annotation(self, data: plugin.PostcommandData) -> cmd2.Statement:
"""A postcommand hook with the wrong return type annotation"""
return self.statement_parser.parse('hi there')
###
#
# command finalization hooks, some valid, some invalid
#
###
def cmdfinalization_hook(self, data: plugin.CommandFinalizationData) -> plugin.CommandFinalizationData:
"""A command finalization hook."""
self.called_cmdfinalization += 1
return data
def cmdfinalization_hook_stop(self, data: cmd2.plugin.CommandFinalizationData) -> cmd2.plugin.CommandFinalizationData:
"""A command finalization hook which requests application exit"""
self.called_cmdfinalization += 1
data.stop = True
return data
def cmdfinalization_hook_exception(self, data: cmd2.plugin.CommandFinalizationData) -> cmd2.plugin.CommandFinalizationData:
"""A command finalization hook which raises an exception"""
self.called_cmdfinalization += 1
raise ValueError
def cmdfinalization_hook_not_enough_parameters(self) -> plugin.CommandFinalizationData:
"""A command finalization hook with no parameters."""
pass
def cmdfinalization_hook_too_many_parameters(self, one: plugin.CommandFinalizationData, two: str) -> plugin.CommandFinalizationData:
"""A command finalization hook with too many parameters."""
return one
def cmdfinalization_hook_no_parameter_annotation(self, data) -> plugin.CommandFinalizationData:
"""A command finalization hook with no type annotation on the parameter."""
return data
def cmdfinalization_hook_wrong_parameter_annotation(self, data: str) -> plugin.CommandFinalizationData:
"""A command finalization hook with the incorrect type annotation on the parameter."""
return data
def cmdfinalization_hook_no_return_annotation(self, data: plugin.CommandFinalizationData):
"""A command finalizationhook with no type annotation on the return value."""
return data
def cmdfinalization_hook_wrong_return_annotation(self, data: plugin.CommandFinalizationData) -> cmd2.Statement:
"""A command finalization hook with the wrong return type annotation."""
return self.statement_parser.parse('hi there')
class PluggedApp(Plugin, cmd2.Cmd):
"""A sample app with a plugin mixed in"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def do_say(self, statement):
"""Repeat back the arguments"""
self.poutput(statement)
###
#
# test pre and postloop hooks
#
###
def test_register_preloop_hook_too_many_parameters():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_preloop_hook(app.prepost_hook_too_many_parameters)
def test_register_preloop_hook_with_return_annotation():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_preloop_hook(app.prepost_hook_with_wrong_return_annotation)
def test_preloop_hook(capsys):
# Need to patch sys.argv so cmd2 doesn't think it was called with arguments equal to the py.test args
testargs = ["prog", "say hello", 'quit']
with mock.patch.object(sys, 'argv', testargs):
app = PluggedApp()
app.register_preloop_hook(app.prepost_hook_one)
app.cmdloop()
out, err = capsys.readouterr()
assert out == 'one\nhello\n'
assert not err
def test_preloop_hooks(capsys):
# Need to patch sys.argv so cmd2 doesn't think it was called with arguments equal to the py.test args
testargs = ["prog", "say hello", 'quit']
with mock.patch.object(sys, 'argv', testargs):
app = PluggedApp()
app.register_preloop_hook(app.prepost_hook_one)
app.register_preloop_hook(app.prepost_hook_two)
app.cmdloop()
out, err = capsys.readouterr()
assert out == 'one\ntwo\nhello\n'
assert not err
def test_register_postloop_hook_too_many_parameters():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_postloop_hook(app.prepost_hook_too_many_parameters)
def test_register_postloop_hook_with_wrong_return_annotation():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_postloop_hook(app.prepost_hook_with_wrong_return_annotation)
def test_postloop_hook(capsys):
# Need to patch sys.argv so cmd2 doesn't think it was called with arguments equal to the py.test args
testargs = ["prog", "say hello", 'quit']
with mock.patch.object(sys, 'argv', testargs):
app = PluggedApp()
app.register_postloop_hook(app.prepost_hook_one)
app.cmdloop()
out, err = capsys.readouterr()
assert out == 'hello\none\n'
assert not err
def test_postloop_hooks(capsys):
# Need to patch sys.argv so cmd2 doesn't think it was called with arguments equal to the py.test args
testargs = ["prog", "say hello", 'quit']
with mock.patch.object(sys, 'argv', testargs):
app = PluggedApp()
app.register_postloop_hook(app.prepost_hook_one)
app.register_postloop_hook(app.prepost_hook_two)
app.cmdloop()
out, err = capsys.readouterr()
assert out == 'hello\none\ntwo\n'
assert not err
###
#
# test preparse hook
#
###
def test_preparse(capsys):
app = PluggedApp()
app.register_postparsing_hook(app.preparse)
app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert out == 'hello\n'
assert not err
assert app.called_preparse == 1
###
#
# test postparsing hooks
#
###
def | ():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_postparsing_hook(app.postparse_hook_too_many_parameters)
def test_postparsing_hook_undeclared_parameter_annotation():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_postparsing_hook(app.postparse_hook_undeclared_parameter_annotation)
def test_postparsing_hook_wrong_parameter_annotation():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_postparsing_hook(app.postparse_hook_wrong_parameter_annotation)
def test_postparsing_hook_undeclared_return_annotation():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_postparsing_hook(app.postparse_hook_undeclared_return_annotation)
def test_postparsing_hook_wrong_return_annotation():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_postparsing_hook(app.postparse_hook_wrong_return_annotation)
def test_postparsing_hook(capsys):
app = PluggedApp()
app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert out == 'hello\n'
assert not err
assert not app.called_postparsing
app.reset_counters()
app.register_postparsing_hook(app.postparse_hook)
app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert out == 'hello\n'
assert not err
assert app.called_postparsing == 1
# register the function again, so it should be called twice
app.reset_counters()
app.register_postparsing_hook(app.postparse_hook)
app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert out == 'hello\n'
assert not err
assert app.called_postparsing == 2
def test_postparsing_hook_stop_first(capsys):
app = PluggedApp()
app.register_postparsing_hook(app.postparse_hook_stop)
stop = app.onecmd_plus_hooks('say hello')
assert app.called_postparsing == 1
assert stop
# register another function but it shouldn't be called
app.reset_counters()
app.register_postparsing_hook(app.postparse_hook)
stop = app.onecmd_plus_hooks('say hello')
assert app.called_postparsing == 1
assert stop
def test_postparsing_hook_stop_second(capsys):
app = PluggedApp()
app.register_postparsing_hook(app.postparse_hook)
stop = app.onecmd_plus_hooks('say hello')
assert app.called_postparsing == 1
assert not stop
# register another function and make sure it gets called
app.reset_counters()
app.register_postparsing_hook(app.postparse_hook_stop)
stop = app.onecmd_plus_hooks('say hello')
assert app.called_postparsing == 2
assert stop
# register a third function which shouldn't be called
app.reset_counters()
app.register_postparsing_hook(app.postparse_hook)
stop = app.onecmd_plus_hooks('say hello')
assert app.called_postparsing == 2
assert stop
def test_postparsing_hook_emptystatement_first(capsys):
app = PluggedApp()
app.register_postparsing_hook(app.postparse_hook_emptystatement)
stop = app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert not stop
assert not out
assert not err
assert app.called_postparsing == 1
# register another function but it shouldn't be called
app.reset_counters()
stop = app.register_postparsing_hook(app.postparse_hook)
app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert not stop
assert not out
assert not err
assert app.called_postparsing == 1
def test_postparsing_hook_emptystatement_second(capsys):
app = PluggedApp()
app.register_postparsing_hook(app.postparse_hook)
stop = app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert not stop
assert out == 'hello\n'
assert not err
assert app.called_postparsing == 1
# register another function and make sure it gets called
app.reset_counters()
app.register_postparsing_hook(app.postparse_hook_emptystatement)
stop = app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert not stop
assert not out
assert not err
assert app.called_postparsing == 2
# register a third function which shouldn't be called
app.reset_counters()
app.register_postparsing_hook(app.postparse_hook)
stop = app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert not stop
assert not out
assert not err
assert app.called_postparsing == 2
def test_postparsing_hook_exception(capsys):
app = PluggedApp()
app.register_postparsing_hook(app.postparse_hook_exception)
stop = app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert not stop
assert not out
assert err
assert app.called_postparsing == 1
# register another function, but it shouldn't be called
app.reset_counters()
app.register_postparsing_hook(app.postparse_hook)
stop = app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert not stop
assert not out
assert err
assert app.called_postparsing == 1
###
#
# test precmd hooks
#
#####
def test_register_precmd_hook_parameter_count():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_precmd_hook(app.precmd_hook_not_enough_parameters)
with pytest.raises(TypeError):
app.register_precmd_hook(app.precmd_hook_too_many_parameters)
def test_register_precmd_hook_no_parameter_annotation():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_precmd_hook(app.precmd_hook_no_parameter_annotation)
def test_register_precmd_hook_wrong_parameter_annotation():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_precmd_hook(app.precmd_hook_wrong_parameter_annotation)
def test_register_precmd_hook_no_return_annotation():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_precmd_hook(app.precmd_hook_no_return_annotation)
def test_register_precmd_hook_wrong_return_annotation():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_precmd_hook(app.precmd_hook_wrong_return_annotation)
def test_precmd_hook(capsys):
app = PluggedApp()
app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert out == 'hello\n'
assert not err
# without registering any hooks, precmd() should be called
assert app.called_precmd == 1
app.reset_counters()
app.register_precmd_hook(app.precmd_hook)
app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert out == 'hello\n'
assert not err
# with one hook registered, we should get precmd() and the hook
assert app.called_precmd == 2
# register the function again, so it should be called twice
app.reset_counters()
app.register_precmd_hook(app.precmd_hook)
app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert out == 'hello\n'
assert not err
# with two hooks registered, we should get precmd() and both hooks
assert app.called_precmd == 3
def test_precmd_hook_emptystatement_first(capsys):
app = PluggedApp()
app.register_precmd_hook(app.precmd_hook_emptystatement)
stop = app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert not stop
assert not out
assert not err
# since the registered hooks are called before precmd(), if a registered
# hook throws an exception, precmd() is never called
assert app.called_precmd == 1
# register another function but it shouldn't be called
app.reset_counters()
stop = app.register_precmd_hook(app.precmd_hook)
app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert not stop
assert not out
assert not err
# the exception raised by the first hook should prevent the second
# hook from being called, and it also prevents precmd() from being
# called
assert app.called_precmd == 1
def test_precmd_hook_emptystatement_second(capsys):
app = PluggedApp()
app.register_precmd_hook(app.precmd_hook)
stop = app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert not stop
assert out == 'hello\n'
assert not err
# with one hook registered, we should get precmd() and the hook
assert app.called_precmd == 2
# register another function and make sure it gets called
app.reset_counters()
app.register_precmd_hook(app.precmd_hook_emptystatement)
stop = app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert not stop
assert not out
assert not err
# since the registered hooks are called before precmd(), if a registered
# hook throws an exception, precmd() is never called
assert app.called_precmd == 2
# register a third function which shouldn't be called
app.reset_counters()
app.register_precmd_hook(app.precmd_hook)
stop = app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert not stop
assert not out
assert not err
# the exception raised by the second hook should prevent the third
# hook from being called. since the registered hooks are called before precmd(),
# if a registered hook throws an exception, precmd() is never called
assert app.called_precmd == 2
###
#
# test postcmd hooks
#
####
def test_register_postcmd_hook_parameter_count():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_postcmd_hook(app.postcmd_hook_not_enough_parameters)
with pytest.raises(TypeError):
app.register_postcmd_hook(app.postcmd_hook_too_many_parameters)
def test_register_postcmd_hook_no_parameter_annotation():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_postcmd_hook(app.postcmd_hook_no_parameter_annotation)
def test_register_postcmd_hook_wrong_parameter_annotation():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_postcmd_hook(app.postcmd_hook_wrong_parameter_annotation)
def test_register_postcmd_hook_no_return_annotation():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_postcmd_hook(app.postcmd_hook_no_return_annotation)
def test_register_postcmd_hook_wrong_return_annotation():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_postcmd_hook(app.postcmd_hook_wrong_return_annotation)
def test_postcmd(capsys):
app = PluggedApp()
app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert out == 'hello\n'
assert not err
# without registering any hooks, postcmd() should be called
assert app.called_postcmd == 1
app.reset_counters()
app.register_postcmd_hook(app.postcmd_hook)
app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert out == 'hello\n'
assert not err
# with one hook registered, we should get precmd() and the hook
assert app.called_postcmd == 2
# register the function again, so it should be called twice
app.reset_counters()
app.register_postcmd_hook(app.postcmd_hook)
app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert out == 'hello\n'
assert not err
# with two hooks registered, we should get precmd() and both hooks
assert app.called_postcmd == 3
def test_postcmd_exception_first(capsys):
app = PluggedApp()
app.register_postcmd_hook(app.postcmd_hook_exception)
stop = app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert not stop
assert out == 'hello\n'
assert err
# since the registered hooks are called before postcmd(), if a registered
# hook throws an exception, postcmd() is never called. So we should have
# a count of one because we called the hook that raised the exception
assert app.called_postcmd == 1
# register another function but it shouldn't be called
app.reset_counters()
stop = app.register_postcmd_hook(app.postcmd_hook)
app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert not stop
assert out == 'hello\n'
assert err
# the exception raised by the first hook should prevent the second
# hook from being called, and it also prevents postcmd() from being
# called
assert app.called_postcmd == 1
def test_postcmd_exception_second(capsys):
app = PluggedApp()
app.register_postcmd_hook(app.postcmd_hook)
stop = app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert not stop
assert out == 'hello\n'
assert not err
# with one hook registered, we should get the hook and postcmd()
assert app.called_postcmd == 2
# register another function which should be called
app.reset_counters()
stop = app.register_postcmd_hook(app.postcmd_hook_exception)
app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert not stop
assert out == 'hello\n'
assert err
# the exception raised by the first hook should prevent the second
# hook from being called, and it also prevents postcmd() from being
# called. So we have the first hook, and the second hook, which raised
# the exception
assert app.called_postcmd == 2
##
#
# command finalization
#
###
def test_register_cmdfinalization_hook_parameter_count():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_cmdfinalization_hook(app.cmdfinalization_hook_not_enough_parameters)
with pytest.raises(TypeError):
app.register_cmdfinalization_hook(app.cmdfinalization_hook_too_many_parameters)
def test_register_cmdfinalization_hook_no_parameter_annotation():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_cmdfinalization_hook(app.cmdfinalization_hook_no_parameter_annotation)
def test_register_cmdfinalization_hook_wrong_parameter_annotation():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_cmdfinalization_hook(app.cmdfinalization_hook_wrong_parameter_annotation)
def test_register_cmdfinalization_hook_no_return_annotation():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_cmdfinalization_hook(app.cmdfinalization_hook_no_return_annotation)
def test_register_cmdfinalization_hook_wrong_return_annotation():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_cmdfinalization_hook(app.cmdfinalization_hook_wrong_return_annotation)
def test_cmdfinalization(capsys):
app = PluggedApp()
app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert out == 'hello\n'
assert not err
assert app.called_cmdfinalization == 0
app.register_cmdfinalization_hook(app.cmdfinalization_hook)
app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert out == 'hello\n'
assert not err
assert app.called_cmdfinalization == 1
# register the function again, so it should be called twice
app.reset_counters()
app.register_cmdfinalization_hook(app.cmdfinalization_hook)
app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert out == 'hello\n'
assert not err
assert app.called_cmdfinalization == 2
def test_cmdfinalization_stop_first(capsys):
app = PluggedApp()
app.register_cmdfinalization_hook(app.cmdfinalization_hook_stop)
app.register_cmdfinalization_hook(app.cmdfinalization_hook)
stop = app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert out == 'hello\n'
assert not err
assert app.called_cmdfinalization == 2
assert stop
def test_cmdfinalization_stop_second(capsys):
app = PluggedApp()
app.register_cmdfinalization_hook(app.cmdfinalization_hook)
app.register_cmdfinalization_hook(app.cmdfinalization_hook_stop)
stop = app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert out == 'hello\n'
assert not err
assert app.called_cmdfinalization == 2
assert stop
def test_cmdfinalization_hook_exception(capsys):
app = PluggedApp()
app.register_cmdfinalization_hook(app.cmdfinalization_hook_exception)
stop = app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert not stop
assert out == 'hello\n'
assert err
assert app.called_cmdfinalization == 1
# register another function, but it shouldn't be called
app.reset_counters()
app.register_cmdfinalization_hook(app.cmdfinalization_hook)
stop = app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert not stop
assert out == 'hello\n'
assert err
assert app.called_cmdfinalization == 1
| test_postparsing_hook_too_many_parameters |
sample_advection.py | """
sample code for LLOCK, SLOCK, LSLOCK
application the method to advection model (periodic boundary condition)
"""
import os, sys
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sys.path.append("..")
from pyassim import KalmanFilter, LocalLOCK, SpatiallyUniformLOCK, LSLOCK,\
PeriodicAdvection, EulerScheme
def | ():
result_dir = "figures/advection"
if not os.path.exists(result_dir):
os.makedirs(result_dir)
seed = 121
np.random.seed(seed)
# parameters
N = 20
x0 = np.exp(-(np.arange(N)-N//2)**2/20)
dt = 0.01
dx = 1
c = 1
sys_sd = 0.001
obs_sd = 0.1
timestep = 10000
ds = 100
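# observations are thinned to every ds-th timestep below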
# generate data
model = PeriodicAdvection(dx, c, dt, scheme="LW")
scheme = EulerScheme(dt, timestep, model, seed=seed)
true, obs = scheme.noise_added_simulation(x0, sys_sd, obs_sd)
# setup matrices
# adjacency matrix
A = np.eye(N)
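# mark right neighbours with 2 and left neighbours with 3, wrapping periodically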
A[np.arange(N-1), np.arange(1,N)] = 2
A[np.arange(1,N), np.arange(N-1)] = 3
A[0,-1] = 3
A[-1,0] = 2
# A[np.arange(N-2), np.arange(2,N)] = True
# A[np.arange(2,N), np.arange(N-2)] = True
# A[0,-2] = A[-2,0] = A[1,-1] = A[-1,1] = True
# initial transition matrix
F = np.eye(N)
H = np.eye(N)
# covariance
Q = obs_sd**2 * np.eye(N)
R = obs_sd**2 * np.eye(N)
V0 = obs_sd**2 * np.eye(N)
# execution
kf = KalmanFilter(obs[::ds], x0, V0, F, H, Q, R, em_vars=["transition_matrices"])
kf.em(n_iter=10)
kf.forward()
llock = LocalLOCK(obs[::ds], x0, V0, F, H, Q, R, A.astype(bool), method="elementwise",
estimation_length=20, estimation_interval=5, eta=1.0,
cutoff=10, estimation_mode="forward")
llock.forward()
slock = SpatiallyUniformLOCK(obs[::ds], x0, V0, F, H, Q, R, np.zeros(N), A,
estimation_length=1, estimation_interval=1, eta=1.,
cutoff=10., estimation_mode="forward")
slock.forward()
lslock = LSLOCK(obs[::ds], x0, V0, F, H, Q, R, A, method="gridwise",
estimation_length=10, estimation_interval=5, eta=1.,
cutoff=10., estimation_mode="forward")
lslock.forward()
# draw results
dim=0
plt.figure(figsize=(8,5))
plt.scatter(np.arange(timestep//ds), obs[::ds,dim], label="obs", c="k")
plt.plot(true[::ds,dim], label="true", c="cyan", ls="--")
plt.plot(kf.get_filtered_value(dim), label="kf w/ EM")
plt.plot(llock.get_filtered_value(dim), label="llock")
plt.plot(slock.get_filtered_value(dim), label="slock")
plt.plot(lslock.get_filtered_value(dim), label="lslock")
plt.legend()
plt.savefig(os.path.join(result_dir, "dim{}_estimated.pdf".format(dim)), bbox_inches="tight")
fig, ax = plt.subplots(2,2,figsize=(10,10))
vmin, vmax = obs.min(), obs.max()
sns.heatmap(true[::ds], cmap="Blues", vmin=vmin, vmax=vmax, ax=ax[0,0])
sns.heatmap(llock.get_filtered_value(), cmap="Blues", vmin=vmin, vmax=vmax, ax=ax[0,1])
sns.heatmap(slock.get_filtered_value(), cmap="Blues", vmin=vmin, vmax=vmax, ax=ax[1,0])
sns.heatmap(lslock.get_filtered_value(), cmap="Blues", vmin=vmin, vmax=vmax, ax=ax[1,1])
ax[0,0].set_title("True")
ax[0,1].set_title("LLOCK")
ax[1,0].set_title("SLOCK")
ax[1,1].set_title("LSLOCK")
for i in range(2):
for j in range(2):
ax[i,j].set_xlabel("space")
ax[i,j].set_ylabel("timestep")
fig.savefig(os.path.join(result_dir, "estimated.pdf"))
if __name__ == "__main__":
main() | main |
_loc_scale.py | """
This module contains description of class for probability distributions
from location-scale family.
"""
from method_of_moments.continuous._base_continuous import BaseContinuous
class LocScale(BaseContinuous):
"""
Class for probability distributions from location-scale family.
Parameters
----------
loc : float, optional, default: 0.0
Location parameter of a probability distribution.
scale : float, optional, default: 1.0
Scale parameter of a probability distribution.
**kwargs : `base.BaseDistribution` properties.
Methods
-------
get_standard_mean(mean)
Return mean value for standard distribution in location-scale family.
get_standard_variance(variance)
Return variance for standard distribution in location-scale family.
Raises
------
ValueError
If `scale` is non-positive number.
"""
def __init__(self, loc: float = 0.0, scale: float = 1.0, **kwargs) -> None:
"""Initialize self. See help(type(self)) for accurate signature."""
super().__init__(**kwargs)
self.loc = loc
self.scale = scale
@property
def loc(self) -> float:
"""Return location parameter of a probability distribution."""
return self.__loc
@loc.setter
def loc(self, loc: float = 0.0) -> None:
"""Property setter for `self.loc`."""
self.__loc = loc
@property
def scale(self) -> float:
|
@scale.setter
def scale(self, scale: float = 1.0) -> None:
"""Property setter for `self.scale`."""
if scale <= 0:
raise ValueError('`scale` value must be positive.')
self.__scale = scale
def get_standard_mean(self, mean: float):
"""
Return mean value for standard distribution in location-scale family.
"""
return (mean - self.loc) / self.scale
def get_standard_variance(self, variance: float):
"""
Return variance for standard distribution in location-scale family.
"""
return variance / self.scale ** 2
| """Return scale parameter of a probability distribution."""
return self.__scale |
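# A minimal usage sketch (hypothetical: assumes a concrete subclass MyDist of
# LocScale constructed with loc=2.0 and scale=3.0). The numbers follow
# directly from the formulas above:
#
#     dist = MyDist(loc=2.0, scale=3.0)
#     dist.get_standard_mean(5.0)       # (5.0 - 2.0) / 3.0 == 1.0
#     dist.get_standard_variance(18.0)  # 18.0 / 3.0 ** 2 == 2.0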
tasks.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
from app import celery
from celery.utils.log import get_task_logger
import time
logger = get_task_logger(__name__)
# Scheduled import
current_time = str(time.strftime('%Y-%m-%d %H:%M:%S'))
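# NOTE: evaluated once at import time, so every task run below logs this same
# timestamp rather than the time of execution.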
@celery.task(name="task1")
def task1():
print u"定时任务task1:每5秒执行一次" + current_time
    # Log the result
    logger.info(u"Import succeeded")
@celery.task(name="task2")
def task2():
    # Log the result
print u"定时任务task2:每10秒 | + current_time
logger.info(u"echo成功")
| seconds" |
dataset_utils.py | import os
import torch
from torch.utils.data import Dataset, DataLoader
import torchvision.transforms.functional as F
from torchvision import transforms, utils
from PIL import Image
class resized_dataset(Dataset):
def __init__(self, dataset, transform=None, start=None, end=None, resize=None):
self.data=[]
        if start is None: start = 0
        if end is None: end = len(dataset)
        if resize is None:
            for i in range(start, end):
                self.data.append(dataset[i])
else:
for i in range(start, end):
item=dataset.__getitem__(i)
self.data.append((F.center_crop(F.resize(item[0],resize,Image.BILINEAR),resize),item[1]))
self.transform = transform
|
def __getitem__(self, idx):
if self.transform:
return (self.transform(self.data[idx][0]), self.data[idx][1])
else:
return self.data[idx] | def __len__(self):
return len(self.data) |
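# A minimal usage sketch (hypothetical dataset and parameters, not part of the
# original module): wrap the first 100 CIFAR10 samples, resized and
# centre-cropped to 64x64, then batch them with the DataLoader imported above.
#
#     from torchvision import datasets
#     base = datasets.CIFAR10(root="./data", train=True, download=True)
#     small = resized_dataset(base, transform=transforms.ToTensor(),
#                             start=0, end=100, resize=64)
#     loader = DataLoader(small, batch_size=16, shuffle=True)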
stat.py | from typing import Sequence
import numpy as np
import xarray |
def get_longest_run_start_index(
arr: DataArray,
window: int = 1,
dim: str = "time",
) -> DataArray:
return xarray.apply_ufunc(
get_index_of_longest_run,
arr,
input_core_dims=[[dim]],
kwargs={"window": window},
vectorize=True,
dask="parallelized",
output_dtypes=[float],
)
def get_index_of_longest_run(arr: Sequence[bool], window: int = 1) -> int:
values, rl, pos = rle_1d(arr)
if not np.any(values) or np.all(values * rl < window): # type:ignore
return 0
index_of_max = np.nanargmax(
        np.where(values * rl >= window, rl, np.nan)  # type:ignore
)
return pos[index_of_max] # type:ignore
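# Worked example: for arr = [F, T, T, F, T, T, T] and window = 2, rle_1d gives
# values [F, T, F, T], run lengths [1, 2, 1, 3] and start positions
# [0, 1, 3, 4]; the longest run satisfying the window is the final one
# (length 3), so the function returns its start index, 4.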
def get_first_occurrence_index(da: DataArray) -> DataArray:
"""
Return the index of the first True value in the 3D booleans array along
time dimension.
"""
stacked = da.stack(latlon=("lat", "lon"))
res = stacked.argmax("time")
return res.unstack() | from xarray import DataArray
from xclim.indices.run_length import rle_1d
|
test_media_player.py | """The tests for Monoprice Media player platform."""
from collections import defaultdict
from serial import SerialException
from homeassistant.components.media_player.const import (
ATTR_INPUT_SOURCE,
ATTR_INPUT_SOURCE_LIST,
ATTR_MEDIA_VOLUME_LEVEL,
DOMAIN as MEDIA_PLAYER_DOMAIN,
SERVICE_SELECT_SOURCE,
SUPPORT_SELECT_SOURCE,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET,
SUPPORT_VOLUME_STEP,
)
from homeassistant.components.monoprice.const import (
CONF_SOURCES,
DOMAIN,
SERVICE_RESTORE,
SERVICE_SNAPSHOT,
)
from homeassistant.const import (
CONF_PORT,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
SERVICE_VOLUME_DOWN,
SERVICE_VOLUME_MUTE,
SERVICE_VOLUME_SET,
SERVICE_VOLUME_UP,
)
from homeassistant.helpers.entity_component import async_update_entity
from tests.async_mock import patch
from tests.common import MockConfigEntry
MOCK_CONFIG = {CONF_PORT: "fake port", CONF_SOURCES: {"1": "one", "3": "three"}}
MOCK_OPTIONS = {CONF_SOURCES: {"2": "two", "4": "four"}}
ZONE_1_ID = "media_player.zone_11"
ZONE_2_ID = "media_player.zone_12"
class AttrDict(dict):
"""Helper class for mocking attributes."""
def __setattr__(self, name, value):
"""Set attribute."""
self[name] = value
def __getattr__(self, item):
"""Get attribute."""
return self[item]
class MockMonoprice:
"""Mock for pymonoprice object."""
def __init__(self):
"""Init mock object."""
self.zones = defaultdict(
lambda: AttrDict(power=True, volume=0, mute=True, source=1)
)
def zone_status(self, zone_id):
"""Get zone status."""
status = self.zones[zone_id]
status.zone = zone_id
return AttrDict(status)
def set_source(self, zone_id, source_idx):
"""Set source for zone."""
self.zones[zone_id].source = source_idx
def set_power(self, zone_id, power):
"""Turn zone on/off."""
self.zones[zone_id].power = power
def set_mute(self, zone_id, mute):
"""Mute/unmute zone."""
self.zones[zone_id].mute = mute
def set_volume(self, zone_id, volume):
"""Set volume for zone."""
self.zones[zone_id].volume = volume
def restore_zone(self, zone):
"""Restore zone status."""
self.zones[zone.zone] = AttrDict(zone)
async def test_cannot_connect(hass):
"""Test connection error."""
with patch(
"homeassistant.components.monoprice.get_monoprice", side_effect=SerialException,
):
config_entry = MockConfigEntry(domain=DOMAIN, data=MOCK_CONFIG)
config_entry.add_to_hass(hass)
await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
assert hass.states.get(ZONE_1_ID) is None
async def _setup_monoprice(hass, monoprice):
with patch(
"homeassistant.components.monoprice.get_monoprice", new=lambda *a: monoprice,
):
config_entry = MockConfigEntry(domain=DOMAIN, data=MOCK_CONFIG)
config_entry.add_to_hass(hass)
await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
async def _setup_monoprice_with_options(hass, monoprice):
with patch(
"homeassistant.components.monoprice.get_monoprice", new=lambda *a: monoprice,
):
config_entry = MockConfigEntry(
domain=DOMAIN, data=MOCK_CONFIG, options=MOCK_OPTIONS
)
config_entry.add_to_hass(hass)
await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
async def _call_media_player_service(hass, name, data):
await hass.services.async_call(
MEDIA_PLAYER_DOMAIN, name, service_data=data, blocking=True
)
async def _call_homeassistant_service(hass, name, data):
await hass.services.async_call(
"homeassistant", name, service_data=data, blocking=True
)
async def _call_monoprice_service(hass, name, data):
await hass.services.async_call(DOMAIN, name, service_data=data, blocking=True)
async def test_service_calls_with_entity_id(hass):
"""Test snapshot save/restore service calls."""
await _setup_monoprice(hass, MockMonoprice())
# Changing media player to new state
await _call_media_player_service(
hass, SERVICE_VOLUME_SET, {"entity_id": ZONE_1_ID, "volume_level": 0.0}
)
await _call_media_player_service(
hass, SERVICE_SELECT_SOURCE, {"entity_id": ZONE_1_ID, "source": "one"}
)
# Saving existing values
await _call_monoprice_service(hass, SERVICE_SNAPSHOT, {"entity_id": ZONE_1_ID})
# Changing media player to new state
await _call_media_player_service(
hass, SERVICE_VOLUME_SET, {"entity_id": ZONE_1_ID, "volume_level": 1.0}
)
await _call_media_player_service(
hass, SERVICE_SELECT_SOURCE, {"entity_id": ZONE_1_ID, "source": "three"}
)
# Restoring other media player to its previous state
# The zone should not be restored
await _call_monoprice_service(hass, SERVICE_RESTORE, {"entity_id": ZONE_2_ID})
await hass.async_block_till_done()
# Checking that values were not (!) restored
state = hass.states.get(ZONE_1_ID)
assert state.attributes[ATTR_MEDIA_VOLUME_LEVEL] == 1.0
assert state.attributes[ATTR_INPUT_SOURCE] == "three"
# Restoring media player to its previous state
await _call_monoprice_service(hass, SERVICE_RESTORE, {"entity_id": ZONE_1_ID})
await hass.async_block_till_done()
state = hass.states.get(ZONE_1_ID)
assert state.attributes[ATTR_MEDIA_VOLUME_LEVEL] == 0.0
assert state.attributes[ATTR_INPUT_SOURCE] == "one"
async def test_service_calls_with_all_entities(hass):
"""Test snapshot save/restore service calls."""
await _setup_monoprice(hass, MockMonoprice())
# Changing media player to new state
await _call_media_player_service(
hass, SERVICE_VOLUME_SET, {"entity_id": ZONE_1_ID, "volume_level": 0.0}
)
await _call_media_player_service(
hass, SERVICE_SELECT_SOURCE, {"entity_id": ZONE_1_ID, "source": "one"}
)
# Saving existing values
await _call_monoprice_service(hass, SERVICE_SNAPSHOT, {"entity_id": "all"})
# Changing media player to new state
await _call_media_player_service(
hass, SERVICE_VOLUME_SET, {"entity_id": ZONE_1_ID, "volume_level": 1.0}
)
await _call_media_player_service(
hass, SERVICE_SELECT_SOURCE, {"entity_id": ZONE_1_ID, "source": "three"}
)
# Restoring media player to its previous state
await _call_monoprice_service(hass, SERVICE_RESTORE, {"entity_id": "all"})
await hass.async_block_till_done()
state = hass.states.get(ZONE_1_ID)
assert state.attributes[ATTR_MEDIA_VOLUME_LEVEL] == 0.0
assert state.attributes[ATTR_INPUT_SOURCE] == "one"
async def test_service_calls_without_relevant_entities(hass):
"""Test snapshot save/restore service calls."""
await _setup_monoprice(hass, MockMonoprice())
# Changing media player to new state
await _call_media_player_service(
hass, SERVICE_VOLUME_SET, {"entity_id": ZONE_1_ID, "volume_level": 0.0}
)
await _call_media_player_service(
hass, SERVICE_SELECT_SOURCE, {"entity_id": ZONE_1_ID, "source": "one"}
)
# Saving existing values
await _call_monoprice_service(hass, SERVICE_SNAPSHOT, {"entity_id": "all"})
# Changing media player to new state
await _call_media_player_service(
hass, SERVICE_VOLUME_SET, {"entity_id": ZONE_1_ID, "volume_level": 1.0}
)
await _call_media_player_service(
hass, SERVICE_SELECT_SOURCE, {"entity_id": ZONE_1_ID, "source": "three"}
)
# Restoring media player to its previous state
await _call_monoprice_service(hass, SERVICE_RESTORE, {"entity_id": "light.demo"})
await hass.async_block_till_done()
state = hass.states.get(ZONE_1_ID)
assert state.attributes[ATTR_MEDIA_VOLUME_LEVEL] == 1.0
assert state.attributes[ATTR_INPUT_SOURCE] == "three"
async def test_restore_without_snapshot(hass):
"""Test restore when snapshot wasn't called."""
await _setup_monoprice(hass, MockMonoprice())
with patch.object(MockMonoprice, "restore_zone") as method_call:
await _call_monoprice_service(hass, SERVICE_RESTORE, {"entity_id": ZONE_1_ID})
await hass.async_block_till_done()
assert not method_call.called
async def test_update(hass):
"""Test updating values from monoprice."""
monoprice = MockMonoprice()
await _setup_monoprice(hass, monoprice)
# Changing media player to new state
await _call_media_player_service(
hass, SERVICE_VOLUME_SET, {"entity_id": ZONE_1_ID, "volume_level": 0.0}
)
await _call_media_player_service(
hass, SERVICE_SELECT_SOURCE, {"entity_id": ZONE_1_ID, "source": "one"}
)
monoprice.set_source(11, 3)
monoprice.set_volume(11, 38)
await async_update_entity(hass, ZONE_1_ID)
await hass.async_block_till_done()
state = hass.states.get(ZONE_1_ID)
assert state.attributes[ATTR_MEDIA_VOLUME_LEVEL] == 1.0
assert state.attributes[ATTR_INPUT_SOURCE] == "three"
async def | (hass):
"""Test updating failure from monoprice."""
monoprice = MockMonoprice()
await _setup_monoprice(hass, monoprice)
# Changing media player to new state
await _call_media_player_service(
hass, SERVICE_VOLUME_SET, {"entity_id": ZONE_1_ID, "volume_level": 0.0}
)
await _call_media_player_service(
hass, SERVICE_SELECT_SOURCE, {"entity_id": ZONE_1_ID, "source": "one"}
)
monoprice.set_source(11, 3)
monoprice.set_volume(11, 38)
with patch.object(MockMonoprice, "zone_status", side_effect=SerialException):
await async_update_entity(hass, ZONE_1_ID)
await hass.async_block_till_done()
state = hass.states.get(ZONE_1_ID)
assert state.attributes[ATTR_MEDIA_VOLUME_LEVEL] == 0.0
assert state.attributes[ATTR_INPUT_SOURCE] == "one"
async def test_empty_update(hass):
"""Test updating with no state from monoprice."""
monoprice = MockMonoprice()
await _setup_monoprice(hass, monoprice)
# Changing media player to new state
await _call_media_player_service(
hass, SERVICE_VOLUME_SET, {"entity_id": ZONE_1_ID, "volume_level": 0.0}
)
await _call_media_player_service(
hass, SERVICE_SELECT_SOURCE, {"entity_id": ZONE_1_ID, "source": "one"}
)
monoprice.set_source(11, 3)
monoprice.set_volume(11, 38)
with patch.object(MockMonoprice, "zone_status", return_value=None):
await async_update_entity(hass, ZONE_1_ID)
await hass.async_block_till_done()
state = hass.states.get(ZONE_1_ID)
assert state.attributes[ATTR_MEDIA_VOLUME_LEVEL] == 0.0
assert state.attributes[ATTR_INPUT_SOURCE] == "one"
async def test_supported_features(hass):
"""Test supported features property."""
await _setup_monoprice(hass, MockMonoprice())
state = hass.states.get(ZONE_1_ID)
assert (
SUPPORT_VOLUME_MUTE
| SUPPORT_VOLUME_SET
| SUPPORT_VOLUME_STEP
| SUPPORT_TURN_ON
| SUPPORT_TURN_OFF
| SUPPORT_SELECT_SOURCE
== state.attributes["supported_features"]
)
async def test_source_list(hass):
"""Test source list property."""
await _setup_monoprice(hass, MockMonoprice())
state = hass.states.get(ZONE_1_ID)
    # Note: the list is sorted by source number, not alphabetically.
assert state.attributes[ATTR_INPUT_SOURCE_LIST] == ["one", "three"]
async def test_source_list_with_options(hass):
"""Test source list property."""
await _setup_monoprice_with_options(hass, MockMonoprice())
state = hass.states.get(ZONE_1_ID)
    # Note: the list is sorted by source number, not alphabetically.
assert state.attributes[ATTR_INPUT_SOURCE_LIST] == ["two", "four"]
async def test_select_source(hass):
"""Test source selection methods."""
monoprice = MockMonoprice()
await _setup_monoprice(hass, monoprice)
await _call_media_player_service(
hass,
SERVICE_SELECT_SOURCE,
{"entity_id": ZONE_1_ID, ATTR_INPUT_SOURCE: "three"},
)
assert monoprice.zones[11].source == 3
# Trying to set unknown source
await _call_media_player_service(
hass,
SERVICE_SELECT_SOURCE,
{"entity_id": ZONE_1_ID, ATTR_INPUT_SOURCE: "no name"},
)
assert monoprice.zones[11].source == 3
async def test_unknown_source(hass):
"""Test behavior when device has unknown source."""
monoprice = MockMonoprice()
await _setup_monoprice(hass, monoprice)
monoprice.set_source(11, 5)
await async_update_entity(hass, ZONE_1_ID)
await hass.async_block_till_done()
state = hass.states.get(ZONE_1_ID)
assert state.attributes.get(ATTR_INPUT_SOURCE) is None
async def test_turn_on_off(hass):
"""Test turning on the zone."""
monoprice = MockMonoprice()
await _setup_monoprice(hass, monoprice)
await _call_media_player_service(hass, SERVICE_TURN_OFF, {"entity_id": ZONE_1_ID})
assert not monoprice.zones[11].power
await _call_media_player_service(hass, SERVICE_TURN_ON, {"entity_id": ZONE_1_ID})
assert monoprice.zones[11].power
async def test_mute_volume(hass):
"""Test mute functionality."""
monoprice = MockMonoprice()
await _setup_monoprice(hass, monoprice)
await _call_media_player_service(
hass, SERVICE_VOLUME_SET, {"entity_id": ZONE_1_ID, "volume_level": 0.5}
)
await _call_media_player_service(
hass, SERVICE_VOLUME_MUTE, {"entity_id": ZONE_1_ID, "is_volume_muted": False}
)
assert not monoprice.zones[11].mute
await _call_media_player_service(
hass, SERVICE_VOLUME_MUTE, {"entity_id": ZONE_1_ID, "is_volume_muted": True}
)
assert monoprice.zones[11].mute
async def test_volume_up_down(hass):
"""Test increasing volume by one."""
monoprice = MockMonoprice()
await _setup_monoprice(hass, monoprice)
await _call_media_player_service(
hass, SERVICE_VOLUME_SET, {"entity_id": ZONE_1_ID, "volume_level": 0.0}
)
assert monoprice.zones[11].volume == 0
await _call_media_player_service(
hass, SERVICE_VOLUME_DOWN, {"entity_id": ZONE_1_ID}
)
# should not go below zero
assert monoprice.zones[11].volume == 0
await _call_media_player_service(hass, SERVICE_VOLUME_UP, {"entity_id": ZONE_1_ID})
assert monoprice.zones[11].volume == 1
await _call_media_player_service(
hass, SERVICE_VOLUME_SET, {"entity_id": ZONE_1_ID, "volume_level": 1.0}
)
assert monoprice.zones[11].volume == 38
await _call_media_player_service(hass, SERVICE_VOLUME_UP, {"entity_id": ZONE_1_ID})
# should not go above 38
assert monoprice.zones[11].volume == 38
await _call_media_player_service(
hass, SERVICE_VOLUME_DOWN, {"entity_id": ZONE_1_ID}
)
assert monoprice.zones[11].volume == 37
| test_failed_update |
lib.rs | use proc_macro2::TokenStream;
use quote::{quote, ToTokens};
#[proc_macro_derive(Marshal)]
pub fn derive_marshal(input: proc_macro::TokenStream) -> proc_macro::TokenStream {
let ast: syn::DeriveInput = syn::parse(input).unwrap();
match ast.data {
syn::Data::Struct(data) => {
make_struct_marshal_impl(&ast.ident, &ast.generics, &data.fields).into()
}
_ => unimplemented!("Nothing but structs can be derived on right now"),
}
}
#[proc_macro_derive(Unmarshal)]
pub fn derive_unmarshal(input: proc_macro::TokenStream) -> proc_macro::TokenStream |
#[proc_macro_derive(Signature)]
pub fn derive_signature(input: proc_macro::TokenStream) -> proc_macro::TokenStream {
let ast: syn::DeriveInput = syn::parse(input).unwrap();
match ast.data {
syn::Data::Struct(data) => {
make_struct_signature_impl(&ast.ident, &ast.generics, &data.fields).into()
}
_ => unimplemented!("Nothing but structs can be derived on right now"),
}
}
fn make_struct_marshal_impl(
ident: &syn::Ident,
generics: &syn::Generics,
fields: &syn::Fields,
) -> TokenStream {
let (impl_gen, typ_gen, clause_gen) = generics.split_for_impl();
let marshal = struct_field_marshal(fields);
quote! {
impl #impl_gen ::rustbus::Marshal for #ident #typ_gen #clause_gen {
#[inline]
fn marshal(&self, ctx: &mut ::rustbus::wire::marshal::MarshalContext<'_,'_>) -> Result<(), ::rustbus::Error> {
#marshal
}
}
}
}
fn make_struct_unmarshal_impl(
ident: &syn::Ident,
generics: &syn::Generics,
fields: &syn::Fields,
) -> TokenStream {
let marshal = struct_field_unmarshal(fields);
let mut rdef = syn::LifetimeDef {
attrs: Vec::new(),
lifetime: syn::Lifetime::new("'__internal_r", proc_macro2::Span::call_site()),
colon_token: None,
bounds: syn::punctuated::Punctuated::new(),
};
let mut bufdef = syn::LifetimeDef {
attrs: Vec::new(),
lifetime: syn::Lifetime::new("'__internal_buf", proc_macro2::Span::call_site()),
colon_token: None,
bounds: syn::punctuated::Punctuated::new(),
};
bufdef.bounds.push(rdef.lifetime.clone());
let fdsdef = syn::LifetimeDef {
attrs: Vec::new(),
lifetime: syn::Lifetime::new("'__internal_fds", proc_macro2::Span::call_site()),
colon_token: None,
bounds: syn::punctuated::Punctuated::new(),
};
let mut new_generics = generics.clone();
new_generics
.lifetimes()
.for_each(|lt| rdef.bounds.push(lt.lifetime.clone()));
let typ_generics = new_generics.clone();
let (_, typ_gen, _) = typ_generics.split_for_impl();
new_generics.params = vec![
syn::GenericParam::Lifetime(rdef),
syn::GenericParam::Lifetime(bufdef),
syn::GenericParam::Lifetime(fdsdef),
]
.into_iter()
.chain(new_generics.params)
.collect();
let (impl_gen, _, clause_gen) = new_generics.split_for_impl();
quote! {
impl #impl_gen ::rustbus::Unmarshal<'__internal_r, '__internal_buf, '__internal_fds> for #ident #typ_gen #clause_gen {
#[inline]
fn unmarshal(ctx: &mut ::rustbus::wire::unmarshal::UnmarshalContext<'__internal_fds,'__internal_buf>) -> Result<(usize,Self), ::rustbus::wire::unmarshal::Error> {
#marshal
}
}
}
}
fn make_struct_signature_impl(
ident: &syn::Ident,
generics: &syn::Generics,
fields: &syn::Fields,
) -> TokenStream {
let (impl_gen, typ_gen, clause_gen) = generics.split_for_impl();
let signature = struct_field_sigs(fields);
quote! {
impl #impl_gen ::rustbus::Signature for #ident #typ_gen #clause_gen {
#[inline]
fn signature() -> ::rustbus::signature::Type {
#signature
}
fn alignment() -> usize {
8
}
}
}
}
fn struct_field_marshal(fields: &syn::Fields) -> TokenStream {
let field_names = fields
.iter()
.map(|field| field.ident.as_ref().unwrap().to_token_stream());
quote! {
ctx.align_to(8);
#(
self.#field_names.marshal(ctx)?;
)*
Ok(())
}
}
fn struct_field_unmarshal(fields: &syn::Fields) -> TokenStream {
let field_names = fields
.iter()
.map(|field| field.ident.as_ref().unwrap().to_token_stream());
let field_types = fields.iter().map(|field| field.ty.to_token_stream());
quote! {
let start_offset = ctx.offset;
ctx.align_to(8)?;
let this = Self{
#(
#field_names: <#field_types as ::rustbus::Unmarshal>::unmarshal(ctx)?.1,
)*
};
let total_bytes = ctx.offset - start_offset;
Ok((total_bytes, this))
}
}
fn struct_field_sigs(fields: &syn::Fields) -> TokenStream {
let field_types = fields
.iter()
.map(|field| field.ty.to_token_stream())
.collect::<Vec<_>>();
if field_types.is_empty() {
panic!("Signature can not be derived for empty structs!")
}
quote! {
let mut sigs = vec![];
#(
sigs.push(<#field_types as rustbus::Signature>::signature());
)*
::rustbus::signature::Type::Container(::rustbus::signature::Container::Struct(
::rustbus::signature::StructTypes::new(sigs).unwrap()
))
}
}
| {
let ast: syn::DeriveInput = syn::parse(input).unwrap();
match ast.data {
syn::Data::Struct(data) => {
make_struct_unmarshal_impl(&ast.ident, &ast.generics, &data.fields).into()
}
_ => unimplemented!("Nothing but structs can be derived on right now"),
}
} |
less.rs | use std::process::Command;
pub fn retrieve_less_version() -> Option<usize> {
let cmd = Command::new("less").arg("--version").output().ok()?;
parse_less_version(&cmd.stdout)
}
fn parse_less_version(output: &[u8]) -> Option<usize> {
if output.starts_with(b"less ") {
let version = std::str::from_utf8(&output[5..]).ok()?;
let end = version.find(' ')?;
version[..end].parse::<usize>().ok()
} else {
None
}
}
#[test]
fn test_parse_less_version_487() {
let output = b"less 487 (GNU regular expressions)
Copyright (C) 1984-2016 Mark Nudelman
less comes with NO WARRANTY, to the extent permitted by law.
For information about the terms of redistribution,
see the file named README in the less distribution.
Homepage: http://www.greenwoodsoftware.com/less";
assert_eq!(Some(487), parse_less_version(output));
}
#[test]
fn test_parse_less_version_529() {
let output = b"less 529 (Spencer V8 regular expressions)
Copyright (C) 1984-2017 Mark Nudelman
less comes with NO WARRANTY, to the extent permitted by law.
For information about the terms of redistribution,
see the file named README in the less distribution.
Homepage: http://www.greenwoodsoftware.com/less";
assert_eq!(Some(529), parse_less_version(output));
}
#[test]
fn test_parse_less_version_551() {
let output = b"less 551 (PCRE regular expressions)
Copyright (C) 1984-2019 Mark Nudelman
less comes with NO WARRANTY, to the extent permitted by law.
For information about the terms of redistribution,
see the file named README in the less distribution.
Home page: http://www.greenwoodsoftware.com/less";
assert_eq!(Some(551), parse_less_version(output));
}
#[test]
fn | () {
let output = b"more from util-linux 2.34";
assert_eq!(None, parse_less_version(output));
}
| test_parse_less_version_wrong_program |
rpc_error_codes_util.go | /*
* Copyright (c) 2017, https://github.com/nebulaim
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package mtproto
import (
"fmt"
"github.com/golang/glog"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// FILE_MIGRATE_X = 303000;
// PHONE_MIGRATE_X = 303001;
// NETWORK_MIGRATE_X = 303002;
// USER_MIGRATE_X = 303003;
//
// For ERROR_SEE_OTHER codes, the _X suffix is the DC number, so we provide the custom NewXXXX() constructors below.
func NewFileMigrateX(dc int32, message string) *TLRpcError {
return &TLRpcError{Data2: &RpcError_Data{
ErrorCode: int32(TLRpcErrorCodes_ERROR_SEE_OTHER),
ErrorMessage: fmt.Sprintf("FILE_MIGRATE_%d: %s", dc, message),
}}
}
func NewFileMigrateX2(dc int) *TLRpcError {
return &TLRpcError{Data2: &RpcError_Data{
ErrorCode: int32(TLRpcErrorCodes_ERROR_SEE_OTHER),
ErrorMessage: fmt.Sprintf("FILE_MIGRATE_%d", dc),
}}
}
func NewPhoneMigrateX(dc int32, message string) *TLRpcError {
return &TLRpcError{Data2: &RpcError_Data{
ErrorCode: int32(TLRpcErrorCodes_ERROR_SEE_OTHER),
ErrorMessage: fmt.Sprintf("PHONE_MIGRATE_%d: %s", dc, message),
}}
}
func | (dc int) *TLRpcError {
return &TLRpcError{Data2: &RpcError_Data{
ErrorCode: int32(TLRpcErrorCodes_ERROR_SEE_OTHER),
ErrorMessage: fmt.Sprintf("PHONE_MIGRATE_%d", dc),
}}
}
func NewNetworkMigrateX(dc int32, message string) *TLRpcError {
return &TLRpcError{Data2: &RpcError_Data{
ErrorCode: int32(TLRpcErrorCodes_ERROR_SEE_OTHER),
ErrorMessage: fmt.Sprintf("NETWORK_MIGRATE_%d: %s", dc, message),
}}
}
func NewNetworkMigrateX2(dc int) *TLRpcError {
return &TLRpcError{Data2: &RpcError_Data{
ErrorCode: int32(TLRpcErrorCodes_ERROR_SEE_OTHER),
ErrorMessage: fmt.Sprintf("NETWORK_MIGRATE_%d", dc),
}}
}
func NewUserMigrateX(dc int32, message string) *TLRpcError {
return &TLRpcError{Data2: &RpcError_Data{
ErrorCode: int32(TLRpcErrorCodes_ERROR_SEE_OTHER),
ErrorMessage: fmt.Sprintf("USER_MIGRATE_%d: %s", dc, message),
}}
}
func NewUserMigrateX2(dc int32) *TLRpcError {
return &TLRpcError{Data2: &RpcError_Data{
ErrorCode: int32(TLRpcErrorCodes_ERROR_SEE_OTHER),
ErrorMessage: fmt.Sprintf("USER_MIGRATE_%d", dc),
}}
}
// FLOOD_WAIT_X: A wait of X seconds is required (where X is a number)
//
func NewFloodWaitX(second int32, message string) *TLRpcError {
return &TLRpcError{Data2: &RpcError_Data{
ErrorCode: int32(TLRpcErrorCodes_FLOOD),
ErrorMessage: fmt.Sprintf("FLOOD_WAIT_%d: %s", second, message),
}}
}
func NewFloodWaitX2(second int) *TLRpcError {
return &TLRpcError{Data2: &RpcError_Data{
ErrorCode: int32(TLRpcErrorCodes_FLOOD),
ErrorMessage: fmt.Sprintf("FLOOD_WAIT_%d", second),
}}
}
// NewRpcError constructs a TLRpcError for normal (non-migrate, non-flood) codes.
func NewRpcError(code int32, message string) (err *TLRpcError) {
if name, ok := TLRpcErrorCodes_name[int32(code)]; ok {
if code <= int32(TLRpcErrorCodes_OTHER2) {
err = &TLRpcError{Data2: &RpcError_Data{
ErrorCode: code,
ErrorMessage: fmt.Sprintf("%s: %s", name, message),
}}
} else {
switch code {
			// These codes require a DC number; construct them with the New*MigrateX helpers instead.
case int32(TLRpcErrorCodes_FILE_MIGRATE_X),
int32(TLRpcErrorCodes_NETWORK_MIGRATE_X),
int32(TLRpcErrorCodes_PHONE_MIGRATE_X),
int32(TLRpcErrorCodes_USER_MIGRATE_X):
err = &TLRpcError{Data2: &RpcError_Data{
ErrorCode: int32(TLRpcErrorCodes_OTHER2),
ErrorMessage: fmt.Sprintf("INTERNAL_SERVER_ERROR: Not invoke NewRpcError(%s), please use New%s(dc, %s), ", name, name, message),
}}
glog.Error(err)
case int32(TLRpcErrorCodes_FLOOD_WAIT_X):
err = &TLRpcError{Data2: &RpcError_Data{
ErrorCode: int32(TLRpcErrorCodes_FLOOD),
ErrorMessage: fmt.Sprintf("FLOOD_WAIT_%s: %s", name, name),
}}
glog.Error(err)
default:
code2 := code / 1000
if code2 == 0 {
code2 = code
}
err = &TLRpcError{Data2: &RpcError_Data{
// subcode = code * 1000 + i
ErrorCode: int32(code2),
ErrorMessage: name,
}}
}
}
} else {
err = &TLRpcError{Data2: &RpcError_Data{
// subcode = code * 10000 + i
ErrorCode: int32(TLRpcErrorCodes_INTERNAL),
ErrorMessage: fmt.Sprintf("INTERNAL_SERVER_ERROR: code = %d, message = %s", code, message),
}}
}
return
}
// NewRpcError2 is like NewRpcError but takes a typed code and no extra message.
func NewRpcError2(code TLRpcErrorCodes) (err *TLRpcError) {
if name, ok := TLRpcErrorCodes_name[int32(code)]; ok {
if code <= TLRpcErrorCodes_OTHER2 {
err = &TLRpcError{Data2: &RpcError_Data{
ErrorCode: int32(code),
ErrorMessage: name,
}}
} else {
switch code {
			// These codes require a DC number; construct them with the New*MigrateX helpers instead.
case TLRpcErrorCodes_FILE_MIGRATE_X,
TLRpcErrorCodes_NETWORK_MIGRATE_X,
TLRpcErrorCodes_PHONE_MIGRATE_X,
TLRpcErrorCodes_USER_MIGRATE_X:
err = &TLRpcError{Data2: &RpcError_Data{
ErrorCode: int32(TLRpcErrorCodes_OTHER2),
ErrorMessage: fmt.Sprintf("INTERNAL_SERVER_ERROR: Not invoke NewRpcError(%s), please use New%s(dc), ", name, name),
}}
glog.Fatal(err)
case TLRpcErrorCodes_FLOOD_WAIT_X:
err = &TLRpcError{Data2: &RpcError_Data{
ErrorCode: int32(TLRpcErrorCodes_FLOOD),
ErrorMessage: fmt.Sprintf("INTERNAL_SERVER_ERROR: Not invoke NewRpcError(%s), please use NewFloodWaitX2(seconds), ", name),
}}
glog.Error(err)
default:
code2 := code / 1000
if code2 == 0 {
code2 = code
}
err = &TLRpcError{Data2: &RpcError_Data{
// subcode = code * 1000 + i
ErrorCode: int32(code2),
ErrorMessage: name,
}}
}
}
} else {
err = &TLRpcError{Data2: &RpcError_Data{
// subcode = code * 10000 + i
ErrorCode: int32(TLRpcErrorCodes_INTERNAL),
ErrorMessage: "INTERNAL_SERVER_ERROR",
}}
}
return
}
// Impl error interface
func (e *TLRpcError) Error() string {
return fmt.Sprintf("rpc error: code = %d desc = %s", e.GetErrorCode(), e.GetErrorMessage())
}
// ToGrpcStatus converts the error to a gRPC status.
func (e *TLRpcError) ToGrpcStatus() *status.Status {
return status.New(codes.Internal, e.Error())
}
/*
// Impl error interface
func (e *TLRpcError) ToMetadata() (metadata.MD) {
// return status.New(codes.Internal, e.Error())
if name2, ok := TLRpcErrorCodes_name[e.ErrorCode]; ok {
return metadata.Pairs(
"rpc_error_code", name2,
"rpc_error_message", e.ErrorMessage)
}
return metadata.Pairs(
"rpc_error_code", "OTHER2",
"rpc_error_message", fmt.Sprintf("INTERNAL_SERVER_ERROR: %s", e.ErrorMessage))
}
func NewRpcErrorFromMetadata(md metadata.MD) (*TLRpcError, error) {
e := &TLRpcError{}
if v, ok := getFirstKeyVal(md, "rpc_error_code"); ok {
if code, ok := TLRpcErrorCodes_value[v]; !ok {
return nil, fmt.Errorf("Invalid rpc_error_code: %s", v)
} else {
e.ErrorCode = code
}
} else {
return nil, fmt.Errorf("Not found metadata's key: rpc_error_code")
}
if v, ok := getFirstKeyVal(md, "rpc_error_message"); !ok {
e.ErrorMessage = v
} else {
return nil, fmt.Errorf("Not found metadata's key: rpc_error_message")
}
return e, nil
}
*/
| NewPhoneMigrateX2 |
actionTypes.ts | export const actionTypes = {
    // Keep the list's data operations in redux
FETCH_MEMBERS_COMPLETED: 'FETCH_MEMBERS_COMPLETED',
    // Form for /member/:id
FETCH_MEMBER_BY_ID_COMPLETED: 'FETCH_MEMBER_BY_ID_COMPLETED',
    // change action: update a form field
UPDATE_MEMBER_FIELD: 'UPDATE_MEMBER_FIELD',
    // Save
SAVE_MEMBER: 'SAVE_MEMBER' | } |
|
exceptions.py | # vim: ft=python fileencoding=utf-8 sw=4 et sts=4
"""Contains custom exceptions used by vimiv."""
class NoSearchResultsError(Exception):
"""Raised when a search result is accessed although there are no results."""
class StringConversionError(ValueError):
"""Raised when a setting or argument could not be converted to its type."""
class SettingNotFoundError(Exception):
"""Raised when a setting does not exist."""
class | (Exception):
"""Raised when a setting is not a boolean."""
class NotANumber(Exception):
"""Raised when a setting is not a number."""
class AliasError(Exception):
"""Raised when there are problems when adding an alias."""
class TrashUndeleteError(Exception):
"""Raised when there were problems calling :undelete."""
class NotTransformable(Exception):
"""Raised when an image is not transformable for transform.py."""
class ArgumentAmountError(Exception):
"""Raised if the amount of arguments is not compatible with a command."""
| NotABoolean |
schemasApi.ts | /**
* LUSID API
* # Introduction This page documents the [LUSID APIs](https://www.lusid.com/api/swagger), which allows authorised clients to query and update their data within the LUSID platform. SDKs to interact with the LUSID APIs are available in the following languages and frameworks: * [C#](https://github.com/finbourne/lusid-sdk-csharp) * [Java](https://github.com/finbourne/lusid-sdk-java) * [JavaScript](https://github.com/finbourne/lusid-sdk-js) * [Python](https://github.com/finbourne/lusid-sdk-python) * [Angular](https://github.com/finbourne/lusid-sdk-angular) The LUSID platform is made up of a number of sub-applications. You can find the API / swagger documentation by following the links in the table below. | Application | Description | API / Swagger Documentation | | ----- | ----- | ---- | | LUSID | Open, API-first, developer-friendly investment data platform. | [Swagger](https://www.lusid.com/api/swagger/index.html) | | Web app | User-facing front end for LUSID. | [Swagger](https://www.lusid.com/app/swagger/index.html) | | Scheduler | Automated job scheduler. | [Swagger](https://www.lusid.com/scheduler2/swagger/index.html) | | Insights |Monitoring and troubleshooting service. | [Swagger](https://www.lusid.com/insights/swagger/index.html) | | Identity | Identity management for LUSID (in conjuction with Access) | [Swagger](https://www.lusid.com/identity/swagger/index.html) | | Access | Access control for LUSID (in conjunction with Identity) | [Swagger](https://www.lusid.com/access/swagger/index.html) | | Drive | Secure file repository and manager for collaboration. | [Swagger](https://www.lusid.com/drive/swagger/index.html) | | Luminesce | Data virtualisation service (query data from multiple providers, including LUSID) | [Swagger](https://www.lusid.com/honeycomb/swagger/index.html) | | Notification | Notification service. | [Swagger](https://www.lusid.com/notifications/swagger/index.html) | | Configuration | File store for secrets and other sensitive information. | [Swagger](https://www.lusid.com/configuration/swagger/index.html) | # Error Codes | Code|Name|Description | | ---|---|--- | | <a name=\"-10\">-10</a>|Server Configuration Error| | | <a name=\"-1\">-1</a>|Unknown error|An unexpected error was encountered on our side. 
| | <a name=\"102\">102</a>|Version Not Found| | | <a name=\"103\">103</a>|Api Rate Limit Violation| | | <a name=\"104\">104</a>|Instrument Not Found| | | <a name=\"105\">105</a>|Property Not Found| | | <a name=\"106\">106</a>|Portfolio Recursion Depth| | | <a name=\"108\">108</a>|Group Not Found| | | <a name=\"109\">109</a>|Portfolio Not Found| | | <a name=\"110\">110</a>|Property Schema Not Found| | | <a name=\"111\">111</a>|Portfolio Ancestry Not Found| | | <a name=\"112\">112</a>|Portfolio With Id Already Exists| | | <a name=\"113\">113</a>|Orphaned Portfolio| | | <a name=\"119\">119</a>|Missing Base Claims| | | <a name=\"121\">121</a>|Property Not Defined| | | <a name=\"122\">122</a>|Cannot Delete System Property| | | <a name=\"123\">123</a>|Cannot Modify Immutable Property Field| | | <a name=\"124\">124</a>|Property Already Exists| | | <a name=\"125\">125</a>|Invalid Property Life Time| | | <a name=\"126\">126</a>|Property Constraint Style Excludes Properties| | | <a name=\"127\">127</a>|Cannot Modify Default Data Type| | | <a name=\"128\">128</a>|Group Already Exists| | | <a name=\"129\">129</a>|No Such Data Type| | | <a name=\"130\">130</a>|Undefined Value For Data Type| | | <a name=\"131\">131</a>|Unsupported Value Type Defined On Data Type| | | <a name=\"132\">132</a>|Validation Error| | | <a name=\"133\">133</a>|Loop Detected In Group Hierarchy| | | <a name=\"134\">134</a>|Undefined Acceptable Values| | | <a name=\"135\">135</a>|Sub Group Already Exists| | | <a name=\"138\">138</a>|Price Source Not Found| | | <a name=\"139\">139</a>|Analytic Store Not Found| | | <a name=\"141\">141</a>|Analytic Store Already Exists| | | <a name=\"143\">143</a>|Client Instrument Already Exists| | | <a name=\"144\">144</a>|Duplicate In Parameter Set| | | <a name=\"147\">147</a>|Results Not Found| | | <a name=\"148\">148</a>|Order Field Not In Result Set| | | <a name=\"149\">149</a>|Operation Failed| | | <a name=\"150\">150</a>|Elastic Search Error| | | <a name=\"151\">151</a>|Invalid Parameter Value| | | <a name=\"153\">153</a>|Command Processing Failure| | | <a name=\"154\">154</a>|Entity State Construction Failure| | | <a name=\"155\">155</a>|Entity Timeline Does Not Exist| | | <a name=\"156\">156</a>|Concurrency Conflict Failure| | | <a name=\"157\">157</a>|Invalid Request| | | <a name=\"158\">158</a>|Event Publish Unknown| | | <a name=\"159\">159</a>|Event Query Failure| | | <a name=\"160\">160</a>|Blob Did Not Exist| | | <a name=\"162\">162</a>|Sub System Request Failure| | | <a name=\"163\">163</a>|Sub System Configuration Failure| | | <a name=\"165\">165</a>|Failed To Delete| | | <a name=\"166\">166</a>|Upsert Client Instrument Failure| | | <a name=\"167\">167</a>|Illegal As At Interval| | | <a name=\"168\">168</a>|Illegal Bitemporal Query| | | <a name=\"169\">169</a>|Invalid Alternate Id| | | <a name=\"170\">170</a>|Cannot Add Source Portfolio Property Explicitly| | | <a name=\"171\">171</a>|Entity Already Exists In Group| | | <a name=\"173\">173</a>|Entity With Id Already Exists| | | <a name=\"174\">174</a>|Derived Portfolio Details Do Not Exist| | | <a name=\"176\">176</a>|Portfolio With Name Already Exists| | | <a name=\"177\">177</a>|Invalid Transactions| | | <a name=\"178\">178</a>|Reference Portfolio Not Found| | | <a name=\"179\">179</a>|Duplicate Id| | | <a name=\"180\">180</a>|Command Retrieval Failure| | | <a name=\"181\">181</a>|Data Filter Application Failure| | | <a name=\"182\">182</a>|Search Failed| | | <a name=\"183\">183</a>|Movements Engine Configuration Key Failure| | | 
<a name=\"184\">184</a>|Fx Rate Source Not Found| | | <a name=\"185\">185</a>|Accrual Source Not Found| | | <a name=\"186\">186</a>|Access Denied| | | <a name=\"187\">187</a>|Invalid Identity Token| | | <a name=\"188\">188</a>|Invalid Request Headers| | | <a name=\"189\">189</a>|Price Not Found| | | <a name=\"190\">190</a>|Invalid Sub Holding Keys Provided| | | <a name=\"191\">191</a>|Duplicate Sub Holding Keys Provided| | | <a name=\"192\">192</a>|Cut Definition Not Found| | | <a name=\"193\">193</a>|Cut Definition Invalid| | | <a name=\"194\">194</a>|Time Variant Property Deletion Date Unspecified| | | <a name=\"195\">195</a>|Perpetual Property Deletion Date Specified| | | <a name=\"196\">196</a>|Time Variant Property Upsert Date Unspecified| | | <a name=\"197\">197</a>|Perpetual Property Upsert Date Specified| | | <a name=\"200\">200</a>|Invalid Unit For Data Type| | | <a name=\"201\">201</a>|Invalid Type For Data Type| | | <a name=\"202\">202</a>|Invalid Value For Data Type| | | <a name=\"203\">203</a>|Unit Not Defined For Data Type| | | <a name=\"204\">204</a>|Units Not Supported On Data Type| | | <a name=\"205\">205</a>|Cannot Specify Units On Data Type| | | <a name=\"206\">206</a>|Unit Schema Inconsistent With Data Type| | | <a name=\"207\">207</a>|Unit Definition Not Specified| | | <a name=\"208\">208</a>|Duplicate Unit Definitions Specified| | | <a name=\"209\">209</a>|Invalid Units Definition| | | <a name=\"210\">210</a>|Invalid Instrument Identifier Unit| | | <a name=\"211\">211</a>|Holdings Adjustment Does Not Exist| | | <a name=\"212\">212</a>|Could Not Build Excel Url| | | <a name=\"213\">213</a>|Could Not Get Excel Version| | | <a name=\"214\">214</a>|Instrument By Code Not Found| | | <a name=\"215\">215</a>|Entity Schema Does Not Exist| | | <a name=\"216\">216</a>|Feature Not Supported On Portfolio Type| | | <a name=\"217\">217</a>|Quote Not Found| | | <a name=\"218\">218</a>|Invalid Quote Identifier| | | <a name=\"219\">219</a>|Invalid Metric For Data Type| | | <a name=\"220\">220</a>|Invalid Instrument Definition| | | <a name=\"221\">221</a>|Instrument Upsert Failure| | | <a name=\"222\">222</a>|Reference Portfolio Request Not Supported| | | <a name=\"223\">223</a>|Transaction Portfolio Request Not Supported| | | <a name=\"224\">224</a>|Invalid Property Value Assignment| | | <a name=\"230\">230</a>|Transaction Type Not Found| | | <a name=\"231\">231</a>|Transaction Type Duplication| | | <a name=\"232\">232</a>|Portfolio Does Not Exist At Given Date| | | <a name=\"233\">233</a>|Query Parser Failure| | | <a name=\"234\">234</a>|Duplicate Constituent| | | <a name=\"235\">235</a>|Unresolved Instrument Constituent| | | <a name=\"236\">236</a>|Unresolved Instrument In Transition| | | <a name=\"237\">237</a>|Missing Side Definitions| | | <a name=\"299\">299</a>|Invalid Recipe| | | <a name=\"300\">300</a>|Missing Recipe| | | <a name=\"301\">301</a>|Dependencies| | | <a name=\"304\">304</a>|Portfolio Preprocess Failure| | | <a name=\"310\">310</a>|Valuation Engine Failure| | | <a name=\"311\">311</a>|Task Factory Failure| | | <a name=\"312\">312</a>|Task Evaluation Failure| | | <a name=\"313\">313</a>|Task Generation Failure| | | <a name=\"314\">314</a>|Engine Configuration Failure| | | <a name=\"315\">315</a>|Model Specification Failure| | | <a name=\"320\">320</a>|Market Data Key Failure| | | <a name=\"321\">321</a>|Market Resolver Failure| | | <a name=\"322\">322</a>|Market Data Failure| | | <a name=\"330\">330</a>|Curve Failure| | | <a name=\"331\">331</a>|Volatility Surface 
Failure| | | <a name=\"332\">332</a>|Volatility Cube Failure| | | <a name=\"350\">350</a>|Instrument Failure| | | <a name=\"351\">351</a>|Cash Flows Failure| | | <a name=\"352\">352</a>|Reference Data Failure| | | <a name=\"360\">360</a>|Aggregation Failure| | | <a name=\"361\">361</a>|Aggregation Measure Failure| | | <a name=\"370\">370</a>|Result Retrieval Failure| | | <a name=\"371\">371</a>|Result Processing Failure| | | <a name=\"372\">372</a>|Vendor Result Processing Failure| | | <a name=\"373\">373</a>|Vendor Result Mapping Failure| | | <a name=\"374\">374</a>|Vendor Library Unauthorised| | | <a name=\"375\">375</a>|Vendor Connectivity Error| | | <a name=\"376\">376</a>|Vendor Interface Error| | | <a name=\"377\">377</a>|Vendor Pricing Failure| | | <a name=\"378\">378</a>|Vendor Translation Failure| | | <a name=\"379\">379</a>|Vendor Key Mapping Failure| | | <a name=\"380\">380</a>|Vendor Reflection Failure| | | <a name=\"381\">381</a>|Vendor Process Failure| | | <a name=\"382\">382</a>|Vendor System Failure| | | <a name=\"390\">390</a>|Attempt To Upsert Duplicate Quotes| | | <a name=\"391\">391</a>|Corporate Action Source Does Not Exist| | | <a name=\"392\">392</a>|Corporate Action Source Already Exists| | | <a name=\"393\">393</a>|Instrument Identifier Already In Use| | | <a name=\"394\">394</a>|Properties Not Found| | | <a name=\"395\">395</a>|Batch Operation Aborted| | | <a name=\"400\">400</a>|Invalid Iso4217 Currency Code| | | <a name=\"401\">401</a>|Cannot Assign Instrument Identifier To Currency| | | <a name=\"402\">402</a>|Cannot Assign Currency Identifier To Non Currency| | | <a name=\"403\">403</a>|Currency Instrument Cannot Be Deleted| | | <a name=\"404\">404</a>|Currency Instrument Cannot Have Economic Definition| | | <a name=\"405\">405</a>|Currency Instrument Cannot Have Lookthrough Portfolio| | | <a name=\"406\">406</a>|Cannot Create Currency Instrument With Multiple Identifiers| | | <a name=\"407\">407</a>|Specified Currency Is Undefined| | | <a name=\"410\">410</a>|Index Does Not Exist| | | <a name=\"411\">411</a>|Sort Field Does Not Exist| | | <a name=\"413\">413</a>|Negative Pagination Parameters| | | <a name=\"414\">414</a>|Invalid Search Syntax| | | <a name=\"415\">415</a>|Filter Execution Timeout| | | <a name=\"420\">420</a>|Side Definition Inconsistent| | | <a name=\"450\">450</a>|Invalid Quote Access Metadata Rule| | | <a name=\"451\">451</a>|Access Metadata Not Found| | | <a name=\"452\">452</a>|Invalid Access Metadata Identifier| | | <a name=\"460\">460</a>|Standard Resource Not Found| | | <a name=\"461\">461</a>|Standard Resource Conflict| | | <a name=\"462\">462</a>|Calendar Not Found| | | <a name=\"463\">463</a>|Date In A Calendar Not Found| | | <a name=\"464\">464</a>|Invalid Date Source Data| | | <a name=\"465\">465</a>|Invalid Timezone| | | <a name=\"601\">601</a>|Person Identifier Already In Use| | | <a name=\"602\">602</a>|Person Not Found| | | <a name=\"603\">603</a>|Cannot Set Identifier| | | <a name=\"617\">617</a>|Invalid Recipe Specification In Request| | | <a name=\"618\">618</a>|Inline Recipe Deserialisation Failure| | | <a name=\"619\">619</a>|Identifier Types Not Set For Entity| | | <a name=\"620\">620</a>|Cannot Delete All Client Defined Identifiers| | | <a name=\"650\">650</a>|The Order requested was not found.| | | <a name=\"654\">654</a>|The Allocation requested was not found.| | | <a name=\"655\">655</a>|Cannot build the fx forward target with the given holdings.| | | <a name=\"656\">656</a>|Group does not contain expected entities.| 
| | <a name=\"667\">667</a>|Relation definition already exists| | | <a name=\"673\">673</a>|Missing entitlements for entities in Group| | | <a name=\"674\">674</a>|Next Best Action not found| | | <a name=\"676\">676</a>|Relation definition not defined| | | <a name=\"677\">677</a>|Invalid entity identifier for relation| | | <a name=\"681\">681</a>|Sorting by specified field not supported|One or more of the provided fields to order by were either invalid or not supported. | | <a name=\"682\">682</a>|Too many fields to sort by|The number of fields to sort the data by exceeds the number allowed by the endpoint | | <a name=\"684\">684</a>|Sequence Not Found| | | <a name=\"685\">685</a>|Sequence Already Exists| | | <a name=\"686\">686</a>|Non-cycling sequence has been exhausted| | | <a name=\"687\">687</a>|Legal Entity Identifier Already In Use| | | <a name=\"688\">688</a>|Legal Entity Not Found| | | <a name=\"689\">689</a>|The supplied pagination token is invalid| | | <a name=\"690\">690</a>|Property Type Is Not Supported| | | <a name=\"691\">691</a>|Multiple Tax-lots For Currency Type Is Not Supported| | | <a name=\"692\">692</a>|This endpoint does not support impersonation| | | <a name=\"693\">693</a>|Entity type is not supported for Relationship| | | <a name=\"694\">694</a>|Relationship Validation Failure| | | <a name=\"695\">695</a>|Relationship Not Found| | | <a name=\"697\">697</a>|Derived Property Formula No Longer Valid| | | <a name=\"698\">698</a>|Story is not available| | | <a name=\"703\">703</a>|Corporate Action Does Not Exist| | | <a name=\"720\">720</a>|The provided sort and filter combination is not valid| | | <a name=\"721\">721</a>|A2B generation failed| | | <a name=\"722\">722</a>|Aggregated Return Calculation Failure| | | <a name=\"723\">723</a>|Custom Entity Definition Identifier Already In Use| | | <a name=\"724\">724</a>|Custom Entity Definition Not Found| | | <a name=\"725\">725</a>|The Placement requested was not found.| | | <a name=\"726\">726</a>|The Execution requested was not found.| | | <a name=\"727\">727</a>|The Block requested was not found.| | | <a name=\"728\">728</a>|The Participation requested was not found.| | | <a name=\"729\">729</a>|The Package requested was not found.| | | <a name=\"730\">730</a>|The OrderInstruction requested was not found.| | | <a name=\"732\">732</a>|Custom Entity not found.| | | <a name=\"733\">733</a>|Custom Entity Identifier already in use.| | | <a name=\"735\">735</a>|Calculation Failed.| | | <a name=\"736\">736</a>|An expected key on HttpResponse is missing.| | | <a name=\"737\">737</a>|A required fee detail is missing.| | | <a name=\"738\">738</a>|Zero rows were returned from Luminesce| | | <a name=\"739\">739</a>|Provided Weekend Mask was invalid| | | <a name=\"742\">742</a>|Custom Entity fields do not match the definition| | | <a name=\"746\">746</a>|The provided sequence is not valid.| | | <a name=\"751\">751</a>|The type of the Custom Entity is different than the type provided in the definition.| | | <a name=\"752\">752</a>|Luminesce process returned an error.| |
*
* The version of the OpenAPI document: 0.11.3648
* Contact: [email protected]
*
* NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
* https://openapi-generator.tech
* Do not edit the class manually.
*/
import localVarRequest from 'request';
import http from 'http';
/* tslint:disable:no-unused-locals */
import { LusidProblemDetails } from '../model/lusidProblemDetails';
import { LusidValidationProblemDetails } from '../model/lusidValidationProblemDetails';
import { PropertySchema } from '../model/propertySchema';
import { ResourceListOfString } from '../model/resourceListOfString';
import { ResourceListOfValueType } from '../model/resourceListOfValueType';
import { Schema } from '../model/schema';
import { ObjectSerializer, Authentication, VoidAuth, Interceptor } from '../model/models';
import { HttpBasicAuth, HttpBearerAuth, ApiKeyAuth, OAuth } from '../model/models';
class | extends Error {
constructor (public response: http.IncomingMessage, public body: any, public statusCode?: number) {
super('HTTP request failed');
this.name = 'HttpError';
}
}
let defaultBasePath = 'https://fbn-prd.lusid.com/api';
// ===============================================
// This file is autogenerated - Please do not edit
// ===============================================
export enum SchemasApiApiKeys {
}
export class SchemasApi {
protected _basePath = defaultBasePath;
protected _defaultHeaders : any = {};
protected _useQuerystring : boolean = false;
protected authentications = {
'default': <Authentication>new VoidAuth(),
'oauth2': new OAuth(),
}
protected interceptors: Interceptor[] = [];
constructor(basePath?: string);
constructor(basePathOrUsername: string, password?: string, basePath?: string) {
if (password) {
if (basePath) {
this.basePath = basePath;
}
} else {
if (basePathOrUsername) {
this.basePath = basePathOrUsername
}
}
}
set useQuerystring(value: boolean) {
this._useQuerystring = value;
}
set basePath(basePath: string) {
this._basePath = basePath;
}
set defaultHeaders(defaultHeaders: any) {
this._defaultHeaders = defaultHeaders;
}
get defaultHeaders() {
return this._defaultHeaders;
}
get basePath() {
return this._basePath;
}
public setDefaultAuthentication(auth: Authentication) {
this.authentications.default = auth;
}
public setApiKey(key: SchemasApiApiKeys, value: string) {
(this.authentications as any)[SchemasApiApiKeys[key]].apiKey = value;
}
set accessToken(token: string) {
this.authentications.oauth2.accessToken = token;
}
public addInterceptor(interceptor: Interceptor) {
this.interceptors.push(interceptor);
}
/**
* Gets the schema and meta-data for a given entity
* @summary [BETA] GetEntitySchema: Get schema
* @param entity The name of a valid entity
*/
public async getEntitySchema (entity: string, options: {headers: {[name: string]: string}} = {headers: {}}) : Promise<{ response: http.IncomingMessage; body: Schema; }> {
const localVarPath = this.basePath + '/api/schemas/entities/{entity}'
.replace('{' + 'entity' + '}', encodeURIComponent(String(entity)));
let localVarQueryParameters: any = {};
let localVarHeaderParams: any = (<any>Object).assign({}, this._defaultHeaders);
const produces = ['text/plain', 'application/json', 'text/json'];
// give precedence to 'application/json'
if (produces.indexOf('application/json') >= 0) {
localVarHeaderParams.Accept = 'application/json';
} else {
localVarHeaderParams.Accept = produces.join(',');
}
let localVarFormParams: any = {};
// verify required parameter 'entity' is not null or undefined
if (entity === null || entity === undefined) {
throw new Error('Required parameter entity was null or undefined when calling getEntitySchema.');
}
(<any>Object).assign(localVarHeaderParams, options.headers);
let localVarUseFormData = false;
let localVarRequestOptions: localVarRequest.Options = {
method: 'GET',
qs: localVarQueryParameters,
headers: localVarHeaderParams,
uri: localVarPath,
useQuerystring: this._useQuerystring,
json: true,
};
let authenticationPromise = Promise.resolve();
if (this.authentications.oauth2.accessToken) {
authenticationPromise = authenticationPromise.then(() => this.authentications.oauth2.applyToRequest(localVarRequestOptions));
}
authenticationPromise = authenticationPromise.then(() => this.authentications.default.applyToRequest(localVarRequestOptions));
let interceptorPromise = authenticationPromise;
for (const interceptor of this.interceptors) {
interceptorPromise = interceptorPromise.then(() => interceptor(localVarRequestOptions));
}
return interceptorPromise.then(() => {
if (Object.keys(localVarFormParams).length) {
if (localVarUseFormData) {
(<any>localVarRequestOptions).formData = localVarFormParams;
} else {
localVarRequestOptions.form = localVarFormParams;
}
}
return new Promise<{ response: http.IncomingMessage; body: Schema; }>((resolve, reject) => {
localVarRequest(localVarRequestOptions, (error, response, body) => {
if (error) {
reject(error);
} else {
body = ObjectSerializer.deserialize(body, "Schema");
if (response.statusCode && response.statusCode >= 200 && response.statusCode <= 299) {
resolve({ response: response, body: body });
} else {
reject(new HttpError(response, body, response.statusCode));
}
}
});
});
});
}
/**
* Get the schemas for the provided list of property keys.
* @summary [BETA] GetPropertySchema: Get property schema
* @param propertyKeys One or more property keys for which the schema is requested
* @param asAt Optional. The AsAt date of the data
*/
public async getPropertySchema (propertyKeys?: Array<string>, asAt?: Date, options: {headers: {[name: string]: string}} = {headers: {}}) : Promise<{ response: http.IncomingMessage; body: PropertySchema; }> {
const localVarPath = this.basePath + '/api/schemas/properties';
let localVarQueryParameters: any = {};
let localVarHeaderParams: any = (<any>Object).assign({}, this._defaultHeaders);
const produces = ['text/plain', 'application/json', 'text/json'];
// give precedence to 'application/json'
if (produces.indexOf('application/json') >= 0) {
localVarHeaderParams.Accept = 'application/json';
} else {
localVarHeaderParams.Accept = produces.join(',');
}
let localVarFormParams: any = {};
if (propertyKeys !== undefined) {
localVarQueryParameters['propertyKeys'] = ObjectSerializer.serialize(propertyKeys, "Array<string>");
}
if (asAt !== undefined) {
localVarQueryParameters['asAt'] = ObjectSerializer.serialize(asAt, "Date");
}
(<any>Object).assign(localVarHeaderParams, options.headers);
let localVarUseFormData = false;
let localVarRequestOptions: localVarRequest.Options = {
method: 'GET',
qs: localVarQueryParameters,
headers: localVarHeaderParams,
uri: localVarPath,
useQuerystring: this._useQuerystring,
json: true,
};
let authenticationPromise = Promise.resolve();
if (this.authentications.oauth2.accessToken) {
authenticationPromise = authenticationPromise.then(() => this.authentications.oauth2.applyToRequest(localVarRequestOptions));
}
authenticationPromise = authenticationPromise.then(() => this.authentications.default.applyToRequest(localVarRequestOptions));
let interceptorPromise = authenticationPromise;
for (const interceptor of this.interceptors) {
interceptorPromise = interceptorPromise.then(() => interceptor(localVarRequestOptions));
}
return interceptorPromise.then(() => {
if (Object.keys(localVarFormParams).length) {
if (localVarUseFormData) {
(<any>localVarRequestOptions).formData = localVarFormParams;
} else {
localVarRequestOptions.form = localVarFormParams;
}
}
return new Promise<{ response: http.IncomingMessage; body: PropertySchema; }>((resolve, reject) => {
localVarRequest(localVarRequestOptions, (error, response, body) => {
if (error) {
reject(error);
} else {
body = ObjectSerializer.deserialize(body, "PropertySchema");
if (response.statusCode && response.statusCode >= 200 && response.statusCode <= 299) {
resolve({ response: response, body: body });
} else {
reject(new HttpError(response, body, response.statusCode));
}
}
});
});
});
}
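    // Usage sketch for getPropertySchema (the client construction below is an
    // assumption, not taken from this file; the generated class name and base
    // path will differ per project):
    //
    //   const api = new SchemasApi('https://example.lusid.com/api');
    //   api.getPropertySchema(['Instrument/default/Name'])
    //     .then(({ response, body }) => console.log(response.statusCode, body))
    //     .catch((err) => console.error(err));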
/**
* Gets the available value types for which a schema is available.
* @summary [BETA] GetValueTypes: Get value types
     * @param sortBy Optional. Order the results by these fields. Use the '-' sign to denote descending order e.g. -MyFieldName
* @param start Optional. When paginating, skip this number of results
* @param limit Optional. When paginating, limit the number of returned results to this many.
*/
public async getValueTypes (sortBy?: Array<string>, start?: number, limit?: number, options: {headers: {[name: string]: string}} = {headers: {}}) : Promise<{ response: http.IncomingMessage; body: ResourceListOfValueType; }> {
const localVarPath = this.basePath + '/api/schemas/types';
let localVarQueryParameters: any = {};
let localVarHeaderParams: any = (<any>Object).assign({}, this._defaultHeaders);
const produces = ['text/plain', 'application/json', 'text/json'];
// give precedence to 'application/json'
if (produces.indexOf('application/json') >= 0) {
localVarHeaderParams.Accept = 'application/json';
} else {
localVarHeaderParams.Accept = produces.join(',');
}
let localVarFormParams: any = {};
if (sortBy !== undefined) {
localVarQueryParameters['sortBy'] = ObjectSerializer.serialize(sortBy, "Array<string>");
}
if (start !== undefined) {
localVarQueryParameters['start'] = ObjectSerializer.serialize(start, "number");
}
if (limit !== undefined) {
localVarQueryParameters['limit'] = ObjectSerializer.serialize(limit, "number");
}
(<any>Object).assign(localVarHeaderParams, options.headers);
let localVarUseFormData = false;
let localVarRequestOptions: localVarRequest.Options = {
method: 'GET',
qs: localVarQueryParameters,
headers: localVarHeaderParams,
uri: localVarPath,
useQuerystring: this._useQuerystring,
json: true,
};
let authenticationPromise = Promise.resolve();
if (this.authentications.oauth2.accessToken) {
authenticationPromise = authenticationPromise.then(() => this.authentications.oauth2.applyToRequest(localVarRequestOptions));
}
authenticationPromise = authenticationPromise.then(() => this.authentications.default.applyToRequest(localVarRequestOptions));
let interceptorPromise = authenticationPromise;
for (const interceptor of this.interceptors) {
interceptorPromise = interceptorPromise.then(() => interceptor(localVarRequestOptions));
}
return interceptorPromise.then(() => {
if (Object.keys(localVarFormParams).length) {
if (localVarUseFormData) {
(<any>localVarRequestOptions).formData = localVarFormParams;
} else {
localVarRequestOptions.form = localVarFormParams;
}
}
return new Promise<{ response: http.IncomingMessage; body: ResourceListOfValueType; }>((resolve, reject) => {
localVarRequest(localVarRequestOptions, (error, response, body) => {
if (error) {
reject(error);
} else {
body = ObjectSerializer.deserialize(body, "ResourceListOfValueType");
if (response.statusCode && response.statusCode >= 200 && response.statusCode <= 299) {
resolve({ response: response, body: body });
} else {
reject(new HttpError(response, body, response.statusCode));
}
}
});
});
});
}
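    // Pagination sketch for getValueTypes (illustrative values): a leading '-'
    // in a sortBy entry requests descending order, start skips results, and
    // limit caps the page size. That the body carries a list of value types is
    // inferred from the ResourceListOfValueType return type.
    //
    //   api.getValueTypes(['-Name'], 0, 25)
    //     .then(({ body }) => console.log(body));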
/**
* List all available entities for which schema information is available.
* @summary [BETA] ListEntities: List entities
*/
public async listEntities (options: {headers: {[name: string]: string}} = {headers: {}}) : Promise<{ response: http.IncomingMessage; body: ResourceListOfString; }> {
const localVarPath = this.basePath + '/api/schemas/entities';
let localVarQueryParameters: any = {};
let localVarHeaderParams: any = (<any>Object).assign({}, this._defaultHeaders);
const produces = ['text/plain', 'application/json', 'text/json'];
// give precedence to 'application/json'
if (produces.indexOf('application/json') >= 0) {
localVarHeaderParams.Accept = 'application/json';
} else {
localVarHeaderParams.Accept = produces.join(',');
}
let localVarFormParams: any = {};
(<any>Object).assign(localVarHeaderParams, options.headers);
let localVarUseFormData = false;
let localVarRequestOptions: localVarRequest.Options = {
method: 'GET',
qs: localVarQueryParameters,
headers: localVarHeaderParams,
uri: localVarPath,
useQuerystring: this._useQuerystring,
json: true,
};
let authenticationPromise = Promise.resolve();
if (this.authentications.oauth2.accessToken) {
authenticationPromise = authenticationPromise.then(() => this.authentications.oauth2.applyToRequest(localVarRequestOptions));
}
authenticationPromise = authenticationPromise.then(() => this.authentications.default.applyToRequest(localVarRequestOptions));
let interceptorPromise = authenticationPromise;
for (const interceptor of this.interceptors) {
interceptorPromise = interceptorPromise.then(() => interceptor(localVarRequestOptions));
}
return interceptorPromise.then(() => {
if (Object.keys(localVarFormParams).length) {
if (localVarUseFormData) {
(<any>localVarRequestOptions).formData = localVarFormParams;
} else {
localVarRequestOptions.form = localVarFormParams;
}
}
return new Promise<{ response: http.IncomingMessage; body: ResourceListOfString; }>((resolve, reject) => {
localVarRequest(localVarRequestOptions, (error, response, body) => {
if (error) {
reject(error);
} else {
body = ObjectSerializer.deserialize(body, "ResourceListOfString");
if (response.statusCode && response.statusCode >= 200 && response.statusCode <= 299) {
resolve({ response: response, body: body });
} else {
reject(new HttpError(response, body, response.statusCode));
}
}
});
});
});
}
}
| HttpError |
kitties.rs | use support::{
decl_module, decl_storage, decl_event, ensure, StorageValue, StorageMap,
Parameter, traits::Currency
};
use sr_primitives::traits::{SimpleArithmetic, Bounded, Member};
use codec::{Encode, Decode};
use runtime_io::blake2_128;
use system::ensure_signed;
use rstd::result;
use crate::linked_item::{LinkedList, LinkedItem};
//TODO-001 add module
use crate::nftoken;
//TODO-002 nftoken::Trait
pub trait Trait: nftoken::Trait {
// pub trait Trait: system::Trait {
type Event: From<Event<Self>> + Into<<Self as system::Trait>::Event>;
type KittyIndex: Parameter + Member + SimpleArithmetic + Bounded + Default + Copy;
type Currency: Currency<Self::AccountId>;
}
type BalanceOf<T> = <<T as Trait>::Currency as Currency<<T as system::Trait>::AccountId>>::Balance;
#[derive(Encode, Decode)]
pub struct Kitty(pub [u8; 16]);
type KittyLinkedItem<T> = LinkedItem<<T as Trait>::KittyIndex>;
type OwnedKittiesList<T> = LinkedList<OwnedKitties<T>, <T as system::Trait>::AccountId, <T as Trait>::KittyIndex>;
decl_storage! {
trait Store for Module<T: Trait> as Kitties {
/// Stores all the kitties, key is the kitty id / index
pub Kitties get(kitty): map T::KittyIndex => Option<Kitty>;
/// Stores the total number of kitties. i.e. the next kitty index
pub KittiesCount get(kitties_count): T::KittyIndex;
pub OwnedKitties get(owned_kitties): map (T::AccountId, Option<T::KittyIndex>) => Option<KittyLinkedItem<T>>;
/// Get kitty owner
pub KittyOwners get(kitty_owner): map T::KittyIndex => Option<T::AccountId>;
/// Get kitty price. None means not for sale.
pub KittyPrices get(kitty_price): map T::KittyIndex => Option<BalanceOf<T>>
}
}
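// Layout note (illustrative, consistent with the tests below): OwnedKitties
// models a doubly-linked list per account, where the (account, None) entry is
// the sentinel head/tail. For an account owning kitties [1, 2]:
//   (acc, None)    -> LinkedItem { prev: Some(2), next: Some(1) }
//   (acc, Some(1)) -> LinkedItem { prev: None,    next: Some(2) }
//   (acc, Some(2)) -> LinkedItem { prev: Some(1), next: None    }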
decl_event!(
pub enum Event<T> where
<T as system::Trait>::AccountId,
<T as Trait>::KittyIndex,
Balance = BalanceOf<T>,
{
/// A kitty is created. (owner, kitty_id)
Created(AccountId, KittyIndex),
/// A kitty is transferred. (from, to, kitty_id)
Transferred(AccountId, AccountId, KittyIndex),
/// A kitty is available for sale. (owner, kitty_id, price)
Ask(AccountId, KittyIndex, Option<Balance>),
/// A kitty is sold. (from, to, kitty_id, price)
Sold(AccountId, AccountId, KittyIndex, Balance),
}
);
decl_module! {
pub struct Module<T: Trait> for enum Call where origin: T::Origin {
fn deposit_event() = default;
/// Create a new kitty
pub fn create(origin) {
let sender = ensure_signed(origin)?;
let kitty_id = Self::next_kitty_id()?;
// Generate a random 128bit value
let dna = Self::random_value(&sender);
// Create and store kitty
let kitty = Kitty(dna);
Self::insert_kitty(&sender, kitty_id, kitty);
Self::deposit_event(RawEvent::Created(sender, kitty_id));
}
// /// Breed kitties
// pub fn breed(origin, kitty_id_1: T::KittyIndex, kitty_id_2: T::KittyIndex) {
// let sender = ensure_signed(origin)?;
// let new_kitty_id = Self::do_breed(&sender, kitty_id_1, kitty_id_2)?;
// Self::deposit_event(RawEvent::Created(sender, new_kitty_id));
// }
// /// Transfer a kitty to new owner
// pub fn transfer(origin, to: T::AccountId, kitty_id: T::KittyIndex) {
// let sender = ensure_signed(origin)?;
// ensure!(<OwnedKitties<T>>::exists(&(sender.clone(), Some(kitty_id))), "Only owner can transfer kitty");
// Self::do_transfer(&sender, &to, kitty_id);
// Self::deposit_event(RawEvent::Transferred(sender, to, kitty_id));
// }
// /// Set a price for a kitty for sale
// /// None to delist the kitty
// pub fn ask(origin, kitty_id: T::KittyIndex, price: Option<BalanceOf<T>>) {
// let sender = ensure_signed(origin)?;
// ensure!(<OwnedKitties<T>>::exists(&(sender.clone(), Some(kitty_id))), "Only owner can set price for kitty");
// if let Some(ref price) = price {
// <KittyPrices<T>>::insert(kitty_id, price);
// } else {
// <KittyPrices<T>>::remove(kitty_id);
// }
// Self::deposit_event(RawEvent::Ask(sender, kitty_id, price));
// }
// pub fn buy(origin, kitty_id: T::KittyIndex, price: BalanceOf<T>) {
// let sender = ensure_signed(origin)?;
// let owner = Self::kitty_owner(kitty_id);
// ensure!(owner.is_some(), "Kitty does not exist");
// let owner = owner.unwrap();
// let kitty_price = Self::kitty_price(kitty_id);
// ensure!(kitty_price.is_some(), "Kitty not for sale");
// let kitty_price = kitty_price.unwrap();
// ensure!(price >= kitty_price, "Price is too low");
// T::Currency::transfer(&sender, &owner, kitty_price)?;
// <KittyPrices<T>>::remove(kitty_id);
// Self::do_transfer(&owner, &sender, kitty_id);
// Self::deposit_event(RawEvent::Sold(owner, sender, kitty_id, kitty_price));
// }
	}
}
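// Per-bit mixing: a 1 bit in `selector` takes that bit from `dna1`, a 0 bit
// takes it from `dna2`. Worked example (illustrative 4-bit values):
//   selector = 0b1100, dna1 = 0b1010, dna2 = 0b0101
//   (selector & dna1) = 0b1000, (!selector & dna2) = 0b0001 => result 0b1001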
fn combine_dna(dna1: u8, dna2: u8, selector: u8) -> u8 {
((selector & dna1) | (!selector & dna2))
}
impl<T: Trait> Module<T> {
fn random_value(sender: &T::AccountId) -> [u8; 16] {
let payload = (<system::Module<T>>::random_seed(), sender, <system::Module<T>>::extrinsic_index(), <system::Module<T>>::block_number());
payload.using_encoded(blake2_128)
}
fn next_kitty_id() -> result::Result<T::KittyIndex, &'static str> {
let kitty_id = Self::kitties_count();
if kitty_id == T::KittyIndex::max_value() {
return Err("Kitties count overflow");
}
Ok(kitty_id)
}
fn insert_owned_kitty(owner: &T::AccountId, kitty_id: T::KittyIndex) {
<OwnedKittiesList<T>>::append(owner, kitty_id);
}
fn insert_kitty(owner: &T::AccountId, kitty_id: T::KittyIndex, kitty: Kitty) {
// Create and store kitty
<Kitties<T>>::insert(kitty_id, kitty);
<KittiesCount<T>>::put(kitty_id + 1.into());
<KittyOwners<T>>::insert(kitty_id, owner.clone());
Self::insert_owned_kitty(owner, kitty_id);
}
fn do_breed(sender: &T::AccountId, kitty_id_1: T::KittyIndex, kitty_id_2: T::KittyIndex) -> result::Result<T::KittyIndex, &'static str> {
let kitty1 = Self::kitty(kitty_id_1);
let kitty2 = Self::kitty(kitty_id_2);
ensure!(kitty1.is_some(), "Invalid kitty_id_1");
ensure!(kitty2.is_some(), "Invalid kitty_id_2");
ensure!(kitty_id_1 != kitty_id_2, "Needs different parent");
		ensure!(Self::kitty_owner(&kitty_id_1).map(|owner| owner == *sender).unwrap_or(false), "Not owner of kitty1");
ensure!(Self::kitty_owner(&kitty_id_2).map(|owner| owner == *sender).unwrap_or(false), "Not owner of kitty2");
let kitty_id = Self::next_kitty_id()?;
let kitty1_dna = kitty1.unwrap().0;
let kitty2_dna = kitty2.unwrap().0;
// Generate a random 128bit value
let selector = Self::random_value(&sender);
let mut new_dna = [0u8; 16];
// Combine parents and selector to create new kitty
for i in 0..kitty1_dna.len() {
new_dna[i] = combine_dna(kitty1_dna[i], kitty2_dna[i], selector[i]);
}
Self::insert_kitty(sender, kitty_id, Kitty(new_dna));
Ok(kitty_id)
}
fn do_transfer(from: &T::AccountId, to: &T::AccountId, kitty_id: T::KittyIndex) {
<OwnedKittiesList<T>>::remove(&from, kitty_id);
<OwnedKittiesList<T>>::append(&to, kitty_id);
<KittyOwners<T>>::insert(kitty_id, to);
}
}
/// Tests for Kitties module
#[cfg(test)]
mod tests {
use super::*;
use runtime_io::with_externalities;
use primitives::{H256, Blake2Hasher};
use support::{impl_outer_origin, parameter_types};
use sr_primitives::{traits::{BlakeTwo256, IdentityLookup}, testing::Header};
use sr_primitives::weights::Weight;
use sr_primitives::Perbill;
impl_outer_origin! {
pub enum Origin for Test {}
}
// For testing the module, we construct most of a mock runtime. This means
// first constructing a configuration type (`Test`) which `impl`s each of the
// configuration traits of modules we want to use.
#[derive(Clone, Eq, PartialEq, Debug)]
pub struct Test;
parameter_types! {
pub const BlockHashCount: u64 = 250;
pub const MaximumBlockWeight: Weight = 1024;
pub const MaximumBlockLength: u32 = 2 * 1024;
pub const AvailableBlockRatio: Perbill = Perbill::from_percent(75);
}
impl system::Trait for Test {
type Origin = Origin;
type Call = ();
type Index = u64;
type BlockNumber = u64;
type Hash = H256;
type Hashing = BlakeTwo256;
type AccountId = u64;
type Lookup = IdentityLookup<Self::AccountId>;
type Header = Header;
type WeightMultiplierUpdate = ();
type Event = ();
type BlockHashCount = BlockHashCount;
type MaximumBlockWeight = MaximumBlockWeight;
type MaximumBlockLength = MaximumBlockLength;
type AvailableBlockRatio = AvailableBlockRatio;
type Version = ();
}
parameter_types! {
pub const ExistentialDeposit: u64 = 0;
pub const TransferFee: u64 = 0;
pub const CreationFee: u64 = 0;
pub const TransactionBaseFee: u64 = 0;
pub const TransactionByteFee: u64 = 0;
}
impl balances::Trait for Test {
type Balance = u64;
type OnFreeBalanceZero = ();
type OnNewAccount = ();
type Event = ();
type TransactionPayment = ();
type TransferPayment = ();
type DustRemoval = ();
type ExistentialDeposit = ExistentialDeposit;
type TransferFee = TransferFee;
type CreationFee = CreationFee;
type TransactionBaseFee = TransactionBaseFee;
type TransactionByteFee = TransactionByteFee;
type WeightToFee = ();
}
impl Trait for Test {
type KittyIndex = u32;
type Currency = balances::Module<Test>;
type Event = ();
}
type OwnedKittiesTest = OwnedKitties<Test>;
// This function basically just builds a genesis storage key/value store according to
// our desired mockup.
fn new_test_ext() -> runtime_io::TestExternalities<Blake2Hasher> {
system::GenesisConfig::default().build_storage::<Test>().unwrap().into()
}
#[test]
fn owned_kitties_can_append_values() {
with_externalities(&mut new_test_ext(), || {
OwnedKittiesList::<Test>::append(&0, 1);
assert_eq!(OwnedKittiesTest::get(&(0, None)), Some(KittyLinkedItem::<Test> {
prev: Some(1),
next: Some(1),
}));
assert_eq!(OwnedKittiesTest::get(&(0, Some(1))), Some(KittyLinkedItem::<Test> {
prev: None,
next: None,
}));
OwnedKittiesList::<Test>::append(&0, 2);
assert_eq!(OwnedKittiesTest::get(&(0, None)), Some(KittyLinkedItem::<Test> {
prev: Some(2),
next: Some(1),
}));
assert_eq!(OwnedKittiesTest::get(&(0, Some(1))), Some(KittyLinkedItem::<Test> {
prev: None,
next: Some(2),
}));
assert_eq!(OwnedKittiesTest::get(&(0, Some(2))), Some(KittyLinkedItem::<Test> {
prev: Some(1),
next: None,
}));
OwnedKittiesList::<Test>::append(&0, 3);
assert_eq!(OwnedKittiesTest::get(&(0, None)), Some(KittyLinkedItem::<Test> {
prev: Some(3),
next: Some(1), | assert_eq!(OwnedKittiesTest::get(&(0, Some(1))), Some(KittyLinkedItem::<Test> {
prev: None,
next: Some(2),
}));
assert_eq!(OwnedKittiesTest::get(&(0, Some(2))), Some(KittyLinkedItem::<Test> {
prev: Some(1),
next: Some(3),
}));
assert_eq!(OwnedKittiesTest::get(&(0, Some(3))), Some(KittyLinkedItem::<Test> {
prev: Some(2),
next: None,
}));
});
}
#[test]
fn owned_kitties_can_remove_values() {
with_externalities(&mut new_test_ext(), || {
OwnedKittiesList::<Test>::append(&0, 1);
OwnedKittiesList::<Test>::append(&0, 2);
OwnedKittiesList::<Test>::append(&0, 3);
OwnedKittiesList::<Test>::remove(&0, 2);
assert_eq!(OwnedKittiesTest::get(&(0, None)), Some(KittyLinkedItem::<Test> {
prev: Some(3),
next: Some(1),
}));
assert_eq!(OwnedKittiesTest::get(&(0, Some(1))), Some(KittyLinkedItem::<Test> {
prev: None,
next: Some(3),
}));
assert_eq!(OwnedKittiesTest::get(&(0, Some(2))), None);
assert_eq!(OwnedKittiesTest::get(&(0, Some(3))), Some(KittyLinkedItem::<Test> {
prev: Some(1),
next: None,
}));
OwnedKittiesList::<Test>::remove(&0, 1);
assert_eq!(OwnedKittiesTest::get(&(0, None)), Some(KittyLinkedItem::<Test> {
prev: Some(3),
next: Some(3),
}));
assert_eq!(OwnedKittiesTest::get(&(0, Some(1))), None);
assert_eq!(OwnedKittiesTest::get(&(0, Some(2))), None);
assert_eq!(OwnedKittiesTest::get(&(0, Some(3))), Some(KittyLinkedItem::<Test> {
prev: None,
next: None,
}));
OwnedKittiesList::<Test>::remove(&0, 3);
assert_eq!(OwnedKittiesTest::get(&(0, None)), Some(KittyLinkedItem::<Test> {
prev: None,
next: None,
}));
assert_eq!(OwnedKittiesTest::get(&(0, Some(1))), None);
assert_eq!(OwnedKittiesTest::get(&(0, Some(2))), None);
  		assert_eq!(OwnedKittiesTest::get(&(0, Some(3))), None);
});
}
} | }));
|
node_test.go | /*
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"testing"
"gotest.tools/assert"
"github.com/apache/incubator-yunikorn-k8shim/pkg/common"
"github.com/apache/incubator-yunikorn-k8shim/pkg/common/constants"
"github.com/apache/incubator-yunikorn-k8shim/pkg/common/test"
"github.com/apache/incubator-yunikorn-scheduler-interface/lib/go/si"
)
func TestAddExistingAllocation(t *testing.T) {
node := NewTestSchedulerNode()
alloc01 := si.Allocation{
AllocationKey: "pod001",
AllocationTags: nil,
UUID: "podUID001",
ResourcePerAlloc: nil,
Priority: nil,
QueueName: "",
NodeID: "host001",
ApplicationID: "",
PartitionName: constants.DefaultPartition,
}
node.addExistingAllocation(&alloc01)
assert.Equal(t, len(node.existingAllocations), 1)
alloc02 := node.existingAllocations[0]
assert.Equal(t, alloc02.AllocationKey, alloc01.AllocationKey)
assert.Equal(t, alloc02.UUID, alloc01.UUID)
assert.Equal(t, alloc02.NodeID, alloc01.NodeID)
assert.Equal(t, alloc02.PartitionName, alloc01.PartitionName)
}
func TestSetOccupiedResource(t *testing.T) {
node := NewTestSchedulerNode()
r1 := common.NewResourceBuilder().
AddResource(constants.Memory, 2).
AddResource(constants.CPU, 2).
Build()
node.setOccupiedResource(r1)
assert.Equal(t, node.occupied, r1)
}
func NewTestSchedulerNode() *SchedulerNode | {
api := test.NewSchedulerAPIMock()
r1 := common.NewResourceBuilder().
AddResource(constants.Memory, 1).
AddResource(constants.CPU, 1).
Build()
node := newSchedulerNode("host001", "UID001", "{\"label1\":\"key1\",\"label2\":\"key2\"}", r1, api, false, true)
return node
} |
|
__version__.py | """
NikeBot
"""
__title__ = 'nikebotandroid'
__description__ = 'A retail automation bot for the Nike mobile app'
__url__ = 'https://github.com/olegaobini/NikeBot'
__version__ = '0.0.1'
__debug_mode__ = False
__author__ = 'Olega Obini'
__author_email__ = '[email protected]'
__license__ = 'MIT'
__copyright__ = 'Copyright 2022 Olega Obini' | __logo__ = """ |
|
index.tsx | /*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
import {
EuiFlexGroup,
EuiFlexItem,
EuiPage,
EuiPanel,
EuiTitle,
} from '@elastic/eui';
import { i18n } from '@kbn/i18n';
import React from 'react';
import { useTrackPageview } from '../../../../../observability/public';
import { isRumAgentName } from '../../../../common/agent_name';
import { ChartsSyncContextProvider } from '../../../context/charts_sync_context';
import { TransactionErrorRateChart } from '../../shared/charts/transaction_error_rate_chart';
import { ServiceMapLink } from '../../shared/Links/apm/ServiceMapLink';
import { SearchBar } from '../../shared/search_bar';
import { ServiceOverviewErrorsTable } from './service_overview_errors_table';
import { ServiceOverviewTransactionsTable } from './service_overview_transactions_table';
import { TableLinkFlexItem } from './table_link_flex_item';
/**
 * The height a chart should be if it's next to a table with 5 rows and a
 * title, plus the height of the pagination row.
*/
export const chartHeight = 322;
interface ServiceOverviewProps {
agentName?: string;
serviceName: string;
}
export function ServiceOverview({
agentName,
serviceName,
}: ServiceOverviewProps) {
useTrackPageview({ app: 'apm', path: 'service_overview' });
useTrackPageview({ app: 'apm', path: 'service_overview', delay: 15000 });
return (
<ChartsSyncContextProvider>
<SearchBar />
<EuiPage>
<EuiFlexGroup direction="column" gutterSize="s">
<EuiFlexItem>
<EuiPanel>
<EuiTitle size="xs"> | {
defaultMessage: 'Latency',
}
)}
</h2>
</EuiTitle>
</EuiPanel>
</EuiFlexItem>
<EuiFlexItem>
<EuiFlexGroup gutterSize="s">
<EuiFlexItem grow={4}>
<EuiPanel>
<EuiTitle size="xs">
<h2>
{i18n.translate(
'xpack.apm.serviceOverview.trafficChartTitle',
{
defaultMessage: 'Traffic',
}
)}
</h2>
</EuiTitle>
</EuiPanel>
</EuiFlexItem>
<EuiFlexItem grow={6}>
<EuiPanel>
<ServiceOverviewTransactionsTable serviceName={serviceName} />
</EuiPanel>
</EuiFlexItem>
</EuiFlexGroup>
</EuiFlexItem>
<EuiFlexItem>
<EuiFlexGroup gutterSize="s">
{!isRumAgentName(agentName) && (
<EuiFlexItem grow={4}>
<TransactionErrorRateChart
height={chartHeight}
showAnnotations={false}
/>
</EuiFlexItem>
)}
<EuiFlexItem grow={6}>
<EuiPanel>
<ServiceOverviewErrorsTable serviceName={serviceName} />
</EuiPanel>
</EuiFlexItem>
</EuiFlexGroup>
</EuiFlexItem>
<EuiFlexItem>
<EuiFlexGroup gutterSize="s">
<EuiFlexItem grow={4}>
<EuiPanel>
<EuiFlexGroup>
<EuiFlexItem>
<EuiTitle size="xs">
<h2>
{i18n.translate(
'xpack.apm.serviceOverview.averageDurationBySpanTypeChartTitle',
{
defaultMessage: 'Average duration by span type',
}
)}
</h2>
</EuiTitle>
</EuiFlexItem>
</EuiFlexGroup>
</EuiPanel>
</EuiFlexItem>
<EuiFlexItem grow={6}>
<EuiPanel>
<EuiFlexGroup>
<EuiFlexItem>
<EuiTitle size="xs">
<h2>
{i18n.translate(
'xpack.apm.serviceOverview.dependenciesTableTitle',
{
defaultMessage: 'Dependencies',
}
)}
</h2>
</EuiTitle>
</EuiFlexItem>
<TableLinkFlexItem>
<ServiceMapLink serviceName={serviceName}>
{i18n.translate(
'xpack.apm.serviceOverview.dependenciesTableLinkText',
{
defaultMessage: 'View service map',
}
)}
</ServiceMapLink>
</TableLinkFlexItem>
</EuiFlexGroup>
</EuiPanel>
</EuiFlexItem>
</EuiFlexGroup>
</EuiFlexItem>
<EuiFlexItem>
<EuiFlexGroup gutterSize="s">
<EuiFlexItem grow={4}>
<EuiPanel>
<EuiTitle size="xs">
<h2>
{i18n.translate(
'xpack.apm.serviceOverview.instancesLatencyDistributionChartTitle',
{
defaultMessage: 'Instances latency distribution',
}
)}
</h2>
</EuiTitle>
</EuiPanel>
</EuiFlexItem>
<EuiFlexItem grow={6}>
<EuiPanel>
<EuiTitle size="xs">
<h2>
{i18n.translate(
'xpack.apm.serviceOverview.instancesTableTitle',
{
defaultMessage: 'Instances',
}
)}
</h2>
</EuiTitle>
</EuiPanel>
</EuiFlexItem>
</EuiFlexGroup>
</EuiFlexItem>
</EuiFlexGroup>
</EuiPage>
</ChartsSyncContextProvider>
);
} | <h2>
{i18n.translate(
'xpack.apm.serviceOverview.latencyChartTitle', |
validators.go | package rpc
import (
"fmt"
"net/http"
"strconv"
"github.com/gorilla/mux"
"github.com/spf13/cobra"
"bytes"
"github.com/cosmos/cosmos-sdk/client"
"github.com/cosmos/cosmos-sdk/client/context"
sdk "github.com/cosmos/cosmos-sdk/types"
	tmtypes "github.com/tendermint/tendermint/types"
)
// TODO these next two functions feel kinda hacky based on their placement
// ValidatorCommand returns the validator set for a given height
func ValidatorCommand() *cobra.Command {
cmd := &cobra.Command{
Use: "validator-set [height]",
Short: "Get the full tendermint validator set at given height",
Args: cobra.MaximumNArgs(1),
RunE: printValidators,
}
cmd.Flags().StringP(client.FlagNode, "n", "tcp://localhost:26657", "Node to connect to")
cmd.Flags().Bool(client.FlagTrustNode, false, "Trust connected full node (don't verify proofs for responses)")
cmd.Flags().String(client.FlagChainID, "", "Chain ID of Tendermint node")
return cmd
}
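// Example invocation (hypothetical binary name and height; the flags were
// registered above with a local node as the default):
//
//	appcli validator-set 100 --node tcp://localhost:26657 --chain-id my-chain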
// Validator output in bech32 format
type ValidatorOutput struct {
Address sdk.ValAddress `json:"address"` // in bech32
PubKey string `json:"pub_key"` // in bech32
Accum int64 `json:"accum"`
VotingPower int64 `json:"voting_power"`
}
// Validators at a certain height output in bech32 format
type ResultValidatorsOutput struct {
BlockHeight int64 `json:"block_height"`
Validators []ValidatorOutput `json:"validators"`
}
func bech32ValidatorOutput(validator *tmtypes.Validator) (ValidatorOutput, error) |
func getValidators(cliCtx context.CLIContext, height *int64) ([]byte, error) {
// get the node
node, err := cliCtx.GetNode()
if err != nil {
return nil, err
}
validatorsRes, err := node.Validators(height)
if err != nil {
return nil, err
}
if !cliCtx.TrustNode {
check, err := cliCtx.Certify(validatorsRes.BlockHeight)
if err != nil {
return nil, err
}
		if !bytes.Equal(check.ValidatorsHash(), tmtypes.NewValidatorSet(validatorsRes.Validators).Hash()) {
return nil, fmt.Errorf("got invalid validatorset")
}
}
outputValidatorsRes := ResultValidatorsOutput{
BlockHeight: validatorsRes.BlockHeight,
Validators: make([]ValidatorOutput, len(validatorsRes.Validators)),
}
for i := 0; i < len(validatorsRes.Validators); i++ {
outputValidatorsRes.Validators[i], err = bech32ValidatorOutput(validatorsRes.Validators[i])
if err != nil {
return nil, err
}
}
output, err := cdc.MarshalJSON(outputValidatorsRes)
if err != nil {
return nil, err
}
return output, nil
}
// CMD
func printValidators(cmd *cobra.Command, args []string) error {
var height *int64
// optional height
if len(args) > 0 {
h, err := strconv.Atoi(args[0])
if err != nil {
return err
}
if h > 0 {
tmp := int64(h)
height = &tmp
}
}
output, err := getValidators(context.NewCLIContext(), height)
if err != nil {
return err
}
fmt.Println(string(output))
return nil
}
// REST
// Validator Set at a height REST handler
func ValidatorSetRequestHandlerFn(cliCtx context.CLIContext) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
height, err := strconv.ParseInt(vars["height"], 10, 64)
if err != nil {
w.WriteHeader(400)
w.Write([]byte("ERROR: Couldn't parse block height. Assumed format is '/validatorsets/{height}'."))
return
}
		chainHeight, err := GetChainHeight(cliCtx)
		if err != nil {
			w.WriteHeader(500)
			w.Write([]byte(err.Error()))
			return
		}
		if height > chainHeight {
w.WriteHeader(404)
w.Write([]byte("ERROR: Requested block height is bigger then the chain length."))
return
}
output, err := getValidators(cliCtx, &height)
if err != nil {
w.WriteHeader(500)
w.Write([]byte(err.Error()))
return
}
w.Write(output)
}
}
// Latest Validator Set REST handler
func LatestValidatorSetRequestHandlerFn(cliCtx context.CLIContext) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
height, err := GetChainHeight(cliCtx)
if err != nil {
w.WriteHeader(500)
w.Write([]byte(err.Error()))
return
}
output, err := getValidators(cliCtx, &height)
if err != nil {
w.WriteHeader(500)
w.Write([]byte(err.Error()))
return
}
w.Write(output)
}
}
| {
bechValPubkey, err := sdk.Bech32ifyConsPub(validator.PubKey)
if err != nil {
return ValidatorOutput{}, err
}
return ValidatorOutput{
Address: sdk.ValAddress(validator.Address),
PubKey: bechValPubkey,
Accum: validator.Accum,
VotingPower: validator.VotingPower,
}, nil
} |
index.js | module.exports = {
"env": {
"node": true
},
"repository": "[email protected]:carpathian-ninjas/eslint-config-base.git",
"plugin": [
"graphql"
],
"extends": [
"airbnb-base"
],
"rules": {
"import/extensions": [
"off"
],
"no-multiple-empty-lines": [
"error",
{
"max": 1,
"maxEOF": 1,
"maxBOF": 0
}
],
"curly": [
"error",
"multi"
],
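    // "multi" flags braces around single-statement bodies (illustrative):
    //   if (ok) doThing()     // allowed
    //   if (ok) { doThing() } // reported by curly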
"padding-line-between-statements": [
"error",
{
"blankLine": "never",
"prev": [
"expression",
"const",
"let",
"var",
"for",
"empty"
],
"next": [
"expression",
"const",
"let",
"var",
"for",
"empty"
]
},
{
"blankLine": "always",
"prev": "*",
"next": [
"if",
"return",
"throw",
"function",
"class",
"export",
"cjs-export"
]
},
{
"blankLine": "always",
"prev": [
"if",
"function",
"class",
"cjs-import",
"import"
],
"next": "*"
},
{
"blankLine": "never",
"prev": [
"cjs-import",
"import"
],
"next": [
"cjs-import",
"import"
]
},
{
"blankLine": "never",
"prev": "if",
"next": "if"
}
],
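    // Net effect of the padding rules above (illustrative): declarations and
    // expressions stay packed together, while if/return/throw/function/class/
    // export statements must be preceded by a blank line, and imports group
    // tightly at the top of the file.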
"indent": [
"error",
2
],
"linebreak-style": [
"error",
"unix"
],
"no-restricted-syntax": [
"error",
{
"selector": "ForInStatement",
"message": "for..in loops iterate over the entire prototype chain, which is virtually never what you want. Use Object.{keys,values,entries}, and iterate over the resulting array."
},
{
"selector": "LabeledStatement",
"message": "Labels are a form of GOTO; using them makes code confusing and hard to maintain and understand."
},
{
"selector": "WithStatement",
"message": "`with` is disallowed in strict mode because it makes code impossible to predict and optimize."
}
],
"no-shadow": [
"off"
],
"no-underscore-dangle": [
"off"
],
"no-plusplus": [
"off"
],
"no-param-reassign": [
"off"
],
"prefer-destructuring": [
"off"
],
"max-len": [ | 256,
2,
{
"ignoreUrls": true,
"ignoreComments": false,
"ignoreRegExpLiterals": true,
"ignoreStrings": true,
"ignoreTemplateLiterals": true,
"ignorePattern": "d=\"(m|M)"
}
],
"no-use-before-define": [
"error",
{
"functions": false,
"classes": true
}
],
"no-empty": "off",
"global-require": "off",
"consistent-return": "off",
"no-continue": "off",
"no-console": "off",
"no-await-in-loop": "off",
"radix": "off",
"no-nested-ternary": "off",
"object-curly-newline": "off",
"import/no-unresolved": "off",
"func-names": "off",
"camelcase": "off",
"arrow-parens": "off",
"import/prefer-default-export": "off",
"semi": [
"error",
"never"
]
}
} | "error", |
create_config.py | import os
import time
from click.testing import CliRunner
from bin.throne import cli as throne
runner = CliRunner()
shodan_key = os.getenv('SHODAN_KEY')
throne_user = os.getenv('THRONE_USER')
throne_pass = os.getenv('THRONE_PASS')
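# These tests read credentials from the environment; export them before running,
# e.g. (hypothetical values):
#   export THRONE_USER=alice THRONE_PASS=s3cret SHODAN_KEY=abc123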
def test_throne_setapi():
print("Testing: throne api setapi")
response = runner.invoke(throne, ["api", "setapi", "-u", f"{throne_user}", "-p", f"{throne_pass}"])
assert response.exit_code == 0
assert "Successfully set throne API key." in response.output
def | ():
print("Testing: throne shodan setapi")
response = runner.invoke(throne, ["shodan", "setapi"], input=f"{shodan_key}")
assert response.exit_code == 0
assert "Successfully set Shodan API key." in response.output | test_shodan_setapi |