file_name (stringlengths 3–137) | prefix (stringlengths 0–918k) | suffix (stringlengths 0–962k) | middle (stringlengths 0–812k)
---|---|---|---|
test_delete_business.py | import unittest
import json
from app import create_app
from app.models.v2 import Business
class DeleteBusinessTestCase(unittest.TestCase):
"""This class represents the api test case"""
def setUp(self):
|
def tearDown(self):
""" clear data after every test"""
Business.query.delete()
def test_can_delete_successfully(self):
"""Tests that a business can be Deleted successfully"""
self.client().post(
'/api/v2/businesses',
data=json.dumps(self.business),
headers={
"content-type": "application/json",
"access-token": self.token
})
        bsid = Business.query.first()  # Get the business that was just created
res2 = self.client().delete(
'/api/v2/businesses/' + str(bsid.id),
headers={
"content-type": "application/json",
"access-token": self.token
})
self.assertEqual(res2.status_code, 201)
self.assertIn("Business Deleted", str(res2.data))
def test_cannot_delete_empty(self):
"""Tests that cannot delete a business that doesn't exist"""
res2 = self.client().delete(
'/api/v2/businesses/1',
headers={
"content-type": "application/json",
"access-token": self.token
}
)
self.assertEqual(res2.status_code, 401)
self.assertIn("Business not found", str(res2.data))
def can_only_delete_own_business(self):
"""test that one can only delete a business they created """
res2 = self.client().delete(
'/api/v2/businesses/1',
headers={
"content-type": "application/json",
"access-token": self.token
}
)
self.assertEqual(res2.status_code, 401)
self.assertIn(
"Sorry! You can only delete your business!!", str(res2.data))
def test_can_only_delete_own_business(self):
"""Tests that users cannot delete other users businesses"""
self.client().post(
'/api/v2/auth/register',
data=json.dumps({
"username": "Miritim",
"email": "[email protected]",
"password": "qwerty123!@#",
"first_name": "eric",
"last_name": "Miriti"
}),
content_type='application/json'
)
login = self.client().post(
'/api/v2/auth/login',
data=json.dumps({
"username": "Miritim",
"password": "qwerty123!@#"
}),
content_type='application/json'
)
token = json.loads(login.data.decode("utf-8"))
bs = self.client().post(
'/api/v2/businesses',
data=json.dumps(self.business),
headers={
"content-type": "application/json",
"access-token": token['auth_token']
}
)
response = json.loads(bs.data.decode('utf-8'))
res2 = self.client().delete(
'/api/v2/businesses/' + str(response['Business']['id']),
headers={
"content-type": "application/json",
"access-token": self.token
}
)
self.assertEqual(res2.status_code, 401)
self.assertIn("Sorry! You can only delete your business",
str(res2.data))
| """
Will be called before every test
"""
self.app = create_app('testing')
self.app.app_context().push()
self.client = self.app.test_client
self.user = {
"username": "mwenda",
"email": "[email protected]",
"password": "qwerty123!@#",
"first_name": "eric",
"last_name": "Miriti"
}
self.logins = {
"username": "mwenda",
"password": "qwerty123!@#"
}
self.business = {
"name": "Andela",
"location": "Nairobi,Kenya",
"category": "Tech",
"description": "Epic"
}
self.client().post(
'/api/v2/auth/register',
data=json.dumps(self.user),
content_type='application/json'
)
self.login = self.client().post(
'/api/v2/auth/login',
data=json.dumps(self.logins),
content_type='application/json'
)
self.data = json.loads(self.login.get_data(as_text=True))
# get the token to be used by tests
self.token = self.data['auth_token'] |
image_subsets.py | #!/bin/python
# -*- coding: utf-8 -*-
import comparators.pixel_comparator
import comparators.chained_image_comparator
import comparators.avg_pixel_comparator
import image_iterator
import sys
# Not really sets, since they may contain "duplicate" images (i.e. ones that compare as equal)
'''
Input:
image_sets: A list of sets where each set contains a list of file paths for the images to be compared
comparator: A comparator that will be used to compare the images
Output:
The common subset of all the image_sets
'''
def get_common_subset_of_image_sets(image_sets, comparator):
    if len(image_sets) == 0:
return None
current_image_subset = image_sets[0]
for image_set in image_sets[1:]:
# Refine the subset
current_image_subset = intersection_of_sets(current_image_subset, image_set, comparator)
    return current_image_subset
'''
Performs a block based intersection of the two image sets image_set1 and image_set2 using the comparator specified by the comparator parameter
'''
def intersection_of_sets(image_set1, image_set2, comparator):
subset = []
    # Done this way so we don't keep more than 2*image_iterator.max_images_per_block images in memory at once
for images in image_iterator.load_images(image_set1):
for other_images in image_iterator.load_images(image_set2):
# Load 2 blocks of images into memory, run a comparison of every image in the first block against all images in the second block.
for image in images:
for other_image in other_images:
if comparator.compare(image[1], other_image[1]):
subset.append(image[0])
break
return subset
def parse_subsets(subset_file):
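    # Blank lines separate groups; each run of non-empty lines becomes one subset.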
subsets = []
curr_subset = []
curr_line = subset_file.readline()
while curr_line != '':
if curr_line.strip() == '' and len(curr_subset) > 0:
subsets.append(curr_subset)
curr_subset = []
elif curr_line.strip() != '':
curr_subset.append(curr_line.strip())
curr_line = subset_file.readline()
if len(curr_subset) > 0:
subsets.append(curr_subset)
return subsets
def usage():
print "python image_subsets.py <image_sets_file>"
'''
Parses a file where grouped lines of file paths are considered sets of images, then finds the common subset of these sets.
Example:
test_file.txt
-------------
C:\some_file.bmp
C:\some_other_file.bmp
C:\another_file.jpg
C:\final_file.gif
----------------
This will produce 3 sets
1: ['C:\some_file.bmp', 'C:\some_other_file.bmp']
2: ['C:\another_file.jpg']
3: ['C:\final_file.gif']
'''
if __name__=="__main__":
if len(sys.argv) == 2:
image_iterator.max_images_per_block = 1
try:
image_subsets = None
with open(sys.argv[1], 'r') as subset_file:
image_subsets = parse_subsets(subset_file)
print image_subsets
if image_subsets:
chained_comparators = [comparators.pixel_comparator.PixelComparator(), comparators.avg_pixel_comparator.AvgPixelComparator()]
comparator = comparators.chained_image_comparator.ChainedImageComparator(image_comparators=chained_comparators)
print get_common_subset_of_image_sets(image_subsets, comparator)
except Exception as e:
print e
usage()
else: | usage() |
|
entpb_vehicle_service.go | // Code generated by protoc-gen-entgrpc. DO NOT EDIT.
package entpb
import (
context "context"
runtime "entgo.io/contrib/entproto/runtime"
sqlgraph "entgo.io/ent/dialect/sql/sqlgraph"
empty "github.com/golang/protobuf/ptypes/empty"
ent "github.com/open-farms/inventory/ent"
location "github.com/open-farms/inventory/ent/location"
vehicle "github.com/open-farms/inventory/ent/vehicle"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
emptypb "google.golang.org/protobuf/types/known/emptypb"
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
)
// VehicleService implements VehicleServiceServer
type VehicleService struct {
client *ent.Client
UnimplementedVehicleServiceServer
}
// NewVehicleService returns a new VehicleService
func NewVehicleService(client *ent.Client) *VehicleService {
return &VehicleService{
client: client,
}
}
// toProtoVehicle transforms the ent type to the pb type
func toProtoVehicle(e *ent.Vehicle) (*Vehicle, error) {
v := &Vehicle{}
active := e.Active
v.Active = active
createtime := timestamppb.New(e.CreateTime)
v.CreateTime = createtime
hours := e.Hours
v.Hours = hours
id := int32(e.ID)
v.Id = id
make := e.Make
v.Make = make
model := e.Model
v.Model = model
power := wrapperspb.String(e.Power)
v.Power = power
updatetime := timestamppb.New(e.UpdateTime)
v.UpdateTime = updatetime
year := wrapperspb.Int64(e.Year)
v.Year = year
if edg := e.Edges.Location; edg != nil {
id := int32(edg.ID)
v.Location = &Location{ | }
return v, nil
}
// Create implements VehicleServiceServer.Create
func (svc *VehicleService) Create(ctx context.Context, req *CreateVehicleRequest) (*Vehicle, error) {
vehicle := req.GetVehicle()
m := svc.client.Vehicle.Create()
vehicleActive := vehicle.GetActive()
m.SetActive(vehicleActive)
vehicleCreateTime := runtime.ExtractTime(vehicle.GetCreateTime())
m.SetCreateTime(vehicleCreateTime)
vehicleHours := int64(vehicle.GetHours())
m.SetHours(vehicleHours)
vehicleMake := vehicle.GetMake()
m.SetMake(vehicleMake)
vehicleModel := vehicle.GetModel()
m.SetModel(vehicleModel)
if vehicle.GetPower() != nil {
vehiclePower := vehicle.GetPower().GetValue()
m.SetPower(vehiclePower)
}
vehicleUpdateTime := runtime.ExtractTime(vehicle.GetUpdateTime())
m.SetUpdateTime(vehicleUpdateTime)
if vehicle.GetYear() != nil {
vehicleYear := int64(vehicle.GetYear().GetValue())
m.SetYear(vehicleYear)
}
vehicleLocation := int(vehicle.GetLocation().GetId())
m.SetLocationID(vehicleLocation)
res, err := m.Save(ctx)
switch {
case err == nil:
proto, err := toProtoVehicle(res)
if err != nil {
return nil, status.Errorf(codes.Internal, "internal error: %s", err)
}
return proto, nil
case sqlgraph.IsUniqueConstraintError(err):
return nil, status.Errorf(codes.AlreadyExists, "already exists: %s", err)
case ent.IsConstraintError(err):
return nil, status.Errorf(codes.InvalidArgument, "invalid argument: %s", err)
default:
return nil, status.Errorf(codes.Internal, "internal error: %s", err)
}
}
// Get implements VehicleServiceServer.Get
func (svc *VehicleService) Get(ctx context.Context, req *GetVehicleRequest) (*Vehicle, error) {
var (
err error
get *ent.Vehicle
)
id := int(req.GetId())
switch req.GetView() {
case GetVehicleRequest_VIEW_UNSPECIFIED, GetVehicleRequest_BASIC:
get, err = svc.client.Vehicle.Get(ctx, id)
case GetVehicleRequest_WITH_EDGE_IDS:
get, err = svc.client.Vehicle.Query().
Where(vehicle.ID(id)).
WithLocation(func(query *ent.LocationQuery) {
query.Select(location.FieldID)
}).
Only(ctx)
default:
return nil, status.Error(codes.InvalidArgument, "invalid argument: unknown view")
}
switch {
case err == nil:
return toProtoVehicle(get)
case ent.IsNotFound(err):
return nil, status.Errorf(codes.NotFound, "not found: %s", err)
default:
return nil, status.Errorf(codes.Internal, "internal error: %s", err)
}
return nil, nil
}
// Update implements VehicleServiceServer.Update
func (svc *VehicleService) Update(ctx context.Context, req *UpdateVehicleRequest) (*Vehicle, error) {
vehicle := req.GetVehicle()
vehicleID := int(vehicle.GetId())
m := svc.client.Vehicle.UpdateOneID(vehicleID)
vehicleActive := vehicle.GetActive()
m.SetActive(vehicleActive)
vehicleCreateTime := runtime.ExtractTime(vehicle.GetCreateTime())
m.SetCreateTime(vehicleCreateTime)
vehicleHours := int64(vehicle.GetHours())
m.SetHours(vehicleHours)
vehicleMake := vehicle.GetMake()
m.SetMake(vehicleMake)
vehicleModel := vehicle.GetModel()
m.SetModel(vehicleModel)
if vehicle.GetPower() != nil {
vehiclePower := vehicle.GetPower().GetValue()
m.SetPower(vehiclePower)
}
vehicleUpdateTime := runtime.ExtractTime(vehicle.GetUpdateTime())
m.SetUpdateTime(vehicleUpdateTime)
if vehicle.GetYear() != nil {
vehicleYear := int64(vehicle.GetYear().GetValue())
m.SetYear(vehicleYear)
}
vehicleLocation := int(vehicle.GetLocation().GetId())
m.SetLocationID(vehicleLocation)
res, err := m.Save(ctx)
switch {
case err == nil:
proto, err := toProtoVehicle(res)
if err != nil {
return nil, status.Errorf(codes.Internal, "internal error: %s", err)
}
return proto, nil
case sqlgraph.IsUniqueConstraintError(err):
return nil, status.Errorf(codes.AlreadyExists, "already exists: %s", err)
case ent.IsConstraintError(err):
return nil, status.Errorf(codes.InvalidArgument, "invalid argument: %s", err)
default:
return nil, status.Errorf(codes.Internal, "internal error: %s", err)
}
}
// Delete implements VehicleServiceServer.Delete
func (svc *VehicleService) Delete(ctx context.Context, req *DeleteVehicleRequest) (*empty.Empty, error) {
var err error
id := int(req.GetId())
err = svc.client.Vehicle.DeleteOneID(id).Exec(ctx)
switch {
case err == nil:
return &emptypb.Empty{}, nil
case ent.IsNotFound(err):
return nil, status.Errorf(codes.NotFound, "not found: %s", err)
default:
return nil, status.Errorf(codes.Internal, "internal error: %s", err)
}
} | Id: id,
} |
test_base.py |
class TestBase(pyproctor.TestBase):
@classmethod
def setUpClass(cls):
"""
This exists to make sure that no matter what, tests
will log on stdout. Every call to basicConfig after
this point will be a no-op
"""
# AGI-731
# See jira for more information
#
# https://github.com/gabrielfalcao/HTTPretty/issues/280
#
# logging.basicConfig(
# stream=sys.stdout
# ) | import pyproctor |
|
Sort Array By Parity.py | class Solution(object):
| def sortArrayByParity(self, A):
"""
:type A: List[int]
:rtype: List[int]
"""
result = []
for i in A:
if i%2 == 0:
result.insert(0,i)
else:
result.append(i)
return result |
|
model.py | #!/usr/bin/env python3
# Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""DrQA Document Reader model"""
import torch
import torch.optim as optim
import torch.nn.functional as F
import numpy as np
import logging
import copy
from torch.autograd import Variable
from .config import override_model_args
from .rnn_reader import RnnDocReader
logger = logging.getLogger(__name__)
class DocReader(object):
"""High level model that handles intializing the underlying network
architecture, saving, updating examples, and predicting examples.
"""
# --------------------------------------------------------------------------
# Initialization
# --------------------------------------------------------------------------
def __init__(self, args, word_dict, feature_dict,
state_dict=None, normalize=True):
# Book-keeping.
self.args = args
self.word_dict = word_dict
self.args.vocab_size = len(word_dict)
self.feature_dict = feature_dict
self.args.num_features = len(feature_dict)
self.updates = 0
self.use_cuda = False
self.parallel = False
        # Building network. If normalize is false, scores are not normalized
# 0-1 per paragraph (no softmax).
if args.model_type == 'rnn':
self.network = RnnDocReader(args, normalize)
else:
raise RuntimeError('Unsupported model: %s' % args.model_type)
# Load saved state
if state_dict:
# Load buffer separately
if 'fixed_embedding' in state_dict:
fixed_embedding = state_dict.pop('fixed_embedding')
self.network.load_state_dict(state_dict)
self.network.register_buffer('fixed_embedding', fixed_embedding)
else:
self.network.load_state_dict(state_dict)
def expand_dictionary(self, words):
"""Add words to the DocReader dictionary if they do not exist. The
underlying embedding matrix is also expanded (with random embeddings).
Args:
words: iterable of tokens to add to the dictionary.
Output:
added: set of tokens that were added.
"""
to_add = {self.word_dict.normalize(w) for w in words
if w not in self.word_dict}
# Add words to dictionary and expand embedding layer
if len(to_add) > 0:
logger.info('Adding %d new words to dictionary...' % len(to_add))
for w in to_add:
self.word_dict.add(w)
self.args.vocab_size = len(self.word_dict)
logger.info('New vocab size: %d' % len(self.word_dict))
old_embedding = self.network.embedding.weight.data
self.network.embedding = torch.nn.Embedding(self.args.vocab_size,
self.args.embedding_dim,
padding_idx=0)
new_embedding = self.network.embedding.weight.data
new_embedding[:old_embedding.size(0)] = old_embedding
# Return added words
return to_add
def load_embeddings(self, words, embedding_file):
"""Load pretrained embeddings for a given list of words, if they exist.
Args:
words: iterable of tokens. Only those that are indexed in the
dictionary are kept.
embedding_file: path to text file of embeddings, space separated.
"""
words = {w for w in words if w in self.word_dict}
logger.info('Loading pre-trained embeddings for %d words from %s' %
(len(words), embedding_file))
embedding = self.network.embedding.weight.data
# When normalized, some words are duplicated. (Average the embeddings).
vec_counts = {}
with open(embedding_file) as f:
for line in f:
parsed = line.rstrip().split(' ')
assert(len(parsed) == embedding.size(1) + 1)
w = self.word_dict.normalize(parsed[0])
if w in words:
vec = torch.Tensor([float(i) for i in parsed[1:]])
if w not in vec_counts:
vec_counts[w] = 1
embedding[self.word_dict[w]].copy_(vec)
else:
logging.warning(
'WARN: Duplicate embedding found for %s' % w
)
vec_counts[w] = vec_counts[w] + 1
embedding[self.word_dict[w]].add_(vec)
for w, c in vec_counts.items():
embedding[self.word_dict[w]].div_(c)
logger.info('Loaded %d embeddings (%.2f%%)' %
(len(vec_counts), 100 * len(vec_counts) / len(words)))
def tune_embeddings(self, words):
|
def init_optimizer(self, state_dict=None):
"""Initialize an optimizer for the free parameters of the network.
Args:
state_dict: network parameters
"""
if self.args.fix_embeddings:
for p in self.network.embedding.parameters():
p.requires_grad = False
parameters = [p for p in self.network.parameters() if p.requires_grad]
if self.args.optimizer == 'sgd':
self.optimizer = optim.SGD(parameters, self.args.learning_rate,
momentum=self.args.momentum,
weight_decay=self.args.weight_decay)
elif self.args.optimizer == 'adamax':
self.optimizer = optim.Adamax(parameters,
weight_decay=self.args.weight_decay)
else:
raise RuntimeError('Unsupported optimizer: %s' %
self.args.optimizer)
# --------------------------------------------------------------------------
# Learning
# --------------------------------------------------------------------------
def update(self, ex):
"""Forward a batch of examples; step the optimizer to update weights."""
if not self.optimizer:
raise RuntimeError('No optimizer set.')
# Train mode
self.network.train()
# Transfer to GPU
if self.use_cuda:
inputs = [e if e is None else Variable(e.cuda(async=True))
for e in ex[:5]]
target_s = Variable(ex[5].cuda(async=True))
target_e = Variable(ex[6].cuda(async=True))
else:
inputs = [e if e is None else Variable(e) for e in ex[:5]]
target_s = Variable(ex[5])
target_e = Variable(ex[6])
# Run forward
score_s, score_e = self.network(*inputs)
# Compute loss and accuracies
loss = F.nll_loss(score_s, target_s) + F.nll_loss(score_e, target_e)
# Clear gradients and run backward
self.optimizer.zero_grad()
loss.backward()
# Clip gradients
torch.nn.utils.clip_grad_norm(self.network.parameters(),
self.args.grad_clipping)
# Update parameters
self.optimizer.step()
self.updates += 1
# Reset any partially fixed parameters (e.g. rare words)
self.reset_parameters()
return loss.data[0], ex[0].size(0)
def reset_parameters(self):
"""Reset any partially fixed parameters to original states."""
# Reset fixed embeddings to original value
if self.args.tune_partial > 0:
if self.parallel:
embedding = self.network.module.embedding.weight.data
fixed_embedding = self.network.module.fixed_embedding
else:
embedding = self.network.embedding.weight.data
fixed_embedding = self.network.fixed_embedding
# Embeddings to fix are the last indices
offset = embedding.size(0) - fixed_embedding.size(0)
if offset >= 0:
embedding[offset:] = fixed_embedding
# --------------------------------------------------------------------------
# Prediction
# --------------------------------------------------------------------------
def predict(self, ex, candidates=None, top_n=1, async_pool=None):
"""Forward a batch of examples only to get predictions.
Args:
ex: the batch
candidates: batch * variable length list of string answer options.
The model will only consider exact spans contained in this list.
top_n: Number of predictions to return per batch element.
async_pool: If provided, non-gpu post-processing will be offloaded
to this CPU process pool.
Output:
pred_s: batch * top_n predicted start indices
pred_e: batch * top_n predicted end indices
pred_score: batch * top_n prediction scores
If async_pool is given, these will be AsyncResult handles.
"""
# Eval mode
self.network.eval()
# Transfer to GPU
if self.use_cuda:
inputs = [e if e is None else
Variable(e.cuda(async=True), volatile=True)
for e in ex[:5]]
else:
inputs = [e if e is None else Variable(e, volatile=True)
for e in ex[:5]]
# Run forward
score_s, score_e = self.network(*inputs)
# Decode predictions
score_s = score_s.data.cpu()
score_e = score_e.data.cpu()
if candidates:
args = (score_s, score_e, candidates, top_n, self.args.max_len)
if async_pool:
return async_pool.apply_async(self.decode_candidates, args)
else:
return self.decode_candidates(*args)
else:
args = (score_s, score_e, top_n, self.args.max_len)
if async_pool:
return async_pool.apply_async(self.decode, args)
else:
return self.decode(*args)
@staticmethod
def decode(score_s, score_e, top_n=1, max_len=None):
"""Take argmax of constrained score_s * score_e.
Args:
score_s: independent start predictions
score_e: independent end predictions
top_n: number of top scored pairs to take
max_len: max span length to consider
"""
pred_s = []
pred_e = []
pred_score = []
max_len = max_len or score_s.size(1)
for i in range(score_s.size(0)):
# Outer product of scores to get full p_s * p_e matrix
scores = torch.ger(score_s[i], score_e[i])
# Zero out negative length and over-length span scores
scores.triu_().tril_(max_len - 1)
# Take argmax or top n
scores = scores.numpy()
scores_flat = scores.flatten()
if top_n == 1:
idx_sort = [np.argmax(scores_flat)]
elif len(scores_flat) < top_n:
idx_sort = np.argsort(-scores_flat)
else:
idx = np.argpartition(-scores_flat, top_n)[0:top_n]
idx_sort = idx[np.argsort(-scores_flat[idx])]
s_idx, e_idx = np.unravel_index(idx_sort, scores.shape)
pred_s.append(s_idx)
pred_e.append(e_idx)
pred_score.append(scores_flat[idx_sort])
return pred_s, pred_e, pred_score
@staticmethod
def decode_candidates(score_s, score_e, candidates, top_n=1, max_len=None):
"""Take argmax of constrained score_s * score_e. Except only consider
spans that are in the candidates list.
"""
pred_s = []
pred_e = []
pred_score = []
for i in range(score_s.size(0)):
# Extract original tokens stored with candidates
tokens = candidates[i]['input']
cands = candidates[i]['cands']
if not cands:
# try getting from globals? (multiprocessing in pipeline mode)
from ..pipeline.drqa import PROCESS_CANDS
cands = PROCESS_CANDS
if not cands:
raise RuntimeError('No candidates given.')
# Score all valid candidates found in text.
# Brute force get all ngrams and compare against the candidate list.
max_len = max_len or len(tokens)
scores, s_idx, e_idx = [], [], []
for s, e in tokens.ngrams(n=max_len, as_strings=False):
span = tokens.slice(s, e).untokenize()
if span in cands or span.lower() in cands:
# Match! Record its score.
scores.append(score_s[i][s] * score_e[i][e - 1])
s_idx.append(s)
e_idx.append(e - 1)
if len(scores) == 0:
# No candidates present
pred_s.append([])
pred_e.append([])
pred_score.append([])
else:
# Rank found candidates
scores = np.array(scores)
s_idx = np.array(s_idx)
e_idx = np.array(e_idx)
idx_sort = np.argsort(-scores)[0:top_n]
pred_s.append(s_idx[idx_sort])
pred_e.append(e_idx[idx_sort])
pred_score.append(scores[idx_sort])
return pred_s, pred_e, pred_score
# --------------------------------------------------------------------------
# Saving and loading
# --------------------------------------------------------------------------
def save(self, filename):
if self.parallel:
network = self.network.module
else:
network = self.network
state_dict = copy.copy(network.state_dict())
if 'fixed_embedding' in state_dict:
state_dict.pop('fixed_embedding')
params = {
'state_dict': state_dict,
'word_dict': self.word_dict,
'feature_dict': self.feature_dict,
'args': self.args,
}
try:
torch.save(params, filename)
except BaseException:
logger.warning('WARN: Saving failed... continuing anyway.')
def checkpoint(self, filename, epoch):
if self.parallel:
network = self.network.module
else:
network = self.network
params = {
'state_dict': network.state_dict(),
'word_dict': self.word_dict,
'feature_dict': self.feature_dict,
'args': self.args,
'epoch': epoch,
'optimizer': self.optimizer.state_dict(),
}
try:
torch.save(params, filename)
except BaseException:
logger.warning('WARN: Saving failed... continuing anyway.')
@staticmethod
def load(filename, new_args=None, normalize=True):
logger.info('Loading model %s' % filename)
saved_params = torch.load(
filename, map_location=lambda storage, loc: storage
)
word_dict = saved_params['word_dict']
feature_dict = saved_params['feature_dict']
state_dict = saved_params['state_dict']
args = saved_params['args']
if new_args:
args = override_model_args(args, new_args)
return DocReader(args, word_dict, feature_dict, state_dict, normalize)
@staticmethod
def load_checkpoint(filename, normalize=True):
logger.info('Loading model %s' % filename)
saved_params = torch.load(
filename, map_location=lambda storage, loc: storage
)
word_dict = saved_params['word_dict']
feature_dict = saved_params['feature_dict']
state_dict = saved_params['state_dict']
epoch = saved_params['epoch']
optimizer = saved_params['optimizer']
args = saved_params['args']
model = DocReader(args, word_dict, feature_dict, state_dict, normalize)
model.init_optimizer(optimizer)
return model, epoch
# --------------------------------------------------------------------------
# Runtime
# --------------------------------------------------------------------------
def cuda(self):
self.use_cuda = True
self.network = self.network.cuda()
def cpu(self):
self.use_cuda = False
self.network = self.network.cpu()
def parallelize(self):
"""Use data parallel to copy the model across several gpus.
This will take all gpus visible with CUDA_VISIBLE_DEVICES.
"""
self.parallel = True
self.network = torch.nn.DataParallel(self.network)
| """Unfix the embeddings of a list of words. This is only relevant if
only some of the embeddings are being tuned (tune_partial = N).
Shuffles the N specified words to the front of the dictionary, and saves
the original vectors of the other N + 1:vocab words in a fixed buffer.
Args:
words: iterable of tokens contained in dictionary.
"""
words = {w for w in words if w in self.word_dict}
if len(words) == 0:
logger.warning('Tried to tune embeddings, but no words given!')
return
if len(words) == len(self.word_dict):
logger.warning('Tuning ALL embeddings in dictionary')
return
# Shuffle words and vectors
embedding = self.network.embedding.weight.data
for idx, swap_word in enumerate(words, self.word_dict.START):
# Get current word + embedding for this index
curr_word = self.word_dict[idx]
curr_emb = embedding[idx].clone()
old_idx = self.word_dict[swap_word]
# Swap embeddings + dictionary indices
embedding[idx].copy_(embedding[old_idx])
embedding[old_idx].copy_(curr_emb)
self.word_dict[swap_word] = idx
self.word_dict[idx] = swap_word
self.word_dict[curr_word] = old_idx
self.word_dict[old_idx] = curr_word
# Save the original, fixed embeddings
self.network.register_buffer(
'fixed_embedding', embedding[idx + 1:].clone()
) |
leetcode_2078_two_furthest_houses_with_different_colors.py | # @l2g 2078 python3
# [2078] Two Furthest Houses With Different Colors
# Difficulty: Easy
# https://leetcode.com/problems/two-furthest-houses-with-different-colors
#
# There are n houses evenly lined up on the street, and each house is beautifully painted.
# You are given a 0-indexed integer array colors of length n,
# where colors[i] represents the color of the ith house.
# Return the maximum distance between two houses with different colors.
# The distance between the ith and jth houses is abs(i - j), where abs(x) is the absolute value of x.
#
# Example 1:
#
#
# Input: colors = [1,1,1,6,1,1,1]
# Output: 3
# Explanation: In the above image, color 1 is blue, and color 6 is red.
# The furthest two houses with different colors are house 0 and house 3.
# House 0 has color 1, and house 3 has color 6. The distance between them is abs(0 - 3) = 3.
# Note that houses 3 and 6 can also produce the optimal answer.
#
# Example 2:
#
#
# Input: colors = [1,8,3,8,3]
# Output: 4
# Explanation: In the above image, color 1 is blue, color 8 is yellow, and color 3 is green.
# The furthest two houses with different colors are house 0 and house 4.
# House 0 has color 1, and house 4 has color 3. The distance between them is abs(0 - 4) = 4.
#
# Example 3:
#
# Input: colors = [0,1]
# Output: 1
# Explanation: The furthest two houses with different colors are house 0 and house 1.
# House 0 has color 0, and house 1 has color 1. The distance between them is abs(0 - 1) = 1.
#
#
# Constraints:
#
# n == colors.length
# 2 <= n <= 100
# 0 <= colors[i] <= 100
# Test data are generated such that at least two houses have different colors.
#
#
from typing import List
class Solution:
d |
if __name__ == "__main__":
import os
import pytest
pytest.main([os.path.join("tests", "test_2078.py")])
| ef maxDistance(self, colors: List[int]) -> int:
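        # Greedy: an optimal pair always includes house 0 or house n - 1, so for each
        # distance n - 1 - i (largest first) check house i against the last house and
        # house 0 against house n - 1 - i, returning the first distance that differs.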
for i in range(len(colors)):
if colors[i] != colors[-1] or colors[0] != colors[-1 - i]:
return len(colors) - i - 1
return 1
|
login-service.ts | import { Injectable } from '@angular/core';
@Injectable()
export class | {
constructor() { }
/* Login Universal Data
==============================*/
getDataForLoginFlat = () => {
let data = {
"logo": "assets/images/csform-logo.png",
"btnLogin": "Login",
"txtUsername" : "Username",
"txtPassword" : "Password",
"txtForgotPassword" : "Forgot password?",
"btnResetYourPassword": "Reset your password",
"txtSignupnow" : "Don't have an account?",
"btnSignupnow": "Signup now",
"title": "Welcome back,",
"subtitle": "please login to your account.",
"errorUser" : "Field can't be empty.",
"errorPassword" : "Field can't be empty."
};
return data;
};
}
| LoginService |
create_customers_parameters.go | // Code generated by go-swagger; DO NOT EDIT.
package customers
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"net/http"
"time"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
cr "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/TomerHeber/avatax-v2-go/models"
)
// NewCreateCustomersParams creates a new CreateCustomersParams object,
// with the default timeout for this client.
//
// Default values are not hydrated, since defaults are normally applied by the API server side.
//
// To enforce default values in parameter, use SetDefaults or WithDefaults.
func NewCreateCustomersParams() *CreateCustomersParams {
return &CreateCustomersParams{
timeout: cr.DefaultTimeout,
}
}
// NewCreateCustomersParamsWithTimeout creates a new CreateCustomersParams object
// with the ability to set a timeout on a request.
func NewCreateCustomersParamsWithTimeout(timeout time.Duration) *CreateCustomersParams {
return &CreateCustomersParams{
timeout: timeout,
}
}
// NewCreateCustomersParamsWithContext creates a new CreateCustomersParams object
// with the ability to set a context for a request.
func NewCreateCustomersParamsWithContext(ctx context.Context) *CreateCustomersParams {
return &CreateCustomersParams{
Context: ctx,
}
}
// NewCreateCustomersParamsWithHTTPClient creates a new CreateCustomersParams object
// with the ability to set a custom HTTPClient for a request.
func NewCreateCustomersParamsWithHTTPClient(client *http.Client) *CreateCustomersParams |
/* CreateCustomersParams contains all the parameters to send to the API endpoint
for the create customers operation.
Typically these are written to a http.Request.
*/
type CreateCustomersParams struct {
/* XAvalaraClient.
Identifies the software you are using to call this API. For more information on the client header, see [Client Headers](https://developer.avalara.com/avatax/client-headers/) .
Default: "Swagger UI; 21.12.0; Custom; 1.0"
*/
XAvalaraClient *string
/* Body.
The list of customer objects to be created
*/
Body []*models.CustomerModel
/* CompanyID.
The unique ID number of the company that recorded this customer
Format: int32
*/
CompanyID int32
timeout time.Duration
Context context.Context
HTTPClient *http.Client
}
// WithDefaults hydrates default values in the create customers params (not the query body).
//
// All values with no default are reset to their zero value.
func (o *CreateCustomersParams) WithDefaults() *CreateCustomersParams {
o.SetDefaults()
return o
}
// SetDefaults hydrates default values in the create customers params (not the query body).
//
// All values with no default are reset to their zero value.
func (o *CreateCustomersParams) SetDefaults() {
var (
xAvalaraClientDefault = string("Swagger UI; 21.12.0; Custom; 1.0")
)
val := CreateCustomersParams{
XAvalaraClient: &xAvalaraClientDefault,
}
val.timeout = o.timeout
val.Context = o.Context
val.HTTPClient = o.HTTPClient
*o = val
}
// WithTimeout adds the timeout to the create customers params
func (o *CreateCustomersParams) WithTimeout(timeout time.Duration) *CreateCustomersParams {
o.SetTimeout(timeout)
return o
}
// SetTimeout adds the timeout to the create customers params
func (o *CreateCustomersParams) SetTimeout(timeout time.Duration) {
o.timeout = timeout
}
// WithContext adds the context to the create customers params
func (o *CreateCustomersParams) WithContext(ctx context.Context) *CreateCustomersParams {
o.SetContext(ctx)
return o
}
// SetContext adds the context to the create customers params
func (o *CreateCustomersParams) SetContext(ctx context.Context) {
o.Context = ctx
}
// WithHTTPClient adds the HTTPClient to the create customers params
func (o *CreateCustomersParams) WithHTTPClient(client *http.Client) *CreateCustomersParams {
o.SetHTTPClient(client)
return o
}
// SetHTTPClient adds the HTTPClient to the create customers params
func (o *CreateCustomersParams) SetHTTPClient(client *http.Client) {
o.HTTPClient = client
}
// WithXAvalaraClient adds the xAvalaraClient to the create customers params
func (o *CreateCustomersParams) WithXAvalaraClient(xAvalaraClient *string) *CreateCustomersParams {
o.SetXAvalaraClient(xAvalaraClient)
return o
}
// SetXAvalaraClient adds the xAvalaraClient to the create customers params
func (o *CreateCustomersParams) SetXAvalaraClient(xAvalaraClient *string) {
o.XAvalaraClient = xAvalaraClient
}
// WithBody adds the body to the create customers params
func (o *CreateCustomersParams) WithBody(body []*models.CustomerModel) *CreateCustomersParams {
o.SetBody(body)
return o
}
// SetBody adds the body to the create customers params
func (o *CreateCustomersParams) SetBody(body []*models.CustomerModel) {
o.Body = body
}
// WithCompanyID adds the companyID to the create customers params
func (o *CreateCustomersParams) WithCompanyID(companyID int32) *CreateCustomersParams {
o.SetCompanyID(companyID)
return o
}
// SetCompanyID adds the companyId to the create customers params
func (o *CreateCustomersParams) SetCompanyID(companyID int32) {
o.CompanyID = companyID
}
// WriteToRequest writes these params to a swagger request
func (o *CreateCustomersParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
if err := r.SetTimeout(o.timeout); err != nil {
return err
}
var res []error
if o.XAvalaraClient != nil {
// header param X-Avalara-Client
if err := r.SetHeaderParam("X-Avalara-Client", *o.XAvalaraClient); err != nil {
return err
}
}
if o.Body != nil {
if err := r.SetBodyParam(o.Body); err != nil {
return err
}
}
// path param companyId
if err := r.SetPathParam("companyId", swag.FormatInt32(o.CompanyID)); err != nil {
return err
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
| {
return &CreateCustomersParams{
HTTPClient: client,
}
} |
mod.rs | //! This module contains Yew's implementation of a reactive virtual DOM.
#[doc(hidden)]
pub mod key;
#[doc(hidden)]
pub mod vcomp;
#[doc(hidden)]
pub mod vlist;
#[doc(hidden)]
pub mod vnode;
#[doc(hidden)]
pub mod vtag;
#[doc(hidden)]
pub mod vtext;
use crate::html::{AnyScope, NodeRef};
use cfg_if::cfg_if;
use indexmap::set::IndexSet;
use std::collections::HashMap;
use std::fmt;
use std::rc::Rc;
cfg_if! {
if #[cfg(feature = "std_web")] {
use crate::html::EventListener;
use stdweb::web::{Element, INode, Node};
} else if #[cfg(feature = "web_sys")] {
use gloo::events::EventListener;
use web_sys::{Element, Node};
}
}
#[doc(inline)]
pub use self::key::Key;
#[doc(inline)]
pub use self::vcomp::{VChild, VComp};
#[doc(inline)]
pub use self::vlist::VList;
#[doc(inline)]
pub use self::vnode::VNode;
#[doc(inline)]
pub use self::vtag::VTag;
#[doc(inline)]
pub use self::vtext::VText;
/// The `Listener` trait is a universal implementation of an event listener
/// which is used to bind a Rust listener to a JS (DOM) listener.
pub trait Listener {
/// Returns the name of the event
fn kind(&self) -> &'static str;
/// Attaches a listener to the element.
fn attach(&self, element: &Element) -> EventListener;
}
impl fmt::Debug for dyn Listener {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "Listener {{ kind: {} }}", self.kind())
}
}
/// A list of event listeners.
type Listeners = Vec<Rc<dyn Listener>>;
/// A map of attributes.
type Attributes = HashMap<String, String>;
/// A set of classes.
#[derive(Debug, Clone, Default)]
pub struct Classes {
set: IndexSet<String>,
}
impl Classes {
/// Creates an empty set of classes.
pub fn new() -> Self {
Self {
set: IndexSet::new(),
}
}
/// Adds a class to a set.
///
/// If the provided class has already been added, this method will ignore it.
pub fn push(&mut self, class: &str) {
let classes_to_add: Classes = class.into();
self.set.extend(classes_to_add.set);
}
    /// Checks whether the set contains a class.
pub fn contains(&self, class: &str) -> bool {
self.set.contains(class)
}
    /// Checks whether the set is empty.
pub fn | (&self) -> bool {
self.set.is_empty()
}
/// Adds other classes to this set of classes; returning itself.
///
/// Takes the logical union of both `Classes`.
pub fn extend<T: Into<Classes>>(mut self, other: T) -> Self {
self.set.extend(other.into().set.into_iter());
self
}
}
impl ToString for Classes {
fn to_string(&self) -> String {
self.set
.iter()
.map(String::as_str)
.collect::<Vec<&str>>()
.join(" ")
}
}
impl From<&str> for Classes {
fn from(t: &str) -> Self {
let set = t
.split_whitespace()
.map(String::from)
.filter(|c| !c.is_empty())
.collect();
Self { set }
}
}
impl From<String> for Classes {
fn from(t: String) -> Self {
Classes::from(t.as_str())
}
}
impl From<&String> for Classes {
fn from(t: &String) -> Self {
Classes::from(t.as_str())
}
}
impl<T: AsRef<str>> From<Option<T>> for Classes {
fn from(t: Option<T>) -> Self {
t.as_ref()
.map(|s| <Classes as From<&str>>::from(s.as_ref()))
.unwrap_or_default()
}
}
impl<T: AsRef<str>> From<&Option<T>> for Classes {
fn from(t: &Option<T>) -> Self {
t.as_ref()
.map(|s| <Classes as From<&str>>::from(s.as_ref()))
.unwrap_or_default()
}
}
impl<T: AsRef<str>> From<Vec<T>> for Classes {
fn from(t: Vec<T>) -> Self {
Classes::from(t.as_slice())
}
}
impl<T: AsRef<str>> From<&[T]> for Classes {
fn from(t: &[T]) -> Self {
let set = t
.iter()
.map(|x| x.as_ref())
.flat_map(|s| s.split_whitespace())
.map(String::from)
.filter(|c| !c.is_empty())
.collect();
Self { set }
}
}
impl PartialEq for Classes {
fn eq(&self, other: &Self) -> bool {
self.set.len() == other.set.len() && self.set.iter().eq(other.set.iter())
}
}
/// Patch for DOM node modification.
#[derive(Debug, PartialEq)]
enum Patch<ID, T> {
Add(ID, T),
Replace(ID, T),
Remove(ID),
}
// TODO(#938): What about implementing `VDiff` for `Element`?
// It would make it possible to include ANY element into the tree.
// `Ace` editor embedding for example?
/// This trait provides features to update a tree by calculating a difference against another tree.
pub(crate) trait VDiff {
/// Remove self from parent.
fn detach(&mut self, parent: &Element);
/// Scoped diff apply to other tree.
///
/// Virtual rendering for the node. It uses parent node and existing
/// children (virtual and DOM) to check the difference and apply patches to
/// the actual DOM representation.
///
/// Parameters:
/// - `parent_scope`: the parent `Scope` used for passing messages to the
/// parent `Component`.
/// - `parent`: the parent node in the DOM.
/// - `next_sibling`: the next sibling, used to efficiently find where to
/// put the node.
/// - `ancestor`: the node that this node will be replacing in the DOM. This
/// method will _always_ remove the `ancestor` from the `parent`.
///
/// Returns a reference to the newly inserted element.
///
/// ### Internal Behavior Notice:
///
/// Note that these modify the DOM by modifying the reference that _already_
/// exists on the `ancestor`. If `self.reference` exists (which it
/// _shouldn't_) this method will panic.
///
/// The exception to this is obviously `VRef` which simply uses the inner
/// `Node` directly (always removes the `Node` that exists).
fn apply(
&mut self,
parent_scope: &AnyScope,
parent: &Element,
next_sibling: NodeRef,
ancestor: Option<VNode>,
) -> NodeRef;
}
#[cfg(feature = "web_sys")]
fn insert_node(node: &Node, parent: &Element, next_sibling: Option<Node>) {
match next_sibling {
Some(next_sibling) => parent
.insert_before(&node, Some(&next_sibling))
.expect("failed to insert tag before next sibling"),
None => parent.append_child(node).expect("failed to append child"),
};
}
#[cfg(feature = "std_web")]
fn insert_node(node: &impl INode, parent: &impl INode, next_sibling: Option<Node>) {
if let Some(next_sibling) = next_sibling {
parent
.insert_before(node, &next_sibling)
.expect("failed to insert tag before next sibling");
} else {
parent.append_child(node);
}
}
/// Transform properties to the expected type.
pub trait Transformer<FROM, TO> {
/// Transforms one type to another.
fn transform(from: FROM) -> TO;
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn it_is_initially_empty() {
let subject = Classes::new();
assert!(subject.is_empty());
}
#[test]
fn it_pushes_value() {
let mut subject = Classes::new();
subject.push("foo");
assert!(!subject.is_empty());
assert!(subject.contains("foo"));
}
#[test]
fn it_adds_values_via_extend() {
let mut other = Classes::new();
other.push("bar");
let subject = Classes::new().extend(other);
assert!(subject.contains("bar"));
}
#[test]
fn it_contains_both_values() {
let mut other = Classes::new();
other.push("bar");
let mut subject = Classes::new().extend(other);
subject.push("foo");
assert!(subject.contains("foo"));
assert!(subject.contains("bar"));
}
#[test]
fn it_splits_class_with_spaces() {
let mut subject = Classes::new();
subject.push("foo bar");
assert!(subject.contains("foo"));
assert!(subject.contains("bar"));
}
}
// stdweb lacks the `inner_html` method
#[cfg(all(test, feature = "web_sys"))]
mod layout_tests {
use super::*;
use crate::html::{AnyScope, Scope};
use crate::{Component, ComponentLink, Html, ShouldRender};
struct Comp;
impl Component for Comp {
type Message = ();
type Properties = ();
fn create(_: Self::Properties, _: ComponentLink<Self>) -> Self {
unimplemented!()
}
fn update(&mut self, _: Self::Message) -> ShouldRender {
unimplemented!();
}
fn change(&mut self, _: Self::Properties) -> ShouldRender {
unimplemented!()
}
fn view(&self) -> Html {
unimplemented!()
}
}
pub(crate) struct TestLayout<'a> {
pub(crate) name: &'a str,
pub(crate) node: VNode,
pub(crate) expected: &'a str,
}
pub(crate) fn diff_layouts(layouts: Vec<TestLayout<'_>>) {
let document = crate::utils::document();
let parent_scope: AnyScope = Scope::<Comp>::new(None).into();
let parent_element = document.create_element("div").unwrap();
let parent_node: Node = parent_element.clone().into();
let end_node = document.create_text_node("END");
parent_node.append_child(&end_node).unwrap();
let empty_node: VNode = VText::new("".into()).into();
// Tests each layout independently
let next_sibling = NodeRef::new(end_node.into());
for layout in layouts.iter() {
// Apply the layout
let mut node = layout.node.clone();
wasm_bindgen_test::console_log!("Independently apply layout '{}'", layout.name);
node.apply(&parent_scope, &parent_element, next_sibling.clone(), None);
assert_eq!(
parent_element.inner_html(),
format!("{}END", layout.expected),
"Independent apply failed for layout '{}'",
layout.name,
);
// Diff with no changes
let mut node_clone = layout.node.clone();
wasm_bindgen_test::console_log!("Independently reapply layout '{}'", layout.name);
node_clone.apply(
&parent_scope,
&parent_element,
next_sibling.clone(),
Some(node),
);
assert_eq!(
parent_element.inner_html(),
format!("{}END", layout.expected),
"Independent reapply failed for layout '{}'",
layout.name,
);
// Detach
empty_node.clone().apply(
&parent_scope,
&parent_element,
next_sibling.clone(),
Some(node_clone),
);
assert_eq!(
parent_element.inner_html(),
"END",
"Independent detach failed for layout '{}'",
layout.name,
);
}
// Sequentially apply each layout
let mut ancestor: Option<VNode> = None;
for layout in layouts.iter() {
let mut next_node = layout.node.clone();
wasm_bindgen_test::console_log!("Sequentially apply layout '{}'", layout.name);
next_node.apply(
&parent_scope,
&parent_element,
next_sibling.clone(),
ancestor,
);
assert_eq!(
parent_element.inner_html(),
format!("{}END", layout.expected),
"Sequential apply failed for layout '{}'",
layout.name,
);
ancestor = Some(next_node);
}
// Sequentially detach each layout
for layout in layouts.into_iter().rev() {
let mut next_node = layout.node.clone();
wasm_bindgen_test::console_log!("Sequentially detach layout '{}'", layout.name);
next_node.apply(
&parent_scope,
&parent_element,
next_sibling.clone(),
ancestor,
);
assert_eq!(
parent_element.inner_html(),
format!("{}END", layout.expected),
"Sequential detach failed for layout '{}'",
layout.name,
);
ancestor = Some(next_node);
}
// Detach last layout
empty_node
.clone()
.apply(&parent_scope, &parent_element, next_sibling, ancestor);
assert_eq!(
parent_element.inner_html(),
"END",
"Failed to detach last layout"
);
}
}
| is_empty |
node_utils.py | #!/usr/bin/env python
'''module containing various functions for working with trees and nodes'''
from node_parser import NodeParser
import unittest
def depth(node):
'''compute the depth of the given tree'''
if node is None:
return 0
elif node.is_leaf():
return 1
else:
return 1 + max(list(map(depth, node.children())))
def depth_first_iterator(node):
    '''returns a depth-first iterator over the node and its children'''
if node is not None:
node_stack = [(node, -1)]
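        # Each stack entry is (node, next_child_index); -1 marks a node that has
        # not been yielded yet.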
while len(node_stack) > 0:
node, child_index = node_stack.pop()
if child_index == -1:
if not node.is_leaf():
node_stack.append((node, child_index + 1))
yield node
elif child_index < node.nr_children():
node_stack.append((node, child_index + 1))
node_stack.append((node.child(child_index), -1))
def nr_leaf_nodes(start_node):
    '''returns the number of leaf nodes starting from the given node'''
nr = 0
for node in depth_first_iterator(start_node):
if node.is_leaf():
nr += 1
return nr
class DepthTest(unittest.TestCase):
def test_empty_tree(self):
parser = NodeParser()
parser.parse('()')
tree = parser.node()
self.assertEqual(depth(tree), 0)
def test_single_node(self):
parser = NodeParser()
parser.parse('(c1)')
tree = parser.node()
self.assertEqual(depth(tree), 1)
def test_tree(self):
parser = NodeParser()
parser.parse('(c1 ((c2) (c3 ((c4) (c5))) (c6)))')
tree = parser.node()
self.assertEqual(depth(tree), 3)
def test_deep_tree(self):
parser = NodeParser()
parser.parse('(c1 ((c2 ((c3 ((c4)))))))')
tree = parser.node()
self.assertEqual(depth(tree), 4)
class DepthFirstIteratorTest(unittest.TestCase):
def test_empty_tree(self):
parser = NodeParser()
parser.parse('()')
tree = parser.node()
nodes = []
for node in depth_first_iterator(tree):
nodes.append(node.name)
self.assertEqual(nodes, [])
def test_single_node(self):
parser = NodeParser()
parser.parse('(c1)')
tree = parser.node()
nodes = []
for node in depth_first_iterator(tree):
nodes.append(node.name)
self.assertEqual(nodes, ['c1'])
def test_tree(self):
parser = NodeParser()
parser.parse('(c1 ((c2) (c3 ((c4) (c5))) (c6)))')
tree = parser.node()
nodes = []
for node in depth_first_iterator(tree):
nodes.append(node.name)
self.assertEqual(nodes, ['c1', 'c2', 'c3', 'c4', 'c5', 'c6'])
def test_deep_tree(self):
parser = NodeParser()
parser.parse('(c1 ((c2 ((c3 ((c4)))))))')
tree = parser.node()
nodes = []
for node in depth_first_iterator(tree):
nodes.append(node.name)
self.assertEqual(nodes, ['c1', 'c2', 'c3', 'c4'])
class NrLeafsTest(unittest.TestCase):
def test_empty_tree(self):
parser = NodeParser()
parser.parse('()')
tree = parser.node()
self.assertEqual(nr_leaf_nodes(tree), 0)
def test_single_node(self):
parser = NodeParser()
parser.parse('(c1)')
tree = parser.node()
self.assertEqual(nr_leaf_nodes(tree), 1)
def test_tree(self):
|
def test_deep_tree(self):
parser = NodeParser()
parser.parse('(c1 ((c2 ((c3 ((c4)))))))')
tree = parser.node()
self.assertEqual(nr_leaf_nodes(tree), 1)
if __name__ == '__main__':
unittest.main()
| parser = NodeParser()
parser.parse('(c1 ((c2) (c3 ((c4) (c5))) (c6)))')
tree = parser.node()
self.assertEqual(nr_leaf_nodes(tree), 4) |
metrics.py | # Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions to compare image pairs.
Images are assumed to be [0, 1] of floating point dtype with [N]HWC shapes.
Each image metric function returns a scalar for each image pair.
"""
import chex
import jax
import jax.numpy as jnp
import jax.scipy as jsp
def mae(a: chex.Array, b: chex.Array) -> chex.Numeric:
"""Returns the Mean Absolute Error between `a` and `b`.
Args:
a: First image (or set of images).
b: Second image (or set of images).
Returns:
MAE between `a` and `b`.
"""
chex.assert_rank([a, b], {3, 4})
chex.assert_type([a, b], float)
chex.assert_equal_shape([a, b])
return jnp.abs(a - b).mean(axis=(-3, -2, -1))
def mse(a: chex.Array, b: chex.Array) -> chex.Numeric:
"""Returns the Mean Squared Error between `a` and `b`.
Args:
a: First image (or set of images).
b: Second image (or set of images).
Returns:
MSE between `a` and `b`.
"""
chex.assert_rank([a, b], {3, 4})
chex.assert_type([a, b], float)
chex.assert_equal_shape([a, b])
return jnp.square(a - b).mean(axis=(-3, -2, -1))
def psnr(a: chex.Array, b: chex.Array) -> chex.Numeric:
"""Returns the Peak Signal-to-Noise Ratio between `a` and `b`.
Assumes that the dynamic range of the images (the difference between the
maximum and the minimum allowed values) is 1.0.
Args:
a: First image (or set of images).
b: Second image (or set of images).
Returns:
PSNR in decibels between `a` and `b`.
"""
chex.assert_rank([a, b], {3, 4})
chex.assert_type([a, b], float)
chex.assert_equal_shape([a, b])
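  # PSNR = -10 * log10(MSE) for a peak signal value of 1.0; log10(x) = ln(x) / ln(10).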
return -10.0 * jnp.log(mse(a, b)) / jnp.log(10.0)
def | (a: chex.Array, b: chex.Array) -> chex.Numeric:
"""Returns the Root Mean Squared Error between `a` and `b`.
Args:
a: First image (or set of images).
b: Second image (or set of images).
Returns:
RMSE between `a` and `b`.
"""
chex.assert_rank([a, b], {3, 4})
chex.assert_type([a, b], float)
chex.assert_equal_shape([a, b])
return jnp.sqrt(mse(a, b))
def simse(a: chex.Array, b: chex.Array) -> chex.Numeric:
"""Returns the Scale-Invariant Mean Squared Error between `a` and `b`.
For each image pair, a scaling factor for `b` is computed as the solution to
the following problem:
min_alpha || vec(a) - alpha * vec(b) ||_2^2,
where `a` and `b` are flattened, i.e., vec(x) = np.flatten(x). The MSE between
the optimally scaled `b` and `a` is returned: mse(a, alpha*b).
  This is a scale-invariant metric, so for example: simse(x, y) == simse(x, y*5).
This metric was used in "Shape, Illumination, and Reflectance from Shading" by
Barron and Malik, TPAMI, '15.
Args:
a: First image (or set of images).
b: Second image (or set of images).
Returns:
SIMSE between `a` and `b`.
"""
chex.assert_rank([a, b], {3, 4})
chex.assert_type([a, b], float)
chex.assert_equal_shape([a, b])
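  # Closed-form least-squares solution: the alpha minimizing
  # ||vec(a) - alpha * vec(b)||_2^2 is <a, b> / <b, b>, computed per image below.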
a_dot_b = (a * b).sum(axis=(-3, -2, -1), keepdims=True)
b_dot_b = (b * b).sum(axis=(-3, -2, -1), keepdims=True)
alpha = a_dot_b / b_dot_b
return mse(a, alpha * b)
def ssim(
a: chex.Array,
b: chex.Array,
*,
max_val: float = 1.0,
filter_size: int = 11,
filter_sigma: float = 1.5,
k1: float = 0.01,
k2: float = 0.03,
return_map: bool = False,
) -> chex.Numeric:
"""Computes the structural similarity index (SSIM) between image pairs.
This function is based on the standard SSIM implementation from:
Z. Wang, A. C. Bovik, H. R. Sheikh and E. P. Simoncelli,
"Image quality assessment: from error visibility to structural similarity",
in IEEE Transactions on Image Processing, vol. 13, no. 4, pp. 600-612, 2004.
This function was modeled after tf.image.ssim, and should produce comparable
output.
Note: the true SSIM is only defined on grayscale. This function does not
perform any colorspace transform. If the input is in a color space, then it
will compute the average SSIM.
Args:
a: First image (or set of images).
b: Second image (or set of images).
max_val: The maximum magnitude that `a` or `b` can have.
filter_size: Window size (>= 1). Image dims must be at least this small.
filter_sigma: The bandwidth of the Gaussian used for filtering (> 0.).
k1: One of the SSIM dampening parameters (> 0.).
k2: One of the SSIM dampening parameters (> 0.).
return_map: If True, will cause the per-pixel SSIM "map" to be returned.
Returns:
Each image's mean SSIM, or a tensor of individual values if `return_map`.
"""
chex.assert_rank([a, b], {3, 4})
chex.assert_type([a, b], float)
chex.assert_equal_shape([a, b])
# Construct a 1D Gaussian blur filter.
hw = filter_size // 2
shift = (2 * hw - filter_size + 1) / 2
f_i = ((jnp.arange(filter_size) - hw + shift) / filter_sigma)**2
filt = jnp.exp(-0.5 * f_i)
filt /= jnp.sum(filt)
# Blur in x and y (faster than the 2D convolution).
def convolve2d(z, f):
return jsp.signal.convolve2d(
z, f, mode="valid", precision=jax.lax.Precision.HIGHEST)
filt_fn1 = lambda z: convolve2d(z, filt[:, jnp.newaxis])
filt_fn2 = lambda z: convolve2d(z, filt[jnp.newaxis, :])
# `vmap` the blurs to the tensor size, and then compose them.
num_dims = len(a.shape)
map_axes = tuple(list(range(num_dims - 3)) + [num_dims - 1])
filt_fn = lambda z: filt_fn1(filt_fn2(z))
for d in map_axes:
filt_fn = jax.vmap(filt_fn, in_axes=d, out_axes=d)
mu0 = filt_fn(a)
mu1 = filt_fn(b)
mu00 = mu0 * mu0
mu11 = mu1 * mu1
mu01 = mu0 * mu1
sigma00 = filt_fn(a**2) - mu00
sigma11 = filt_fn(b**2) - mu11
sigma01 = filt_fn(a * b) - mu01
# Clip the variances and covariances to valid values.
# Variance must be non-negative:
sigma00 = jnp.maximum(0., sigma00)
sigma11 = jnp.maximum(0., sigma11)
sigma01 = jnp.sign(sigma01) * jnp.minimum(
jnp.sqrt(sigma00 * sigma11), jnp.abs(sigma01))
c1 = (k1 * max_val)**2
c2 = (k2 * max_val)**2
numer = (2 * mu01 + c1) * (2 * sigma01 + c2)
denom = (mu00 + mu11 + c1) * (sigma00 + sigma11 + c2)
ssim_map = numer / denom
ssim_value = jnp.mean(ssim_map, list(range(num_dims - 3, num_dims)))
return ssim_map if return_map else ssim_value
| rmse |
flux_setup.py | #!/usr/bin/env python
import argparse
import os,time
import numpy as np
from astropy.io import fits
from astropy.table import Table
from pypeit import msgs
from pypeit.par.util import make_pypeit_file
class SmartFormatter(argparse.HelpFormatter):
|
def parser(options=None):
parser = argparse.ArgumentParser(description='Parse', formatter_class=SmartFormatter)
parser.add_argument("sci_path", type=str, help="Path for Science folder")
parser.add_argument("--objmodel", type=str, default='qso', choices=['qso', 'star', 'poly'],
help="R|Science object model used in the telluric fitting.\n"
"The options are:\n"
"\n"
" qso = For quasars. You might need to set redshift, bal_wv_min_mx in the tell file.\n"
"\n"
" star = For stars. You need to set star_type, star_ra, star_dec, and star_mag in the tell_file.\n"
"\n"
" poly = For other type object, You might need to set fit_wv_min_mx, \n"
" and norder in the tell_file."
)
if options is None:
args = parser.parse_args()
else:
args = parser.parse_args(options)
return args
def main(args):
"""
    This sets up PypeIt files for fluxing, coadding and telluric corrections.
    It will produce three files named your_spectrograph.flux, your_spectrograph.coadd1d,
    and your_spectrograph.tell
"""
allfiles = os.listdir(args.sci_path)
allfiles = np.sort(allfiles)
spec1dfiles = []
spec2dfiles = []
spec1dinfos = []
for ifile in allfiles:
if ('spec1d' in ifile) and ('.fits' in ifile):
spec1dfiles.append(ifile)
elif ('spec2d' in ifile) and ('.fits' in ifile):
spec2dfiles.append(ifile)
elif ('spec1d' in ifile) and ('.txt' in ifile):
spec1dinfos.append(ifile)
else:
msgs.warn('{:} is not a standard PypeIt output.'.format(ifile))
if len(spec2dfiles) > len(spec1dfiles):
msgs.warn('The following exposures do not have 1D extractions:')
for ii in range(len(spec2dfiles)):
if not os.path.exists(os.path.join(args.sci_path, spec2dfiles[ii].replace('spec2d','spec1d'))):
msgs.info('\t {:}'.format(spec2dfiles[ii]))
if len(spec1dfiles) > 0:
par = fits.open(os.path.join(args.sci_path, spec1dfiles[0]))
## fluxing pypeit file
spectrograph = par[0].header['PYP_SPEC']
pypeline = par[0].header['PYPELINE']
flux_file = '{:}.flux'.format(spectrograph)
cfg_lines = ['[fluxcalib]']
cfg_lines += [' extinct_correct = False # Set to True if your SENSFUNC was derived with the UVIS algorithm\n']
cfg_lines += ['# Please add your SENSFUNC file name below before running pypeit_flux_calib']
make_pypeit_file(flux_file, spectrograph, spec1dfiles, cfg_lines=cfg_lines, setup_mode=True)
fin = open(flux_file, "rt")
data = fin.read()
data = data.replace('spec1d_', os.path.join(args.sci_path,'spec1d_'))
data = data.replace('data', 'flux')
fin.close()
fin = open(flux_file, "wt")
fin.write(data)
fin.close()
## coadd1d pypeit file
coadd1d_file = '{:}.coadd1d'.format(spectrograph)
cfg_lines = ['[coadd1d]']
cfg_lines += [' coaddfile = YOUR_OUTPUT_FILE_NAME # Please set your output file name']
cfg_lines += [' sensfuncfile = YOUR_SENSFUNC_FILE # Please set your SENSFUNC file name']
if pypeline == 'Echelle':
cfg_lines += [' wave_method = velocity # creates a uniformly spaced grid in log10(lambda)\n']
else:
cfg_lines += [' wave_method = linear # creates a uniformly spaced grid in lambda\n']
cfg_lines += ['# This file includes all extracted objects. You need to figure out which object you want to \n'+\
'# coadd before running pypeit_coadd_1dspec!!!']
spec1d_info = []
for ii in range(len(spec1dfiles)):
meta_tbl = Table.read(os.path.join(args.sci_path, spec1dfiles[ii]).replace('.fits', '.txt'),
format='ascii.fixed_width')
_, indx = np.unique(meta_tbl['name'],return_index=True)
objects = meta_tbl[indx]
for jj in range(len(objects)):
spec1d_info.append(spec1dfiles[ii] + ' '+ objects['name'][jj])
make_pypeit_file(coadd1d_file, spectrograph, spec1d_info, cfg_lines=cfg_lines, setup_mode=True)
fin = open(coadd1d_file, "rt")
data = fin.read()
data = data.replace('spec1d_', os.path.join(args.sci_path,'spec1d_'))
data = data.replace('data', 'coadd1d')
fin.close()
fin = open(coadd1d_file, "wt")
fin.write(data)
fin.close()
## tellfit pypeit file
tellfit_file = '{:}.tell'.format(spectrograph)
cfg_lines = ['[tellfit]']
if args.objmodel == 'qso':
cfg_lines += [' objmodel = qso']
cfg_lines += [' redshift = 0.0']
cfg_lines += [' bal_wv_min_max = 10000.,11000.']
elif args.objmodel == 'star':
cfg_lines += [' objmodel = star']
cfg_lines += [' star_type = A0']
cfg_lines += [' star_mag = 0.0']
elif args.objmodel == 'poly':
cfg_lines += [' objmodel = poly']
cfg_lines += [' polyorder = 5']
cfg_lines += [' fit_wv_min_max = 17000.0,22000.0']
with open(tellfit_file, 'w') as f:
f.write('# Auto-generated PypeIt file\n')
f.write('# {0}\n'.format(time.strftime("%a %d %b %Y %H:%M:%S", time.localtime())))
f.write("\n")
f.write("# User-defined execution parameters\n")
f.write("# This is only an example. Make sure to change the following parameters accordingly.\n")
f.write('\n'.join(cfg_lines))
f.write('\n')
f.write('\n')
msgs.info('PypeIt file written to: {0}'.format(tellfit_file))
| def _split_lines(self, text, width):
if text.startswith('R|'):
return text[2:].splitlines()
# this is the RawTextHelpFormatter._split_lines
return argparse.HelpFormatter._split_lines(self, text, width) |
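A rough sketch of driving the setup script above from Python; the science-folder path and object model below are illustrative placeholders, not values from the source:

# Hypothetical invocation of the entry points defined above.
args = parser(['/path/to/Science', '--objmodel', 'star'])
main(args)  # writes <spectrograph>.flux, <spectrograph>.coadd1d and <spectrograph>.tell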
entropy.rs | use image;
use image::ImageResult;
macro_rules! io_try(
($e: expr) => (
match $e {
Ok(e) => e,
Err(err) => return Err(image::IoError(err))
}
)
)
#[deriving(Default, Clone)]
pub struct HuffTable {
lut: Vec<(u8, u8)>,
valptr: Vec<int>,
huffval: Vec<u8>,
maxcode: Vec<int>,
mincode: Vec<int>,
}
pub struct HuffDecoder {
pub bits: u32,
pub num_bits: u8,
pub end: bool,
pub marker: u8,
}
impl HuffDecoder {
pub fn new() -> HuffDecoder {
HuffDecoder {
bits: 0,
num_bits: 0,
end: false,
marker: 0
}
}
fn guarantee<R: Reader>(&mut self, r: &mut R, n: u8) -> ImageResult<()> {
while self.num_bits < n && !self.end {
let byte = io_try!(r.read_u8());
if byte == 0xFF {
let byte2 = io_try!(r.read_u8());
if byte2 != 0 {
self.marker = byte2;
self.end = true;
}
}
self.bits |= (byte as u32 << (32 - 8)) >> self.num_bits as uint;
self.num_bits += 8;
}
Ok(())
}
pub fn read_bit<R: Reader>(&mut self, r: &mut R) -> ImageResult<u8> {
let _ = try!(self.guarantee(r, 1));
let bit = (self.bits & (1 << 31)) >> 31;
self.consume(1);
Ok(bit as u8)
}
//Section F.2.2.4
//Figure F.17
pub fn receive<R: Reader>(&mut self, r: &mut R, ssss: u8) -> ImageResult<i32> {
let _ = try!(self.guarantee(r, ssss));
let bits = (self.bits & (0xFFFFFFFFu32 << (32 - ssss as uint))) >> (32 - ssss) as uint;
self.consume(ssss);
Ok(bits as i32)
}
fn consume(&mut self, n: u8) {
self.bits <<= n as uint;
self.num_bits -= n;
}
pub fn decode_symbol<R: Reader>(&mut self, r: &mut R, table: &HuffTable) -> ImageResult<u8> {
let _ = try!(self.guarantee(r, 8));
let index = (self.bits & 0xFF000000) >> (32 - 8);
let (val, size) = table.lut[index as uint];
if index < 256 && size < 9 {
self.consume(size);
Ok(val)
} else {
let mut code = 0u;
for i in range(0u, 16) {
let b = try!(self.read_bit(r));
code |= b as uint;
if (code as int) <= table.maxcode[i] {
let index = table.valptr[i] +
code as int -
table.mincode[i];
return Ok(table.huffval[index as uint])
}
code <<= 1;
}
Err(image::FormatError("Could not decode symbol.".to_string()))
}
}
}
/// Given an array containing the number of codes of each code length,
/// this function generates the huffman codes and their respective
/// code lengths as specified by the JPEG spec.
fn derive_codes_and_sizes(bits: &[u8]) -> (Vec<u8>, Vec<u16>) {
let mut huffsize = Vec::from_elem(256, 0u8);
let mut huffcode = Vec::from_elem(256, 0u16);
let mut k = 0;
let mut j;
//Annex C.2
//Figure C.1
//Generate table of individual code lengths
for i in range(0u, 16) {
j = 0;
while j < bits[i] {
huffsize.as_mut_slice()[k] = i as u8 + 1;
k += 1;
j += 1;
}
}
huffsize.as_mut_slice()[k] = 0;
//Annex C.2
//Figure C.2
//Generate table of huffman codes
k = 0;
let mut code = 0u16;
let mut size = huffsize[0];
while huffsize[k] != 0 {
huffcode.as_mut_slice()[k] = code;
code += 1;
k += 1;
if huffsize[k] == size {
continue
}
let diff = huffsize[k] - size;
code <<= diff as uint;
size += diff
}
(huffsize, huffcode)
}
pub fn build_huff_lut(bits: &[u8], huffval: &[u8]) -> Vec<(u8, u16)> {
let mut lut = Vec::from_elem(256, (17u8, 0u16));
let (huffsize, huffcode) = derive_codes_and_sizes(bits);
for (i, &v) in huffval.iter().enumerate() {
lut.as_mut_slice()[v as uint] = (huffsize[i], huffcode[i]);
}
lut
}
pub fn | (bits: Vec<u8>, huffval: Vec<u8>) -> HuffTable {
let mut mincode = Vec::from_elem(16, -1i);
let mut maxcode = Vec::from_elem(16, -1i);
let mut valptr = Vec::from_elem(16, -1i);
let mut lut = Vec::from_elem(256, (0u8, 17u8));
let (huffsize, huffcode) = derive_codes_and_sizes(bits.as_slice());
//Annex F.2.2.3
//Figure F.15
let mut j = 0;
for i in range(0u, 16) {
if bits[i] != 0 {
valptr.as_mut_slice()[i] = j;
mincode.as_mut_slice()[i] = huffcode[j as uint] as int;
j += bits[i] as int - 1;
maxcode.as_mut_slice()[i] = huffcode[j as uint] as int;
j += 1;
}
}
for (i, v) in huffval.iter().enumerate() {
if huffsize[i] > 8 {
break
}
let r = 8 - huffsize[i] as uint;
for j in range(0u, 1 << r) {
let index = (huffcode[i] << r) + j as u16;
lut.as_mut_slice()[index as uint] = (*v, huffsize[i]);
}
}
HuffTable {
lut: lut,
huffval: huffval,
maxcode: maxcode,
mincode: mincode,
valptr: valptr
}
} | derive_tables |
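For reference, the canonical-code construction performed by derive_codes_and_sizes above (JPEG Annex C, Figures C.1 and C.2) can be sketched in Python; bits[i] holds the number of codes of length i+1, mirroring the Rust code:

def derive_codes_and_sizes(bits):
    # Figure C.1: one entry per code, giving its code length in bits.
    huffsize = [length for length, n in enumerate(bits, start=1) for _ in range(n)]
    # Figure C.2: consecutive codes within a length; shift left when the length grows.
    huffcode, code = [], 0
    for k, size in enumerate(huffsize):
        huffcode.append(code)
        code += 1
        if k + 1 < len(huffsize) and huffsize[k + 1] != size:
            code <<= huffsize[k + 1] - size
    return huffsize, huffcode

# Two codes of length 2 and one of length 3 -> codes 00, 01, 100.
print(derive_codes_and_sizes([0, 2, 1] + [0] * 13))  # ([2, 2, 3], [0, 1, 4])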
iconset-demo.js | /**
* Copyright 2018 The Pennsylvania State University
* @license Apache-2.0, see License.md for full text.
*/
import { html, PolymerElement } from "@polymer/polymer/polymer-element.js";
import "@polymer/polymer/lib/elements/dom-repeat.js";
import { IronMeta } from "@polymer/iron-meta/iron-meta.js";
import "@polymer/iron-icon/iron-icon.js";
import "@polymer/marked-element/marked-element.js";
/**
* `iconset-demo`
* @element iconset-demo
* `iterates through an iconset array to generate a demo of all of the icons`
*
* @microcopy - language worth noting:
* -
*
* @polymer
* @demo demo/index.html
*/
class IconsetDemo extends PolymerElement {
/* REQUIRED FOR TOOLING DO NOT TOUCH */
/**
* Store the tag name to make it easier to obtain directly.
* @notice function name must be here for tooling to operate correctly
*/
static get tag() {
return "iconset-demo";
}
/**
* life cycle, element is ready
*/
connectedCallback() {
super.connectedCallback();
const iconSets = new IronMeta({ type: "iconset" });
let temp = [],
root = this;
// need to access iconset imperatively now
if (
typeof iconSets !== typeof undefined &&
iconSets.list &&
iconSets.list.length
) {
var index = 0;
iconSets.list.forEach(function(item) {
let name = item.name;
if (!root._hideIconset(name)) {
temp.push({
name: name,
icons: []
});
item.getIconNames().forEach(icon => {
temp[index].icons.push(icon);
});
index++;
}
});
}
this.set("__iconList", []);
this.set("__iconList", temp);
}
/**
* determines if a given iconset should be hidden
*
* @param {string} name the name of the iconset
* @returns {boolean} whether or not to hide the iconset
*/ | included = isets.length === 0 || isets.includes(name),
esets = this.excludeSets !== null ? this.excludeSets.split(/ /) : [],
excluded = esets.length > 0 && esets.includes(name);
return !included || excluded;
}
}
window.customElements.define(IconsetDemo.tag, IconsetDemo);
export { IconsetDemo }; | _hideIconset(name) {
let isets = this.includeSets !== null ? this.includeSets.split(/ /) : [], |
context.go | package context
import (
"os"
"strings"
)
// Context holds variables in present context
type Context struct {
variables []Variable
}
// Variable is struct definition for variable
type Variable struct {
name string
value interface{}
}
// GetName returns name of variable
func (v Variable) GetName() string {
return v.name
}
|
// DefineVar defines variable and puts it in present context
func (c *Context) DefineVar(variable string, value interface{}) {
var v = Variable{name: variable, value: value}
c.variables = append(c.variables, v)
}
// GetVars returns all defined variables in context
func (c *Context) GetVars() []Variable {
return c.variables
}
// GetVar finds var by name and returns its value
func (c Context) GetVar(name string) Variable {
for _, value := range c.variables {
if value.name == name {
return value
}
}
return Variable{
name: name,
value: name,
}
}
// Transpile replaces variables from the context in the given text
func (c *Context) Transpile(toCompile string) string {
for _, variable := range c.variables {
str, ok := variable.value.(string)
if ok {
toCompile = strings.Replace(toCompile, "$"+variable.name, str, -1)
}
}
return toCompile
}
// New creates a context for the agent
func New() *Context {
context := &Context{}
// insert environment variables in our context
for _, e := range os.Environ() {
pair := strings.Split(e, "=")
context.DefineVar(pair[0], pair[1])
}
return context
} | // GetValue returns value of variable
func (v Variable) GetValue() interface{} {
return v.value
} |
fake.go | /*
Copyright 2021 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package fake
import (
context "context"
fake "knative.dev/eventing/pkg/client/injection/informers/factory/fake"
inmemorychannel "knative.dev/eventing/pkg/client/injection/informers/messaging/v1/inmemorychannel"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
)
var Get = inmemorychannel.Get
func | () {
injection.Fake.RegisterInformer(withInformer)
}
func withInformer(ctx context.Context) (context.Context, controller.Informer) {
f := fake.Get(ctx)
inf := f.Messaging().V1().InMemoryChannels()
return context.WithValue(ctx, inmemorychannel.Key{}, inf), inf.Informer()
}
| init |
datasource_cli.py | """Cli entry point to setup db indexes."""
import click
import structlog
from click import Context
from miscutils.logging_config import Verbosity
from pymongo import ASCENDING, IndexModel
from pymongo.collection import Collection
from selectedtests.config.logging_config import config_logging
from selectedtests.datasource.mongo_wrapper import MongoWrapper
LOGGER = structlog.get_logger()
def setup_queue_indexes(collection: Collection) -> None:
"""
Create appropriate indexes for ProjectTestMappingWorkItems.
:param collection: Collection to add indexes to.
"""
index = IndexModel([("project", ASCENDING)], unique=True)
collection.create_indexes([index])
LOGGER.info("Adding indexes for collection", collection=collection.name)
def | (collection: Collection) -> None:
"""
Create appropriate indexes for the test and task mappings collections.
:param collection: Collection to add indexes to.
"""
# project, source_file on its own could be unique, but the repo and branch are needed when
# there is a module.
index = IndexModel(
[
("project", ASCENDING),
("repo", ASCENDING),
("branch", ASCENDING),
("source_file", ASCENDING),
],
unique=True,
)
collection.create_indexes([index])
LOGGER.info("Adding indexes for collection", collection=collection.name)
def setup_mappings_tasks_indexes(collection: Collection) -> None:
"""
Create appropriate indexes for the mapping tasks collection.
The indexes must support both the $lookup operation and uniqueness constraints.
:param collection: Collection to add indexes to.
"""
index = IndexModel(
[("task_mapping_id", ASCENDING), ("name", ASCENDING), ("variant", ASCENDING)], unique=True
)
collection.create_indexes([index])
LOGGER.info("Adding indexes for collection", collection=collection.name)
def setup_mappings_test_files_indexes(collection: Collection) -> None:
"""
Create appropriate indexes for the mapping test files collection.
The indexes must support both the $lookup operation and uniqueness constraints.
:param collection: Collection to add indexes to.
"""
index = IndexModel([("test_mapping_id", ASCENDING), ("name", ASCENDING)], unique=True)
collection.create_indexes([index])
LOGGER.info("Adding indexes for collection", collection=collection.name)
@click.group()
@click.option("--verbose", is_flag=True, default=False, help="Enable verbose logging.")
@click.option("--mongo-uri", required=True, type=str, help="Mongo URI to connect to.")
@click.pass_context
def cli(ctx: Context, verbose: bool, mongo_uri: str) -> None:
"""Suite of MongoDB related commands, see the commands help for more details."""
ctx.ensure_object(dict)
ctx.obj["mongo"] = MongoWrapper.connect(mongo_uri)
verbosity = Verbosity.DEBUG if verbose else Verbosity.INFO
config_logging(verbosity, human_readable=False)
@cli.command()
@click.pass_context
def create_indexes(ctx: Context) -> None:
"""Initialize the mongo database with proper indexes."""
# Creating index no-ops if index already exists
setup_queue_indexes(ctx.obj["mongo"].test_mappings_queue())
setup_queue_indexes(ctx.obj["mongo"].task_mappings_queue())
setup_mappings_indexes(ctx.obj["mongo"].test_mappings())
setup_mappings_indexes(ctx.obj["mongo"].task_mappings())
setup_mappings_test_files_indexes(ctx.obj["mongo"].test_mappings_test_files())
setup_mappings_tasks_indexes(ctx.obj["mongo"].task_mappings_tasks())
def main() -> None:
"""Entry point for setting up selected-tests db indexes."""
return cli(obj={}, auto_envvar_prefix="SELECTED_TESTS")
| setup_mappings_indexes |
waypoint_updater.py | #!/usr/bin/env python
import numpy as np
import rospy
from std_msgs.msg import Int32
from geometry_msgs.msg import PoseStamped
from styx_msgs.msg import Lane, Waypoint
from scipy.spatial import KDTree
import math
'''
This node will publish waypoints from the car's current position to some `x` distance ahead.
As mentioned in the doc, you should ideally first implement a version which does not care
about traffic lights or obstacles.
Once you have created dbw_node, you will update this node to use the status of traffic lights too.
Please note that our simulator also provides the exact location of traffic lights and their
current status in `/vehicle/traffic_lights` message. You can use this message to build this node
as well as to verify your TL classifier.
TODO (for Yousuf and Aaron): Stopline location for each traffic light.
'''
LOOKAHEAD_WPS = 30 # Number of waypoints we will publish. You can change this number
MAX_DECEL = 1
class WaypointUpdater(object):
def __init__(self):
rospy.init_node('waypoint_updater')
rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb,queue_size = 1)
rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb, queue_size = 1)
rospy.Subscriber('/traffic_waypoint', Int32, self.traffic_cb, queue_size = 1)
# TODO: Add a subscriber for /traffic_waypoint and /obstacle_waypoint below
self.final_waypoints_pub = rospy.Publisher('final_waypoints', Lane, queue_size=1)
# TODO: Add other member variables you need below
self.pose = None
self.base_waypoints = None
self.waypoints_2d = None
self.waypoint_tree = None
self.stopline_wp_idx = -1
self.loop()
#rospy.spin()
def loop(self):
rate = rospy.Rate(50)
while not rospy.is_shutdown():
if self.pose and self.base_waypoints:
#Get closest waypoint
closest_waypoint_idx = self.get_closest_waypoint_idx()
self.publish_waypoints(closest_waypoint_idx)
rate.sleep()
def get_closest_waypoint_idx(self):
x = self.pose.pose.position.x
y = self.pose.pose.position.y
closest_idx = self.waypoint_tree.query([x,y],1)[1]
#check if closest is ahead or behind vehicle
closest_coord = self.waypoints_2d[closest_idx]
prev_coord = self.waypoints_2d[closest_idx - 1]
# Equation for hyperplane through closest_coords
cl_vect = np.array(closest_coord)
prev_vect = np.array(prev_coord)
pos_vect = np.array([x,y])
val = np.dot(cl_vect - prev_vect, pos_vect -cl_vect)
if val > 0:
closest_idx = (closest_idx + 1) % len(self.waypoints_2d)
return closest_idx
def publish_waypoints(self,closest_idx):
#lane = Lane()
#lane.header = self.base_waypoints.header
#lane.waypoints = self.base_waypoints.waypoints[closest_idx:closest_idx + LOOKAHEAD_WPS]
final_lane = self.generate_lane()
self.final_waypoints_pub.publish(final_lane)
def generate_lane(self):
lane = Lane()
closest_idx = self.get_closest_waypoint_idx()
farthest_idx = closest_idx + LOOKAHEAD_WPS
base_waypoints = self.base_waypoints.waypoints[closest_idx:farthest_idx]
if self.stopline_wp_idx == -1 or (self.stopline_wp_idx >= farthest_idx):
lane.waypoints = base_waypoints
else:
lane.waypoints = self.decelerate_waypoints(base_waypoints,closest_idx)
return lane
def decelerate_waypoints(self,waypoints,closest_idx):
temp = []
for i, wp in enumerate(waypoints):
p = Waypoint()
p.pose = wp.pose
stop_idx = max(self.stopline_wp_idx - closest_idx - 3, 0)
dist = self.distance(waypoints, i, stop_idx)
vel = math.sqrt(2 * MAX_DECEL * dist)
if vel < 1.0:
vel = 0
p.twist.twist.linear.x = min(vel, wp.twist.twist.linear.x)
temp.append(p)
return temp
def pose_cb(self, msg):
# TODO: Implement
self.pose = msg
def waypoints_cb(self, waypoints):
# TODO: Implement
self.base_waypoints = waypoints
if not self.waypoints_2d:
self.waypoints_2d = [[waypoint.pose.pose.position.x, waypoint.pose.pose.position.y] for waypoint in waypoints.waypoints]
self.waypoint_tree = KDTree(self.waypoints_2d)
def traffic_cb(self, msg):
# TODO: Callback for /traffic_waypoint message. Implement
self.stopline_wp_idx = msg.data
def obstacle_cb(self, msg):
# TODO: Callback for /obstacle_waypoint message. We will implement it later
pass
def get_waypoint_velocity(self, waypoint):
return waypoint.twist.twist.linear.x
def set_waypoint_velocity(self, waypoints, waypoint, velocity):
waypoints[waypoint].twist.twist.linear.x = velocity
def distance(self, waypoints, wp1, wp2):
|
if __name__ == '__main__':
try:
WaypointUpdater()
except rospy.ROSInterruptException:
rospy.logerr('Could not start waypoint updater node.')
| dist = 0
dl = lambda a, b: math.sqrt((a.x-b.x)**2 + (a.y-b.y)**2 + (a.z-b.z)**2)
for i in range(wp1, wp2+1):
dist += dl(waypoints[wp1].pose.pose.position, waypoints[i].pose.pose.position)
wp1 = i
return dist |
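A worked numeric example of the dot-product test used in get_closest_waypoint_idx above (the coordinates are made up for illustration):

import numpy as np

prev_vect = np.array([0.0, 0.0])   # waypoint before the closest one
cl_vect = np.array([1.0, 0.0])     # closest waypoint
pos_vect = np.array([1.5, 0.2])    # car position

# A positive dot product means the closest waypoint lies behind the car,
# so the updater advances to the next waypoint index.
val = np.dot(cl_vect - prev_vect, pos_vect - cl_vect)
print(val > 0)  # True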
source_util.rs | use rustc_ast as ast;
use rustc_ast::ptr::P;
use rustc_ast::token;
use rustc_ast::tokenstream::TokenStream;
use rustc_ast_pretty::pprust;
use rustc_expand::base::{self, *};
use rustc_expand::module::DirectoryOwnership;
use rustc_parse::{self, new_parser_from_file, parser::Parser};
use rustc_session::lint::builtin::INCOMPLETE_INCLUDE;
use rustc_span::symbol::Symbol;
use rustc_span::{self, Pos, Span};
use smallvec::SmallVec;
use std::rc::Rc;
// These macros all relate to the file system; they either return
// the column/row/filename of the expression, or they include
// a given file into the current one.
/// line!(): expands to the current line number
pub fn expand_line(
cx: &mut ExtCtxt<'_>,
sp: Span,
tts: TokenStream,
) -> Box<dyn base::MacResult + 'static> {
let sp = cx.with_def_site_ctxt(sp);
base::check_zero_tts(cx, sp, tts, "line!");
let topmost = cx.expansion_cause().unwrap_or(sp);
let loc = cx.source_map().lookup_char_pos(topmost.lo());
base::MacEager::expr(cx.expr_u32(topmost, loc.line as u32))
}
/* column!(): expands to the current column number */
pub fn expand_column(
cx: &mut ExtCtxt<'_>,
sp: Span,
tts: TokenStream,
) -> Box<dyn base::MacResult + 'static> {
let sp = cx.with_def_site_ctxt(sp);
base::check_zero_tts(cx, sp, tts, "column!");
let topmost = cx.expansion_cause().unwrap_or(sp);
let loc = cx.source_map().lookup_char_pos(topmost.lo());
base::MacEager::expr(cx.expr_u32(topmost, loc.col.to_usize() as u32 + 1))
}
/// file!(): expands to the current filename
/// The source_file (`loc.file`) contains a bunch more information we could spit
/// out if we wanted.
pub fn expand_file(
cx: &mut ExtCtxt<'_>,
sp: Span,
tts: TokenStream,
) -> Box<dyn base::MacResult + 'static> {
let sp = cx.with_def_site_ctxt(sp);
base::check_zero_tts(cx, sp, tts, "file!");
let topmost = cx.expansion_cause().unwrap_or(sp);
let loc = cx.source_map().lookup_char_pos(topmost.lo());
base::MacEager::expr(cx.expr_str(topmost, Symbol::intern(&loc.file.name.to_string())))
}
pub fn expand_stringify(
cx: &mut ExtCtxt<'_>,
sp: Span,
tts: TokenStream,
) -> Box<dyn base::MacResult + 'static> {
let sp = cx.with_def_site_ctxt(sp);
let s = pprust::tts_to_string(&tts);
base::MacEager::expr(cx.expr_str(sp, Symbol::intern(&s)))
}
pub fn expand_mod(
cx: &mut ExtCtxt<'_>,
sp: Span,
tts: TokenStream,
) -> Box<dyn base::MacResult + 'static> {
let sp = cx.with_def_site_ctxt(sp);
base::check_zero_tts(cx, sp, tts, "module_path!");
let mod_path = &cx.current_expansion.module.mod_path;
let string = mod_path.iter().map(|x| x.to_string()).collect::<Vec<String>>().join("::");
base::MacEager::expr(cx.expr_str(sp, Symbol::intern(&string)))
}
/// include! : parse the given file as an expr
/// This is generally a bad idea because it's going to behave
/// unhygienically.
pub fn expand_include<'cx>(
cx: &'cx mut ExtCtxt<'_>,
sp: Span,
tts: TokenStream,
) -> Box<dyn base::MacResult + 'cx> {
let sp = cx.with_def_site_ctxt(sp);
let file = match get_single_str_from_tts(cx, sp, tts, "include!") {
Some(f) => f,
None => return DummyResult::any(sp),
};
// The file will be added to the code map by the parser
let mut file = match cx.resolve_path(file, sp) {
Ok(f) => f,
Err(mut err) => {
err.emit();
return DummyResult::any(sp);
}
};
let p = new_parser_from_file(cx.parse_sess(), &file, Some(sp));
// If in the included file we have e.g., `mod bar;`,
// then the path of `bar.rs` should be relative to the directory of `file`.
// See https://github.com/rust-lang/rust/pull/69838/files#r395217057 for a discussion.
// `MacroExpander::fully_expand_fragment` later restores, so "stack discipline" is maintained.
file.pop();
cx.current_expansion.directory_ownership = DirectoryOwnership::Owned { relative: None };
let mod_path = cx.current_expansion.module.mod_path.clone();
cx.current_expansion.module = Rc::new(ModuleData { mod_path, directory: file });
struct ExpandResult<'a> {
p: Parser<'a>,
node_id: ast::NodeId,
}
impl<'a> base::MacResult for ExpandResult<'a> {
fn make_expr(mut self: Box<ExpandResult<'a>>) -> Option<P<ast::Expr>> {
let r = base::parse_expr(&mut self.p)?;
if self.p.token != token::Eof {
self.p.sess.buffer_lint(
&INCOMPLETE_INCLUDE,
self.p.token.span,
self.node_id,
"include macro expected single expression in source",
);
}
Some(r)
}
fn make_items(mut self: Box<ExpandResult<'a>>) -> Option<SmallVec<[P<ast::Item>; 1]>> {
let mut ret = SmallVec::new();
while self.p.token != token::Eof {
match self.p.parse_item() {
Err(mut err) => {
err.emit();
break;
}
Ok(Some(item)) => ret.push(item),
Ok(None) => {
let token = pprust::token_to_string(&self.p.token);
let msg = format!("expected item, found `{}`", token);
self.p.struct_span_err(self.p.token.span, &msg).emit();
break;
}
}
}
Some(ret)
}
}
Box::new(ExpandResult { p, node_id: cx.resolver.lint_node_id(cx.current_expansion.id) })
}
// include_str! : read the given file, insert it as a literal string expr
pub fn expand_include_str(
cx: &mut ExtCtxt<'_>,
sp: Span,
tts: TokenStream,
) -> Box<dyn base::MacResult + 'static> {
let sp = cx.with_def_site_ctxt(sp);
let file = match get_single_str_from_tts(cx, sp, tts, "include_str!") {
Some(f) => f,
None => return DummyResult::any(sp),
};
let file = match cx.resolve_path(file, sp) {
Ok(f) => f,
Err(mut err) => {
err.emit();
return DummyResult::any(sp);
}
};
match cx.source_map().load_binary_file(&file) {
Ok(bytes) => match std::str::from_utf8(&bytes) {
Ok(src) => |
Err(_) => {
cx.span_err(sp, &format!("{} wasn't a utf-8 file", file.display()));
DummyResult::any(sp)
}
},
Err(e) => {
cx.span_err(sp, &format!("couldn't read {}: {}", file.display(), e));
DummyResult::any(sp)
}
}
}
pub fn expand_include_bytes(
cx: &mut ExtCtxt<'_>,
sp: Span,
tts: TokenStream,
) -> Box<dyn base::MacResult + 'static> {
let sp = cx.with_def_site_ctxt(sp);
let file = match get_single_str_from_tts(cx, sp, tts, "include_bytes!") {
Some(f) => f,
None => return DummyResult::any(sp),
};
let file = match cx.resolve_path(file, sp) {
Ok(f) => f,
Err(mut err) => {
err.emit();
return DummyResult::any(sp);
}
};
match cx.source_map().load_binary_file(&file) {
Ok(bytes) => base::MacEager::expr(cx.expr_lit(sp, ast::LitKind::ByteStr(bytes.into()))),
Err(e) => {
cx.span_err(sp, &format!("couldn't read {}: {}", file.display(), e));
DummyResult::any(sp)
}
}
}
| {
let interned_src = Symbol::intern(&src);
base::MacEager::expr(cx.expr_str(sp, interned_src))
} |
view.py | from typing import Any, Dict
from django.http import HttpRequest, HttpResponse
from zerver.decorator import webhook_view
from zerver.lib.exceptions import UnsupportedWebhookEventType
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_success
from zerver.lib.webhooks.common import check_send_webhook_message, get_setup_webhook_message
from zerver.models import UserProfile
SONARR_TOPIC_TEMPLATE = "{series_title}".strip()
SONARR_TOPIC_TEMPLATE_TEST = "Sonarr - Test".strip()
SONARR_TOPIC_TEMPLATE_HEALTH_CHECK = "Health {level}".strip()
SONARR_MESSAGE_TEMPLATE_SERIES_DELETED = "{series_title} has been deleted.".strip()
SONARR_MESSAGE_TEMPLATE_HEALTH_CHECK = "{message}.".strip()
SONARR_MESSAGE_TEMPLATE_EPISODES_RENAMED = "{series_title} episodes have been renamed.".strip()
SONARR_MESSAGE_TEMPLATE_EPISODE_IMPORTED = (
"{series_title} - {series_number}x{episode_number} - {episode_name} has been imported.".strip()
)
SONARR_MESSAGE_TEMPLATE_EPISODE_IMPORTED_UPGRADE = "{series_title} - {series_number}x{episode_number} - {episode_name} has been upgraded from {old_quality} to {new_quality}.".strip()
SONARR_MESSAGE_TEMPLATE_EPISODE_GRABBED = (
"{series_title} - {series_number}x{episode_number} - {episode_name} has been grabbed.".strip()
)
SONARR_MESSAGE_TEMPLATE_EPISODE_DELETED = (
"{series_title} - {series_number}x{episode_number} - {episode_name} has been deleted.".strip()
)
SONARR_MESSAGE_TEMPLATE_EPISODE_DELETED_UPGRADE = "{series_title} - {series_number}x{episode_number} - {episode_name} has been deleted due to quality upgrade.".strip()
ALL_EVENT_TYPES = [
"Grab",
"EpisodeFileDelete",
"Test",
"Download",
"SeriesDelete",
"Health",
"Rename",
]
@webhook_view("Sonarr", all_event_types=ALL_EVENT_TYPES)
@has_request_variables
def api_sonarr_webhook(
request: HttpRequest,
user_profile: UserProfile,
payload: Dict[str, Any] = REQ(argument_type="body"),
) -> HttpResponse:
body = get_body_for_http_request(payload)
subject = get_subject_for_http_request(payload)
check_send_webhook_message(request, user_profile, subject, body, payload["eventType"])
return json_success(request)
def get_subject_for_http_request(payload: Dict[str, Any]) -> str:
if payload["eventType"] != "Test" and payload["eventType"] != "Health":
topic = SONARR_TOPIC_TEMPLATE.format(series_title=payload["series"]["title"])
elif payload["eventType"] == "Test":
topic = SONARR_TOPIC_TEMPLATE_TEST
elif payload["eventType"] == "Health":
topic = SONARR_TOPIC_TEMPLATE_HEALTH_CHECK.format(level=payload["level"])
return topic
def get_body_for_health_check_event(payload: Dict[str, Any]) -> str:
return SONARR_MESSAGE_TEMPLATE_HEALTH_CHECK.format(message=payload["message"])
def get_body_for_episodes_renamed_event(payload: Dict[str, Any]) -> str:
|
def get_body_for_series_deleted_event(payload: Dict[str, Any]) -> str:
return SONARR_MESSAGE_TEMPLATE_SERIES_DELETED.format(series_title=payload["series"]["title"])
def get_body_for_episode_imported_upgrade_event(payload: Dict[str, Any]) -> str:
data = {
"series_title": payload["series"]["title"],
"series_number": payload["episodes"][0]["seasonNumber"],
"episode_number": payload["episodes"][0]["episodeNumber"],
"episode_name": payload["episodes"][0]["title"],
"new_quality": payload["episodeFile"]["quality"],
"old_quality": payload["deletedFiles"][0]["quality"],
}
return SONARR_MESSAGE_TEMPLATE_EPISODE_IMPORTED_UPGRADE.format(**data)
def get_body_for_episode_imported_event(payload: Dict[str, Any]) -> str:
data = {
"series_title": payload["series"]["title"],
"series_number": payload["episodes"][0]["seasonNumber"],
"episode_number": payload["episodes"][0]["episodeNumber"],
"episode_name": payload["episodes"][0]["title"],
}
return SONARR_MESSAGE_TEMPLATE_EPISODE_IMPORTED.format(**data)
def get_body_for_episode_grabbed_event(payload: Dict[str, Any]) -> str:
data = {
"series_title": payload["series"]["title"],
"series_number": payload["episodes"][0]["seasonNumber"],
"episode_number": payload["episodes"][0]["episodeNumber"],
"episode_name": payload["episodes"][0]["title"],
}
return SONARR_MESSAGE_TEMPLATE_EPISODE_GRABBED.format(**data)
def get_body_for_episode_deleted_upgrade_event(payload: Dict[str, Any]) -> str:
data = {
"series_title": payload["series"]["title"],
"series_number": payload["episodes"][0]["seasonNumber"],
"episode_number": payload["episodes"][0]["episodeNumber"],
"episode_name": payload["episodes"][0]["title"],
}
return SONARR_MESSAGE_TEMPLATE_EPISODE_DELETED_UPGRADE.format(**data)
def get_body_for_episode_deleted_event(payload: Dict[str, Any]) -> str:
data = {
"series_title": payload["series"]["title"],
"series_number": payload["episodes"][0]["seasonNumber"],
"episode_number": payload["episodes"][0]["episodeNumber"],
"episode_name": payload["episodes"][0]["title"],
}
return SONARR_MESSAGE_TEMPLATE_EPISODE_DELETED.format(**data)
def get_body_for_http_request(payload: Dict[str, Any]) -> str:
if payload["eventType"] == "Test":
return get_setup_webhook_message("Sonarr")
elif payload["eventType"] == "Health":
return get_body_for_health_check_event(payload)
elif payload["eventType"] == "Rename":
return get_body_for_episodes_renamed_event(payload)
elif payload["eventType"] == "SeriesDelete":
return get_body_for_series_deleted_event(payload)
elif payload["eventType"] == "Download" and "isUpgrade" in payload:
if payload["isUpgrade"]:
return get_body_for_episode_imported_upgrade_event(payload)
else:
return get_body_for_episode_imported_event(payload)
elif payload["eventType"] == "Grab":
return get_body_for_episode_grabbed_event(payload)
elif payload["eventType"] == "EpisodeFileDelete" and "deleteReason" in payload:
if payload["deleteReason"] == "upgrade":
return get_body_for_episode_deleted_upgrade_event(payload)
else:
return get_body_for_episode_deleted_event(payload)
else:
raise UnsupportedWebhookEventType(payload["eventType"])
| return SONARR_MESSAGE_TEMPLATE_EPISODES_RENAMED.format(series_title=payload["series"]["title"]) |
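A small illustration of how the helpers above map a webhook payload to a topic and message; the payload below contains only the fields those helpers read, and the values are invented:

payload = {
    "eventType": "Grab",
    "series": {"title": "Some Show"},
    "episodes": [{"seasonNumber": 2, "episodeNumber": 5, "title": "Pilot"}],
}
print(get_subject_for_http_request(payload))  # Some Show
print(get_body_for_http_request(payload))     # Some Show - 2x5 - Pilot has been grabbed.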
UInt64ParamChoiceTest.py | import unittest
import zserio
from testutils import getZserioApi
class UInt64ParamChoiceTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.api = getZserioApi(__file__, "choice_types.zs").uint64_param_choice
def testSelectorConstructor(self):
uint64ParamChoice = self.api.UInt64ParamChoice(self.VARIANT_A_SELECTOR)
self.assertEqual(self.VARIANT_A_SELECTOR, uint64ParamChoice.getSelector())
def testFromReader(self):
selector = self.VARIANT_B_SELECTOR
value = 234
writer = zserio.BitStreamWriter()
UInt64ParamChoiceTest._writeUInt64ParamChoiceToStream(writer, selector, value)
reader = zserio.BitStreamReader(writer.getByteArray())
uint64ParamChoice = self.api.UInt64ParamChoice.fromReader(reader, selector)
self.assertEqual(selector, uint64ParamChoice.getSelector())
self.assertEqual(value, uint64ParamChoice.getB())
def testEq(self):
uint64ParamChoice1 = self.api.UInt64ParamChoice(self.VARIANT_A_SELECTOR)
uint64ParamChoice2 = self.api.UInt64ParamChoice(self.VARIANT_A_SELECTOR) | self.assertFalse(uint64ParamChoice1 == uint64ParamChoice2)
uint64ParamChoice2.setA(value)
self.assertTrue(uint64ParamChoice1 == uint64ParamChoice2)
diffValue = value + 1
uint64ParamChoice2.setA(diffValue)
self.assertFalse(uint64ParamChoice1 == uint64ParamChoice2)
def testHash(self):
uint64ParamChoice1 = self.api.UInt64ParamChoice(self.VARIANT_A_SELECTOR)
uint64ParamChoice2 = self.api.UInt64ParamChoice(self.VARIANT_A_SELECTOR)
self.assertEqual(hash(uint64ParamChoice1), hash(uint64ParamChoice2))
value = 99
uint64ParamChoice1.setA(value)
self.assertTrue(hash(uint64ParamChoice1) != hash(uint64ParamChoice2))
uint64ParamChoice2.setA(value)
self.assertEqual(hash(uint64ParamChoice1), hash(uint64ParamChoice2))
diffValue = value + 1
uint64ParamChoice2.setA(diffValue)
self.assertTrue(hash(uint64ParamChoice1) != hash(uint64ParamChoice2))
def testGetSelector(self):
uint64ParamChoice = self.api.UInt64ParamChoice(self.VARIANT_C_SELECTOR)
self.assertEqual(self.VARIANT_C_SELECTOR, uint64ParamChoice.getSelector())
def testGetSetA(self):
uint64ParamChoice = self.api.UInt64ParamChoice(self.VARIANT_A_SELECTOR)
value = 99
uint64ParamChoice.setA(value)
self.assertEqual(value, uint64ParamChoice.getA())
def testGetSetB(self):
uint64ParamChoice = self.api.UInt64ParamChoice(self.VARIANT_B_SELECTOR)
value = 234
uint64ParamChoice.setB(value)
self.assertEqual(value, uint64ParamChoice.getB())
def testGetSetC(self):
uint64ParamChoice = self.api.UInt64ParamChoice(self.VARIANT_C_SELECTOR)
value = 23456
uint64ParamChoice.setC(value)
self.assertEqual(value, uint64ParamChoice.getC())
def testBitSizeOf(self):
uint64ParamChoice = self.api.UInt64ParamChoice(self.VARIANT_A_SELECTOR)
self.assertEqual(8, uint64ParamChoice.bitSizeOf())
uint64ParamChoice = self.api.UInt64ParamChoice(self.VARIANT_B_SELECTOR)
self.assertEqual(16, uint64ParamChoice.bitSizeOf())
def testInitializeOffsets(self):
uint64ParamChoice = self.api.UInt64ParamChoice(self.VARIANT_A_SELECTOR)
bitPosition = 1
self.assertEqual(9, uint64ParamChoice.initializeOffsets(bitPosition))
uint64ParamChoice = self.api.UInt64ParamChoice(self.VARIANT_B_SELECTOR)
self.assertEqual(17, uint64ParamChoice.initializeOffsets(bitPosition))
def testReadWrite(self):
uint64ParamChoice = self.api.UInt64ParamChoice(self.VARIANT_A_SELECTOR)
byteValue = 99
uint64ParamChoice.setA(byteValue)
writer = zserio.BitStreamWriter()
uint64ParamChoice.write(writer)
readUInt64ParamChoice = self.api.UInt64ParamChoice(self.VARIANT_A_SELECTOR)
reader = zserio.BitStreamReader(writer.getByteArray())
readUInt64ParamChoice.read(reader)
self.assertEqual(byteValue, readUInt64ParamChoice.getA())
self.assertEqual(uint64ParamChoice, readUInt64ParamChoice)
uint64ParamChoice = self.api.UInt64ParamChoice(self.VARIANT_B_SELECTOR)
shortValue = 234
uint64ParamChoice.setB(shortValue)
writer = zserio.BitStreamWriter()
uint64ParamChoice.write(writer)
readUInt64ParamChoice = self.api.UInt64ParamChoice(self.VARIANT_B_SELECTOR)
reader = zserio.BitStreamReader(writer.getByteArray())
readUInt64ParamChoice.read(reader)
self.assertEqual(shortValue, readUInt64ParamChoice.getB())
self.assertEqual(uint64ParamChoice, readUInt64ParamChoice)
@staticmethod
def _writeUInt64ParamChoiceToStream(writer, selector, value):
if selector == 1:
writer.writeSignedBits(value, 8)
elif selector in (2, 3, 4):
writer.writeSignedBits(value, 16)
elif selector in (5, 6):
pass
else:
writer.writeSignedBits(value, 32)
VARIANT_A_SELECTOR = 1
VARIANT_B_SELECTOR = 2
VARIANT_C_SELECTOR = 7 | self.assertTrue(uint64ParamChoice1 == uint64ParamChoice2)
value = 99
uint64ParamChoice1.setA(value) |
linked_list.py | class | :
def __init__(self, data):
self.data = data
self.next = None
class LinkedList:
def __init__(self):
self.head = None
def add(self, data):
node = Node(data)
if not self.head:
self.head = node
else:
current = self.head
while current.next:
current = current.next
current.next = node
| Node |
system_usermanager.gen.py | from JumpScale import j
class system_usermanager(j.code.classGetBase()):
"""
get a user
"""
def __init__(self):
pass
self._te={}
self.actorname="usermanager"
self.appname="system"
#system_usermanager_osis.__init__(self)
def authenticate(self, name, secret, **kwargs):
"""
authenticate and return False if not successful
otherwise return secret for api
param:name name
param:secret md5 or passwd
result str
"""
#put your code here to implement this method
raise NotImplementedError ("not implemented method authenticate")
def create(self, username, password, groups, emails, domain, provider, **kwargs):
|
def createGroup(self, name, domain, description, **kwargs):
"""
create a group
param:name name of group
param:domain domain of group
param:description description of group
result bool
"""
#put your code here to implement this method
raise NotImplementedError ("not implemented method createGroup")
def delete(self, username, **kwargs):
"""
Delete a user
param:username name of the user
"""
#put your code here to implement this method
raise NotImplementedError ("not implemented method delete")
def deleteGroup(self, id, **kwargs):
"""
delete a group
param:id id/name of group
result bool
"""
#put your code here to implement this method
raise NotImplementedError ("not implemented method deleteGroup")
def editGroup(self, name, domain, description, users, **kwargs):
"""
edit a group
param:name name of group
param:domain domain of group
param:description description of group
param:users list or comma seperate string of users
result bool
"""
#put your code here to implement this method
raise NotImplementedError ("not implemented method editGroup")
def editUser(self, username, groups, password, emails, domain, **kwargs):
"""
set Groups for a user
param:username name of user
param:groups list of groups this user belongs to
param:password password for user
param:emails list of email addresses
param:domain Domain of user
"""
#put your code here to implement this method
raise NotImplementedError ("not implemented method editUser")
def userexists(self, name, **kwargs):
"""
param:name name
result bool
"""
#put your code here to implement this method
raise NotImplementedError ("not implemented method userexists")
def userget(self, name, **kwargs):
"""
param:name name of user
"""
#put your code here to implement this method
raise NotImplementedError ("not implemented method userget")
def whoami(self, **kwargs):
"""
return username
result str
"""
#put your code here to implement this method
raise NotImplementedError ("not implemented method whoami")
| """
create a user
param:username name of user
param:password password optional when provider is set
param:groups list of groups this user belongs to
param:emails list of email addresses
param:domain domain of user
param:provider provider for this user
result str,
"""
#put your code here to implement this method
raise NotImplementedError ("not implemented method create") |
bitcoin_eo.ts | <?xml version="1.0" ?><!DOCTYPE TS><TS language="eo" version="2.0">
<defaultcodec>UTF-8</defaultcodec>
<context>
<name>AboutDialog</name>
<message>
<location filename="../forms/aboutdialog.ui" line="+14"/>
<source>About Bitcoin</source>
<translation>Pri Aurumcoin(AU)</translation>
</message>
<message>
<location line="+39"/>
<source><b>Bitcoin</b> version</source>
<translation><b>Aurumcoin(AU)</b>-a versio</translation>
</message>
<message>
<location line="+57"/>
<source>
This is experimental software.
Distributed under the MIT/X11 software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php.
This product includes software developed by the OpenSSL Project for use in the OpenSSL Toolkit (http://www.openssl.org/) and cryptographic software written by Eric Young ([email protected]) and UPnP software written by Thomas Bernard.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../aboutdialog.cpp" line="+14"/>
<source>Copyright</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>The Aurumcoin(AU) developers</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>AddressBookPage</name>
<message>
<location filename="../forms/addressbookpage.ui" line="+14"/>
<source>Address Book</source>
<translation>Adresaro</translation>
</message>
<message>
<location line="+19"/>
<source>Double-click to edit address or label</source>
<translation>Duoble-klaku por redakti adreson aŭ etikedon</translation>
</message>
<message>
<location line="+27"/>
<source>Create a new address</source>
<translation>Kreu novan adreson</translation>
</message>
<message>
<location line="+14"/>
<source>Copy the currently selected address to the system clipboard</source>
<translation>Kopiu elektitan adreson al la tondejo</translation>
</message>
<message>
<location line="-11"/>
<source>&New Address</source>
<translation>&Nova Adreso</translation>
</message>
<message>
<location filename="../addressbookpage.cpp" line="+63"/>
<source>These are your Bitcoin addresses for receiving payments. You may want to give a different one to each sender so you can keep track of who is paying you.</source>
<translation>These are your Aurumcoin(AU) addresses for receiving payments. You may want to give a different one to each sender so you can keep track of who is paying you.</translation>
</message>
<message>
<location filename="../forms/addressbookpage.ui" line="+14"/>
<source>&Copy Address</source>
<translation>&Kopiu Adreson</translation>
</message>
<message>
<location line="+11"/>
<source>Show &QR Code</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+11"/>
<source>Sign a message to prove you own a Bitcoin address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Sign &Message</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+25"/>
<source>Delete the currently selected address from the list</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+27"/>
<source>Export the data in the current tab to a file</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Export</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-44"/>
<source>Verify a message to ensure it was signed with a specified Bitcoin address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Verify Message</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+14"/>
<source>&Delete</source>
<translation>&Forviŝu</translation>
</message>
<message>
<location filename="../addressbookpage.cpp" line="-5"/>
<source>These are your Bitcoin addresses for sending payments. Always check the amount and the receiving address before sending coins.</source>
<translation>These are your Aurumcoin(AU) addresses for sending payments. Always check the amount and the receiving address before sending coins.</translation>
</message>
<message>
<location line="+13"/>
<source>Copy &Label</source>
<translation>Kopiu &Etikedon</translation>
</message>
<message>
<location line="+1"/>
<source>&Edit</source>
<translation>&Redaktu</translation>
</message>
<message>
<location line="+1"/>
<source>Send &Coins</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+260"/>
<source>Export Address Book Data</source>
<translation>Eksportu Adresarajn Datumojn</translation>
</message>
<message>
<location line="+1"/>
<source>Comma separated file (*.csv)</source>
<translation>Diskoma dosiero (*.csv)</translation>
</message>
<message>
<location line="+13"/>
<source>Error exporting</source>
<translation>Eraro dum eksportado</translation>
</message>
<message>
<location line="+0"/>
<source>Could not write to file %1.</source>
<translation>Ne eblis skribi al dosiero %1.</translation>
</message>
</context>
<context>
<name>AddressTableModel</name>
<message>
<location filename="../addresstablemodel.cpp" line="+144"/>
<source>Label</source>
<translation>Etikedo</translation>
</message>
<message>
<location line="+0"/>
<source>Address</source>
<translation>Adreso</translation>
</message>
<message>
<location line="+36"/>
<source>(no label)</source>
<translation>(ne etikedo)</translation>
</message>
</context>
<context>
<name>AskPassphraseDialog</name>
<message>
<location filename="../forms/askpassphrasedialog.ui" line="+26"/>
<source>Passphrase Dialog</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+21"/>
<source>Enter passphrase</source>
<translation>Enigu pasfrazon</translation>
</message>
<message>
<location line="+14"/>
<source>New passphrase</source>
<translation>Nova pasfrazo</translation>
</message>
<message>
<location line="+14"/>
<source>Repeat new passphrase</source>
<translation>Ripetu novan pasfrazon</translation>
</message>
<message>
<location filename="../askpassphrasedialog.cpp" line="+33"/>
<source>Enter the new passphrase to the wallet.<br/>Please use a passphrase of <b>10 or more random characters</b>, or <b>eight or more words</b>.</source>
<translation>Enigu novan pasfrazon por la monujo.<br/>Bonvolu, uzu pasfrazon kun <b>10 aŭ pli hazardaj signoj</b>, aŭ <b>ok aŭ pli vortoj</b>.</translation>
</message>
<message>
<location line="+1"/>
<source>Encrypt wallet</source>
<translation>Ĉifru monujon</translation>
</message>
<message>
<location line="+3"/>
<source>This operation needs your wallet passphrase to unlock the wallet.</source>
<translation>Ĉi tiu operacio devas vian monujan pasfrazon, por malŝlosi la monujon.</translation>
</message>
<message>
<location line="+5"/>
<source>Unlock wallet</source>
<translation>Malŝlosu monujon</translation>
</message>
<message>
<location line="+3"/>
<source>This operation needs your wallet passphrase to decrypt the wallet.</source>
<translation>Ĉi tiu operacio devas vian monujan pasfrazon, por malĉifri la monujon.</translation>
</message>
<message>
<location line="+5"/>
<source>Decrypt wallet</source>
<translation>Malĉifru monujon</translation>
</message>
<message>
<location line="+3"/>
<source>Change passphrase</source>
<translation>Anstataŭigu pasfrazon</translation>
</message>
<message>
<location line="+1"/>
<source>Enter the old and new passphrase to the wallet.</source>
<translation>Enigu la malnovan kaj novan monujan pasfrazon.</translation>
</message>
<message>
<location line="+46"/>
<source>Confirm wallet encryption</source>
<translation>Konfirmu ĉifrado de monujo</translation>
</message>
<message>
<location line="+1"/>
<source>Warning: If you encrypt your wallet and lose your passphrase, you will <b>LOSE ALL OF YOUR BITCOINS</b>!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>Are you sure you wish to encrypt your wallet?</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+15"/>
<source>IMPORTANT: Any previous backups you have made of your wallet file should be replaced with the newly generated, encrypted wallet file. For security reasons, previous backups of the unencrypted wallet file will become useless as soon as you start using the new, encrypted wallet.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+100"/>
<location line="+24"/>
<source>Warning: The Caps Lock key is on!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-130"/>
<location line="+58"/>
<source>Wallet encrypted</source>
<translation>Monujo ĉifrita</translation>
</message>
<message>
<location line="-56"/>
<source>Bitcoin will close now to finish the encryption process. Remember that encrypting your wallet cannot fully protect your bitcoins from being stolen by malware infecting your computer.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<location line="+7"/>
<location line="+42"/>
<location line="+6"/>
<source>Wallet encryption failed</source>
<translation>Monujo ĉifrado fiaskis</translation>
</message>
<message>
<location line="-54"/>
<source>Wallet encryption failed due to an internal error. Your wallet was not encrypted.</source>
<translation>Ĉifrado de monujo fiaskis, kaŭze de interna eraro. Via monujo ne ĉifritas.</translation>
</message>
<message>
<location line="+7"/>
<location line="+48"/>
<source>The supplied passphrases do not match.</source>
<translation>La pasfrazoj enigitaj ne samas.</translation>
</message>
<message>
<location line="-37"/>
<source>Wallet unlock failed</source>
<translation>Monujo malŝlosado fiaskis</translation>
</message>
<message>
<location line="+1"/>
<location line="+11"/>
<location line="+19"/>
<source>The passphrase entered for the wallet decryption was incorrect.</source>
<translation>La pasfrazo enigita por ĉifrado de monujo ne konformas.</translation>
</message>
<message>
<location line="-20"/>
<source>Wallet decryption failed</source>
<translation>Monujo malĉifrado fiaskis</translation>
</message>
<message>
<location line="+14"/>
<source>Wallet passphrase was successfully changed.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>BitcoinGUI</name>
<message>
<location filename="../bitcoingui.cpp" line="+233"/>
<source>Sign &message...</source>
<translation>Subskribu &mesaĝon...</translation>
</message>
<message>
<location line="+280"/>
<source>Synchronizing with network...</source>
<translation>Sinkronigante kun reto...</translation>
</message>
<message>
<location line="-349"/>
<source>&Overview</source>
<translation>&Superrigardo</translation>
</message>
<message>
<location line="+1"/>
<source>Show general overview of wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+20"/>
<source>&Transactions</source>
<translation>&Transakcioj</translation>
</message>
<message>
<location line="+1"/>
<source>Browse transaction history</source>
<translation>Esploru historion de transakcioj</translation>
</message>
<message>
<location line="+7"/>
<source>Edit the list of stored addresses and labels</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-14"/>
<source>Show the list of addresses for receiving payments</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+31"/>
<source>E&xit</source>
<translation>&Eliru</translation>
</message>
<message>
<location line="+1"/>
<source>Quit application</source>
<translation>Eliru de aplikaĵo</translation>
</message>
<message>
<location line="+4"/>
<source>Show information about Bitcoin</source>
<translation>Vidigu informaĵon pri Bitmono</translation>
</message>
<message>
<location line="+2"/>
<source>About &Qt</source>
<translation>Pri &QT</translation>
</message>
<message>
<location line="+1"/>
<source>Show information about Qt</source>
<translation>Vidigu informaĵon pri Qt</translation>
</message>
<message>
<location line="+2"/>
<source>&Options...</source>
<translation>&Opcioj...</translation>
</message>
<message>
<location line="+6"/>
<source>&Encrypt Wallet...</source>
<translation>&Ĉifru Monujon...</translation>
</message>
<message>
<location line="+3"/>
<source>&Backup Wallet...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>&Change Passphrase...</source>
<translation>&Anstataŭigu pasfrazon...</translation>
</message>
<message>
<location line="+285"/>
<source>Importing blocks from disk...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Reindexing blocks on disk...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-347"/>
<source>Send coins to a Bitcoin address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+49"/>
<source>Modify configuration options for Bitcoin</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+9"/>
<source>Backup wallet to another location</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Change the passphrase used for wallet encryption</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>&Debug window</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Open debugging and diagnostic console</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-4"/>
<source>&Verify message...</source>
<translation>&Kontrolu mesaĝon...</translation>
</message>
<message>
<location line="-165"/>
<location line="+530"/>
<source>Bitcoin</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-530"/>
<source>Wallet</source>
<translation>Monujo</translation>
</message>
<message>
<location line="+101"/>
<source>&Send</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>&Receive</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+14"/>
<source>&Addresses</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+22"/>
<source>&About Bitcoin</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+9"/>
<source>&Show / Hide</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Show or hide the main Window</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Encrypt the private keys that belong to your wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Sign messages with your Bitcoin addresses to prove you own them</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Verify messages to ensure they were signed with specified Bitcoin addresses</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+28"/>
<source>&File</source>
<translation>&Dosiero</translation>
</message>
<message>
<location line="+7"/>
<source>&Settings</source>
<translation>&Agordoj</translation>
</message>
<message>
<location line="+6"/>
<source>&Help</source>
<translation>&Helpo</translation>
</message>
<message>
<location line="+9"/>
<source>Tabs toolbar</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+17"/>
<location line="+10"/>
<source>[testnet]</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+47"/>
<source>Bitcoin client</source>
<translation type="unfinished"/>
</message>
<message numerus="yes">
<location line="+141"/>
<source>%n active connection(s) to Bitcoin network</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message>
<location line="+22"/>
<source>No block source available...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+12"/>
<source>Processed %1 of %2 (estimated) blocks of transaction history.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Processed %1 blocks of transaction history.</source>
<translation type="unfinished"/>
</message>
<message numerus="yes">
<location line="+20"/>
<source>%n hour(s)</source>
<translation><numerusform>%n horo</numerusform><numerusform>%n horoj</numerusform></translation>
</message>
<message numerus="yes">
<location line="+4"/>
<source>%n day(s)</source>
<translation><numerusform>%n tago</numerusform><numerusform>%n tagoj</numerusform></translation>
</message>
<message numerus="yes">
<location line="+4"/>
<source>%n week(s)</source>
<translation><numerusform>%n semajno</numerusform><numerusform>%n semajnoj</numerusform></translation>
</message>
<message>
<location line="+4"/>
<source>%1 behind</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+14"/>
<source>Last received block was generated %1 ago.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Transactions after this will not yet be visible.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+22"/>
<source>Error</source>
<translation>Eraro</translation>
</message>
<message>
<location line="+3"/>
<source>Warning</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Information</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+70"/>
<source>This transaction is over the size limit. You can still send it for a fee of %1, which goes to the nodes that process your transaction and helps to support the network. Do you want to pay the fee?</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-140"/>
<source>Up to date</source>
<translation>Ĝisdata</translation>
</message>
<message>
<location line="+31"/>
<source>Catching up...</source>
<translation>Ĝisdatigante...</translation>
</message>
<message>
<location line="+113"/>
<source>Confirm transaction fee</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>Sent transaction</source>
<translation>Sendita transakcio</translation>
</message>
<message>
<location line="+0"/>
<source>Incoming transaction</source>
<translation>Envenanta transakcio</translation>
</message>
<message>
<location line="+1"/>
<source>Date: %1
Amount: %2
Type: %3
Address: %4
</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+33"/>
<location line="+23"/>
<source>URI handling</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-23"/>
<location line="+23"/>
<source>URI can not be parsed! This can be caused by an invalid Bitcoin address or malformed URI parameters.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+17"/>
<source>Wallet is <b>encrypted</b> and currently <b>unlocked</b></source>
<translation>Monujo estas <b>ĉifrita</b> kaj nun <b>malŝlosita</b></translation>
</message>
<message>
<location line="+8"/>
<source>Wallet is <b>encrypted</b> and currently <b>locked</b></source>
<translation>Monujo estas <b>ĉifrita</b> kaj nun <b>ŝlosita</b></translation>
</message>
<message>
<location filename="../bitcoin.cpp" line="+111"/>
<source>A fatal error occurred. Bitcoin can no longer continue safely and will quit.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>ClientModel</name>
<message>
<location filename="../clientmodel.cpp" line="+104"/>
<source>Network Alert</source>
<translation>Reta Averto</translation>
</message>
</context>
<context>
<name>EditAddressDialog</name>
<message>
<location filename="../forms/editaddressdialog.ui" line="+14"/>
<source>Edit Address</source>
<translation>Redaktu Adreson</translation>
</message>
<message>
<location line="+11"/>
<source>&Label</source>
<translation>&Etikedo</translation>
</message>
<message>
<location line="+10"/>
<source>The label associated with this address book entry</source>
<translation>La etikedo interrilatita kun ĉi tiu adreso</translation>
</message>
<message>
<location line="+7"/>
<source>&Address</source>
<translation>&Adreso</translation>
</message>
<message>
<location line="+10"/>
<source>The address associated with this address book entry. This can only be modified for sending addresses.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../editaddressdialog.cpp" line="+21"/>
<source>New receiving address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>New sending address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Edit receiving address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Edit sending address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+76"/>
<source>The entered address "%1" is already in the address book.</source>
<translation>La adreso enigita "%1" jam ekzistas en la adresaro.</translation>
</message>
<message>
<location line="-5"/>
<source>The entered address "%1" is not a valid Bitcoin address.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+10"/>
<source>Could not unlock wallet.</source>
<translation>Ne eblis malŝlosi monujon.</translation>
</message>
<message>
<location line="+5"/>
<source>New key generation failed.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>GUIUtil::HelpMessageBox</name>
<message>
<location filename="../guiutil.cpp" line="+424"/>
<location line="+12"/>
<source>Bitcoin-Qt</source>
<translation>Aurumcoin(AU)-Qt</translation>
</message>
<message>
<location line="-12"/>
<source>version</source>
<translation>versio</translation>
</message>
<message>
<location line="+2"/>
<source>Usage:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>command-line options</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>UI options</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Set language, for example "de_DE" (default: system locale)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Start minimized</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Show splash screen on startup (default: 1)</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>OptionsDialog</name>
<message>
<location filename="../forms/optionsdialog.ui" line="+14"/>
<source>Options</source>
<translation>Opcioj</translation>
</message>
<message>
<location line="+16"/>
<source>&Main</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>Optional transaction fee per kB that helps make sure your transactions are processed quickly. Most transactions are 1 kB.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+15"/>
<source>Pay transaction &fee</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+31"/>
<source>Automatically start Bitcoin after logging in to the system.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Start Bitcoin on system login</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+35"/>
<source>Reset all client options to default.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Reset Options</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<source>&Network</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>Automatically open the Bitcoin client port on the router. This only works when your router supports UPnP and it is enabled.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Map port using &UPnP</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Connect to the Bitcoin network through a SOCKS proxy (e.g. when connecting through Tor).</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Connect through SOCKS proxy:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+9"/>
<source>Proxy &IP:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+19"/>
<source>IP address of the proxy (e.g. 127.0.0.1)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>&Port:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>SOCKS &Version:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<source>SOCKS version of the proxy (e.g. 5)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+36"/>
<source>&Window</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>Show only a tray icon after minimizing the window.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Minimize to the tray instead of the taskbar</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Minimize instead of exit the application when the window is closed. When this option is enabled, the application will be closed only after selecting Quit in the menu.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>M&inimize on close</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+21"/>
<source>&Display</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>User Interface &language:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<source>The user interface language can be set here. This setting will take effect after restarting Bitcoin.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+11"/>
<source>&Unit to show amounts in:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<source>Choose the default subdivision unit to show in the interface and when sending coins.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+9"/>
<source>Whether to show Bitcoin addresses in the transaction list or not.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Display addresses in transaction list</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+71"/>
<source>&OK</source>
<translation>&OK</translation>
</message>
<message>
<location line="+7"/>
<source>&Cancel</source>
<translation>&Nuligu</translation>
</message>
<message>
<location line="+10"/>
<source>&Apply</source>
<translation>&Apliku</translation>
</message>
<message>
<location filename="../optionsdialog.cpp" line="+53"/>
<source>default</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+130"/>
<source>Confirm options reset</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Some settings may require a client restart to take effect.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>Do you want to proceed?</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+42"/>
<location line="+9"/>
<source>Warning</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-9"/>
<location line="+9"/>
<source>This setting will take effect after restarting Bitcoin.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+29"/>
<source>The supplied proxy address is invalid.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>OverviewPage</name>
<message>
<location filename="../forms/overviewpage.ui" line="+14"/>
<source>Form</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+50"/>
<location line="+166"/>
<source>The displayed information may be out of date. Your wallet automatically synchronizes with the Bitcoin network after a connection is established, but this process has not completed yet.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-124"/>
<source>Balance:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+29"/>
<source>Unconfirmed:</source>
<translation>Nekonfirmita:</translation>
</message>
<message>
<location line="-78"/>
<source>Wallet</source>
<translation>Monujo</translation>
</message>
<message>
<location line="+107"/>
<source>Immature:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<source>Mined balance that has not yet matured</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+46"/>
<source><b>Recent transactions</b></source>
<translation><b>Lastaj transakcioj</b></translation>
</message>
<message>
<location line="-101"/>
<source>Your current balance</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+29"/>
<source>Total of transactions that have yet to be confirmed, and do not yet count toward the current balance</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../overviewpage.cpp" line="+116"/>
<location line="+1"/>
<source>out of sync</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>PaymentServer</name>
<message>
<location filename="../paymentserver.cpp" line="+107"/>
<source>Cannot start bitcoin: click-to-pay handler</source>
<translation>Cannot start Aurumcoin(AU): click-to-pay handler</translation>
</message>
</context>
<context>
<name>QRCodeDialog</name>
<message>
<location filename="../forms/qrcodedialog.ui" line="+14"/>
<source>QR Code Dialog</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+59"/>
<source>Request Payment</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+56"/>
<source>Amount:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-44"/>
<source>Label:</source>
<translation>Etikedo:</translation>
</message>
<message>
<location line="+19"/>
<source>Message:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+71"/>
<source>&Save As...</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../qrcodedialog.cpp" line="+62"/>
<source>Error encoding URI into QR Code.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+40"/>
<source>The entered amount is invalid, please check.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+23"/>
<source>Resulting URI too long, try to reduce the text for label / message.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+25"/>
<source>Save QR Code</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>PNG Images (*.png)</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>RPCConsole</name>
<message>
<location filename="../forms/rpcconsole.ui" line="+46"/>
<source>Client name</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+10"/>
<location line="+23"/>
<location line="+26"/>
<location line="+23"/>
<location line="+23"/>
<location line="+36"/>
<location line="+53"/>
<location line="+23"/>
<location line="+23"/>
<location filename="../rpcconsole.cpp" line="+339"/>
<source>N/A</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-217"/>
<source>Client version</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-45"/>
<source>&Information</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+68"/>
<source>Using OpenSSL version</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+49"/>
<source>Startup time</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+29"/>
<source>Network</source>
<translation>Reto</translation>
</message>
<message>
<location line="+7"/>
<source>Number of connections</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+23"/>
<source>On testnet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+23"/>
<source>Block chain</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Current number of blocks</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+23"/>
<source>Estimated total blocks</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+23"/>
<source>Last block time</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+52"/>
<source>&Open</source>
<translation>&Malfermu</translation>
</message>
<message>
<location line="+16"/>
<source>Command-line options</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Show the Bitcoin-Qt help message to get a list with possible Bitcoin command-line options.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Show</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+24"/>
<source>&Console</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-260"/>
<source>Build date</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-104"/>
<source>Bitcoin - Debug window</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+25"/>
<source>Bitcoin Core</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+279"/>
<source>Debug log file</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Open the Bitcoin debug log file from the current data directory. This can take a few seconds for large log files.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+102"/>
<source>Clear console</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../rpcconsole.cpp" line="-30"/>
<source>Welcome to the Bitcoin RPC console.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Use up and down arrows to navigate history, and <b>Ctrl-L</b> to clear screen.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Type <b>help</b> for an overview of available commands.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>SendCoinsDialog</name>
<message>
<location filename="../forms/sendcoinsdialog.ui" line="+14"/>
<location filename="../sendcoinsdialog.cpp" line="+124"/>
<location line="+5"/>
<location line="+5"/>
<location line="+5"/>
<location line="+6"/>
<location line="+5"/>
<location line="+5"/>
<source>Send Coins</source>
<translation>Sendu Monojn</translation>
</message>
<message>
<location line="+50"/>
<source>Send to multiple recipients at once</source>
<translation>Sendu samtempe al multaj ricevantoj</translation>
</message>
<message>
<location line="+3"/>
<source>Add &Recipient</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+20"/>
<source>Remove all transaction fields</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Clear &All</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+22"/>
<source>Balance:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+10"/>
<source>123.456 AU</source>
<translation>123,456 AU</translation>
</message>
<message>
<location line="+31"/>
<source>Confirm the send action</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>S&end</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../sendcoinsdialog.cpp" line="-59"/>
<source><b>%1</b> to %2 (%3)</source>
<translation><b>%1</b> al %2 (%3)</translation>
</message>
<message>
<location line="+5"/>
<source>Confirm send coins</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Are you sure you want to send %1?</source>
<translation>Ĉu vi vere volas sendi %1?</translation>
</message>
<message>
<location line="+0"/>
<source> and </source>
<translation> kaj </translation>
</message>
<message>
<location line="+23"/>
<source>The recipient address is not valid, please recheck.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>The amount to pay must be larger than 0.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>The amount exceeds your balance.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>The total exceeds your balance when the %1 transaction fee is included.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>Duplicate address found, can only send to each address once per send operation.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Error: Transaction creation failed!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Error: The transaction was rejected. This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>SendCoinsEntry</name>
<message>
<location filename="../forms/sendcoinsentry.ui" line="+14"/>
<source>Form</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+15"/>
<source>A&mount:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<source>Pay &To:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+34"/>
<source>The address to send the payment to (e.g. 1NS17iag9jJgTHD1VXjvLCEnZuQ3rJDE9L)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+60"/>
<location filename="../sendcoinsentry.cpp" line="+26"/>
<source>Enter a label for this address to add it to your address book</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-78"/>
<source>&Label:</source>
<translation>&Etikedo:</translation>
</message>
<message>
<location line="+28"/>
<source>Choose address from address book</source>
<translation>Elektu adreson el adresaro</translation>
</message>
<message>
<location line="+10"/>
<source>Alt+A</source>
<translation>Alt+A</translation>
</message>
<message>
<location line="+7"/>
<source>Paste address from clipboard</source>
<translation>Algluu adreson de tondejo</translation>
</message>
<message>
<location line="+10"/>
<source>Alt+P</source>
<translation>Alt+P</translation>
</message>
<message>
<location line="+7"/>
<source>Remove this recipient</source>
<translation>Forigu ĉi tiun ricevanton</translation>
</message>
<message>
<location filename="../sendcoinsentry.cpp" line="+1"/>
<source>Enter a Bitcoin address (e.g. 1NS17iag9jJgTHD1VXjvLCEnZuQ3rJDE9L)</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>SignVerifyMessageDialog</name>
<message>
<location filename="../forms/signverifymessagedialog.ui" line="+14"/>
<source>Signatures - Sign / Verify a Message</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<source>&Sign Message</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>You can sign messages with your addresses to prove you own them. Be careful not to sign anything vague, as phishing attacks may try to trick you into signing your identity over to them. Only sign fully-detailed statements you agree to.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+18"/>
<source>The address to sign the message with (e.g. 1NS17iag9jJgTHD1VXjvLCEnZuQ3rJDE9L)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+10"/>
<location line="+213"/>
<source>Choose an address from the address book</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-203"/>
<location line="+213"/>
<source>Alt+A</source>
<translation>Alt+A</translation>
</message>
<message>
<location line="-203"/>
<source>Paste address from clipboard</source>
<translation>Algluu adreson de tondejo</translation>
</message>
<message>
<location line="+10"/>
<source>Alt+P</source>
<translation>Alt+P</translation>
</message>
<message>
<location line="+12"/>
<source>Enter the message you want to sign here</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Signature</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+27"/>
<source>Copy the current signature to the system clipboard</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+21"/>
<source>Sign the message to prove you own this Bitcoin address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Sign &Message</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+14"/>
<source>Reset all sign message fields</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<location line="+146"/>
<source>Clear &All</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-87"/>
<source>&Verify Message</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>Enter the signing address, message (ensure you copy line breaks, spaces, tabs, etc. exactly) and signature below to verify the message. Be careful not to read more into the signature than what is in the signed message itself, to avoid being tricked by a man-in-the-middle attack.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+21"/>
<source>The address the message was signed with (e.g. 1NS17iag9jJgTHD1VXjvLCEnZuQ3rJDE9L)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+40"/>
<source>Verify the message to ensure it was signed with the specified Bitcoin address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Verify &Message</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+14"/>
<source>Reset all verify message fields</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../signverifymessagedialog.cpp" line="+27"/>
<location line="+3"/>
<source>Enter a Bitcoin address (e.g. 1NS17iag9jJgTHD1VXjvLCEnZuQ3rJDE9L)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-2"/>
<source>Click "Sign Message" to generate signature</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Enter Bitcoin signature</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+82"/>
<location line="+81"/>
<source>The entered address is invalid.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-81"/>
<location line="+8"/>
<location line="+73"/>
<location line="+8"/>
<source>Please check the address and try again.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-81"/>
<location line="+81"/>
<source>The entered address does not refer to a key.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-73"/>
<source>Wallet unlock was cancelled.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>Private key for the entered address is not available.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+12"/>
<source>Message signing failed.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Message signed.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+59"/>
<source>The signature could not be decoded.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<location line="+13"/>
<source>Please check the signature and try again.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>The signature did not match the message digest.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Message verification failed.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Message verified.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>SplashScreen</name>
<message>
<location filename="../splashscreen.cpp" line="+22"/>
<source>The Aurumcoin(AU) developers</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>[testnet]</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>TransactionDesc</name>
<message>
<location filename="../transactiondesc.cpp" line="+20"/>
<source>Open until %1</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>%1/offline</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>%1/unconfirmed</source>
<translation>%1/nekonfirmita</translation>
</message>
<message>
<location line="+2"/>
<source>%1 confirmations</source>
<translation>%1 konfirmoj</translation>
</message>
<message>
<location line="+18"/>
<source>Status</source>
<translation type="unfinished"/>
</message>
<message numerus="yes">
<location line="+7"/>
<source>, broadcast through %n node(s)</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message>
<location line="+4"/>
<source>Date</source>
<translation>Dato</translation>
</message>
<message>
<location line="+7"/>
<source>Source</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>Generated</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<location line="+17"/>
<source>From</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<location line="+22"/>
<location line="+58"/>
<source>To</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-77"/>
<location line="+2"/>
<source>own address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-2"/>
<source>label</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+37"/>
<location line="+12"/>
<location line="+45"/>
<location line="+17"/>
<location line="+30"/>
<source>Credit</source>
<translation type="unfinished"/>
</message>
<message numerus="yes">
<location line="-102"/>
<source>matures in %n more block(s)</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message>
<location line="+2"/>
<source>not accepted</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+44"/>
<location line="+8"/>
<location line="+15"/>
<location line="+30"/>
<source>Debit</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-39"/>
<source>Transaction fee</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+16"/>
<source>Net amount</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>Message</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Comment</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Transaction ID</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Generated coins must mature 120 blocks before they can be spent. When you generated this block, it was broadcast to the network to be added to the block chain. If it fails to get into the chain, its state will change to "not accepted" and it won't be spendable. This may occasionally happen if another node generates a block within a few seconds of yours.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Debug information</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>Transaction</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Inputs</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+23"/>
<source>Amount</source>
<translation>Sumo</translation>
</message>
<message>
<location line="+1"/>
<source>true</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>false</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-209"/>
<source>, has not been successfully broadcast yet</source>
<translation>, ankoraŭ ne elsendita sukcese</translation>
</message>
<message numerus="yes">
<location line="-35"/>
<source>Open for %n more block(s)</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message>
<location line="+70"/>
<source>unknown</source>
<translation>nekonata</translation>
</message>
</context>
<context>
<name>TransactionDescDialog</name>
<message>
<location filename="../forms/transactiondescdialog.ui" line="+14"/>
<source>Transaction details</source>
<translation>Transakciaj detaloj</translation>
</message>
<message>
<location line="+6"/>
<source>This pane shows a detailed description of the transaction</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>TransactionTableModel</name>
<message>
<location filename="../transactiontablemodel.cpp" line="+225"/>
<source>Date</source>
<translation>Dato</translation>
</message>
<message>
<location line="+0"/>
<source>Type</source>
<translation>Tipo</translation>
</message>
<message>
<location line="+0"/>
<source>Address</source>
<translation>Adreso</translation>
</message>
<message>
<location line="+0"/>
<source>Amount</source>
<translation>Sumo</translation>
</message>
<message numerus="yes">
<location line="+57"/>
<source>Open for %n more block(s)</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message>
<location line="+3"/>
<source>Open until %1</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Offline (%1 confirmations)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Unconfirmed (%1 of %2 confirmations)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Confirmed (%1 confirmations)</source>
<translation type="unfinished"/>
</message>
<message numerus="yes">
<location line="+8"/>
<source>Mined balance will be available when it matures in %n more block(s)</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message>
<location line="+5"/>
<source>This block was not received by any other nodes and will probably not be accepted!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Generated but not accepted</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+43"/>
<source>Received with</source>
<translation>Ricevita kun</translation>
</message>
<message>
<location line="+2"/>
<source>Received from</source>
<translation>Ricevita de</translation>
</message>
<message>
<location line="+3"/>
<source>Sent to</source>
<translation>Sendita al</translation>
</message>
<message>
<location line="+2"/>
<source>Payment to yourself</source>
<translation>Pago al vi mem</translation>
</message>
<message>
<location line="+2"/>
<source>Mined</source>
<translation>Minita</translation>
</message>
<message>
<location line="+38"/>
<source>(n/a)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+199"/>
<source>Transaction status. Hover over this field to show number of confirmations.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Date and time that the transaction was received.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Type of transaction.</source>
<translation>Transakcia tipo.</translation>
</message>
<message>
<location line="+2"/>
<source>Destination address of transaction.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Amount removed from or added to balance.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>TransactionView</name>
<message>
<location filename="../transactionview.cpp" line="+52"/>
<location line="+16"/>
<source>All</source>
<translation>Ĉiuj</translation>
</message>
<message>
<location line="-15"/>
<source>Today</source>
<translation>Hodiaŭ</translation>
</message>
<message>
<location line="+1"/>
<source>This week</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>This month</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Last month</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>This year</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Range...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+11"/>
<source>Received with</source>
<translation>Ricevita kun</translation>
</message>
<message>
<location line="+2"/>
<source>Sent to</source>
<translation>Sendita al</translation>
</message>
<message>
<location line="+2"/>
<source>To yourself</source>
<translation>Al vi mem</translation>
</message>
<message>
<location line="+1"/>
<source>Mined</source>
<translation>Minita</translation>
</message>
<message>
<location line="+1"/>
<source>Other</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Enter address or label to search</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Min amount</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+34"/>
<source>Copy address</source>
<translation>Kopiu adreson</translation>
</message>
<message>
<location line="+1"/>
<source>Copy label</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Copy amount</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Copy transaction ID</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Edit label</source>
<translation>Redaktu etikedon</translation>
</message>
<message>
<location line="+1"/>
<source>Show transaction details</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+139"/>
<source>Export Transaction Data</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Comma separated file (*.csv)</source>
<translation>Diskoma dosiero (*.csv)</translation>
</message>
<message>
<location line="+8"/>
<source>Confirmed</source>
<translation>Konfirmita</translation>
</message>
<message>
<location line="+1"/>
<source>Date</source>
<translation>Dato</translation>
</message>
<message>
<location line="+1"/>
<source>Type</source>
<translation>Tipo</translation>
</message>
<message>
<location line="+1"/>
<source>Label</source>
<translation>Etikedo</translation>
</message>
<message>
<location line="+1"/>
<source>Address</source>
<translation>Adreso</translation>
</message>
<message>
<location line="+1"/>
<source>Amount</source>
<translation>Sumo</translation>
</message>
<message>
<location line="+1"/>
<source>ID</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Error exporting</source>
<translation>Eraro dum eksportado</translation>
</message>
<message>
<location line="+0"/>
<source>Could not write to file %1.</source>
<translation>Ne eblis skribi al dosiero %1.</translation>
</message>
<message>
<location line="+100"/>
<source>Range:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>to</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>WalletModel</name>
<message>
<location filename="../walletmodel.cpp" line="+193"/>
<source>Send Coins</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>WalletView</name>
<message>
<location filename="../walletview.cpp" line="+42"/>
<source>&Export</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Export the data in the current tab to a file</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+193"/>
<source>Backup Wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>Wallet Data (*.dat)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Backup Failed</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>There was an error trying to save the wallet data to the new location.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Backup Successful</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>The wallet data was successfully saved to the new location.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>bitcoin-core</name>
<message>
<location filename="../bitcoinstrings.cpp" line="+94"/>
<source>Bitcoin version</source>
<translation>Aurumcoin(AU)-a versio</translation>
</message>
<message>
<location line="+102"/>
<source>Usage:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-29"/>
<source>Send command to -server or bitcoind</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-23"/>
<source>List commands</source>
<translation>Listigu instrukciojn</translation>
</message>
<message>
<location line="-12"/>
<source>Get help for a command</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+24"/>
<source>Options:</source>
<translation>Opcioj:</translation>
</message>
<message>
<location line="+24"/>
<source>Specify configuration file (default: bitcoin.conf)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Specify pid file (default: bitcoind.pid)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-1"/>
<source>Specify data directory</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-9"/>
<source>Set database cache size in megabytes (default: 25)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-28"/>
<source>Listen for connections on <port> (default: 11080 or testnet: 5744)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Maintain at most <n> connections to peers (default: 125)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-48"/>
<source>Connect to a node to retrieve peer addresses, and disconnect</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+82"/>
<source>Specify your own public address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Threshold for disconnecting misbehaving peers (default: 100)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-134"/>
<source>Number of seconds to keep misbehaving peers from reconnecting (default: 86400)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-29"/>
<source>An error occurred while setting up the RPC port %u for listening on IPv4: %s</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+27"/>
<source>Listen for JSON-RPC connections on <port> (default: 21080 or testnet: 5745)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+37"/>
<source>Accept command line and JSON-RPC commands</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+76"/>
<source>Run in the background as a daemon and accept commands</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+37"/>
<source>Use the test network</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-112"/>
<source>Accept connections from outside (default: 1 if no -proxy or -connect)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-80"/>
<source>%s, you must set a rpcpassword in the configuration file:
%s
It is recommended you use the following random password:
rpcuser=bitcoinrpc
rpcpassword=%s
(you do not need to remember this password)
The username and password MUST NOT be the same.
If the file does not exist, create it with owner-readable-only file permissions.
It is also recommended to set alertnotify so you are notified of problems;
for example: alertnotify=echo %%s | mail -s "Bitcoin Alert" [email protected]
</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+17"/>
<source>An error occurred while setting up the RPC port %u for listening on IPv6, falling back to IPv4: %s</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Bind to given address and always listen on it. Use [host]:port notation for IPv6</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Cannot obtain a lock on data directory %s. Bitcoin is probably already running.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Error: The transaction was rejected! This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Error: This transaction requires a transaction fee of at least %s because of its amount, complexity, or use of recently received funds!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Execute command when a relevant alert is received (%s in cmd is replaced by message)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Execute command when a wallet transaction changes (%s in cmd is replaced by TxID)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+11"/>
<source>Set maximum size of high-priority/low-fee transactions in bytes (default: 27000)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>This is a pre-release test build - use at your own risk - do not use for mining or merchant applications</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Warning: -paytxfee is set very high! This is the transaction fee you will pay if you send a transaction.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Warning: Displayed transactions may not be correct! You may need to upgrade, or other nodes may need to upgrade.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Warning: Please check that your computer's date and time are correct! If your clock is wrong Bitcoin will not work properly.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Warning: error reading wallet.dat! All keys read correctly, but transaction data or address book entries might be missing or incorrect.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Warning: wallet.dat corrupt, data salvaged! Original wallet.dat saved as wallet.{timestamp}.bak in %s; if your balance or transactions are incorrect you should restore from a backup.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+14"/>
<source>Attempt to recover private keys from a corrupt wallet.dat</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Block creation options:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Connect only to the specified node(s)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Corrupted block database detected</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Discover own IP address (default: 1 when listening and no -externalip)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Do you want to rebuild the block database now?</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Error initializing block database</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Error initializing wallet database environment %s!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Error loading block database</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Error opening block database</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Error: Disk space is low!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Error: Wallet locked, unable to create transaction!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Error: system error: </source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to listen on any port. Use -listen=0 if you want this.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to read block info</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to read block</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to sync block index</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to write block index</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to write block info</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to write block</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to write file info</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to write to coin database</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to write transaction index</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to write undo data</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Find peers using DNS lookup (default: 1 unless -connect)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Generate coins (default: 0)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>How many blocks to check at startup (default: 288, 0 = all)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>How thorough the block verification is (0-4, default: 3)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+19"/>
<source>Not enough file descriptors available.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>Rebuild block chain index from current blk000??.dat files</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+16"/>
<source>Set the number of threads to service RPC calls (default: 4)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+26"/>
<source>Verifying blocks...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Verifying wallet...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-69"/>
<source>Imports blocks from external blk000??.dat file</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-76"/>
<source>Set the number of script verification threads (up to 16, 0 = auto, <0 = leave that many cores free, default: 0)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+77"/>
<source>Information</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Invalid -tor address: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Invalid amount for -minrelaytxfee=<amount>: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Invalid amount for -mintxfee=<amount>: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>Maintain a full transaction index (default: 0)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Maximum per-connection receive buffer, <n>*1000 bytes (default: 5000)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Maximum per-connection send buffer, <n>*1000 bytes (default: 1000)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Only accept block chain matching built-in checkpoints (default: 1)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Only connect to nodes in network <net> (IPv4, IPv6 or Tor)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Output extra debugging information. Implies all other -debug* options</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Output extra network debugging information</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Prepend debug output with timestamp</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>SSL options: (see the Bitcoin Wiki for SSL setup instructions)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Select the version of socks proxy to use (4-5, default: 5)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Send trace/debug info to console instead of debug.log file</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Send trace/debug info to debugger</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Set maximum block size in bytes (default: 250000)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Set minimum block size in bytes (default: 0)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Shrink debug.log file on client startup (default: 1 when no -debug)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Signing transaction failed</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Specify connection timeout in milliseconds (default: 5000)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>System error: </source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Transaction amount too small</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Transaction amounts must be positive</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Transaction too large</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Use UPnP to map the listening port (default: 0)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Use UPnP to map the listening port (default: 1 when listening)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Use proxy to reach tor hidden services (default: same as -proxy)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Username for JSON-RPC connections</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Warning</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Warning: This version is obsolete, upgrade required!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>You need to rebuild the databases using -reindex to change -txindex</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>wallet.dat corrupt, salvage failed</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-50"/>
<source>Password for JSON-RPC connections</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-67"/>
<source>Allow JSON-RPC connections from specified IP address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+76"/>
<source>Send commands to node running on <ip> (default: 127.0.0.1)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-120"/>
<source>Execute command when the best block changes (%s in cmd is replaced by block hash)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+147"/>
<source>Upgrade wallet to latest format</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-21"/>
<source>Set key pool size to <n> (default: 100)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-12"/>
<source>Rescan the block chain for missing wallet transactions</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+35"/>
<source>Use OpenSSL (https) for JSON-RPC connections</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-26"/>
<source>Server certificate file (default: server.cert)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Server private key (default: server.pem)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-151"/>
<source>Acceptable ciphers (default: TLSv1+HIGH:!SSLv2:!aNULL:!eNULL:!AH:!3DES:@STRENGTH)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+165"/>
<source>This help message</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>Unable to bind to %s on this computer (bind returned error %d, %s)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-91"/>
<source>Connect through socks proxy</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-10"/>
<source>Allow DNS lookups for -addnode, -seednode and -connect</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+55"/>
<source>Loading addresses...</source>
<translation>Ŝarĝante adresojn...</translation>
</message>
<message>
<location line="-35"/>
<source>Error loading wallet.dat: Wallet corrupted</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Error loading wallet.dat: Wallet requires newer version of Bitcoin</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+93"/>
<source>Wallet needed to be rewritten: restart Bitcoin to complete</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-95"/>
<source>Error loading wallet.dat</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+28"/>
<source>Invalid -proxy address: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+56"/>
<source>Unknown network specified in -onlynet: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-1"/>
<source>Unknown -socks proxy version requested: %i</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-96"/>
<source>Cannot resolve -bind address: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Cannot resolve -externalip address: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+44"/>
<source>Invalid amount for -paytxfee=<amount>: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Invalid amount</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-6"/>
<source>Insufficient funds</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+10"/>
<source>Loading block index...</source>
<translation>Ŝarĝante blok-indekson...</translation>
</message>
<message>
<location line="-57"/>
<source>Add a node to connect to and attempt to keep the connection open</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-25"/>
<source>Unable to bind to %s on this computer. Bitcoin is probably already running.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+64"/>
<source>Fee per KB to add to transactions you send</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+19"/>
<source>Loading wallet...</source>
<translation>Ŝarĝante monujon...</translation>
</message>
<message>
<location line="-52"/>
<source>Cannot downgrade wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Cannot write default address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+64"/>
<source>Rescanning...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-57"/>
<source>Done loading</source>
<translation>Ŝarĝado finitas</translation>
</message>
<message>
<location line="+82"/>
<source>To use the %s option</source>
<translation>Por uzi la opcion %s</translation>
</message>
<message>
<location line="-74"/>
<source>Error</source>
<translation>Eraro</translation>
</message>
<message>
<location line="-31"/>
<source>You must set rpcpassword=<password> in the configuration file:
%s
If the file does not exist, create it with owner-readable-only file permissions.</source>
<translation type="unfinished"/>
</message>
</context>
</TS> | <location line="+19"/>
<source>Port of the proxy (e.g. 9050)</source>
<translation type="unfinished"/>
</message> |
package.py | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
from spack import *
TUNE_VARIANTS = (
'none',
'cp2k-lmax-4',
'cp2k-lmax-5',
'cp2k-lmax-6',
'cp2k-lmax-7',
'molgw-lmax-4',
'molgw-lmax-5',
'molgw-lmax-6',
'molgw-lmax-7',
)
class Libint(AutotoolsPackage):
"""Libint is a high-performance library for computing
Gaussian integrals in quantum mechanics.
"""
homepage = "https://github.com/evaleev/libint"
url = "https://github.com/evaleev/libint/archive/v2.1.0.tar.gz"
version('2.6.0', sha256='4ae47e8f0b5632c3d2a956469a7920896708e9f0e396ec10071b8181e4c8d9fa')
version('2.4.2', sha256='86dff38065e69a3a51d15cfdc638f766044cb87e5c6682d960c14f9847e2eac3')
version('2.4.1', sha256='0513be124563fdbbc7cd3c7043e221df1bda236a037027ba9343429a27db8ce4')
version('2.4.0', sha256='52eb16f065406099dcfaceb12f9a7f7e329c9cfcf6ed9bfacb0cff7431dd6019')
version('2.2.0', sha256='f737d485f33ac819d7f28c6ce303b1f3a2296bfd2c14f7c1323f8c5d370bb0e3')
version('2.1.0', sha256='43c453a1663aa1c55294df89ff9ece3aefc8d1bbba5ea31dbfe71b2d812e24c8')
version('1.1.6', sha256='f201b0c621df678cfe8bdf3990796b8976ff194aba357ae398f2f29b0e2985a6')
version('1.1.5', sha256='ec8cd4a4ba1e1a98230165210c293632372f0e573acd878ed62e5ec6f8b6174b')
variant('fortran', default=False,
description='Build & install Fortran bindings')
variant('tune', default='none', multi=False,
values=TUNE_VARIANTS,
description='Tune libint for use with the given package')
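# (Illustrative note, not part of the original recipe: with the variants above, a
# CP2K-oriented build could be requested with a spec along the lines of
# `spack install libint@2.6.0 +fortran tune=cp2k-lmax-5`; the exact spec syntax
# depends on your Spack version and configuration.)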
# Build dependencies
depends_on('[email protected]:', type='build')
depends_on('automake', type='build')
depends_on('libtool', type='build')
# Libint 2 dependencies
depends_on('boost', when='@2:')
depends_on('gmp', when='@2:')
for tvariant in TUNE_VARIANTS[1:]:
conflicts('tune={0}'.format(tvariant), when='@:2.5.99',
msg=('for versions prior to 2.6, tuning for specific '
'codes/configurations is not supported'))
def url_for_version(self, version):
base_url = "https://github.com/evaleev/libint/archive"
if version == Version('1.0.0'):
return "{0}/LIBINT_1_00.tar.gz".format(base_url)
elif version < Version('2.1.0'):
return "{0}/release-{1}.tar.gz".format(base_url, version.dashed)
else:
return "{0}/v{1}.tar.gz".format(base_url, version)
def autoreconf(self, spec, prefix):
libtoolize()
aclocal('-I', 'lib/autoconf')
autoconf()
if '@2.6.0:' in spec:
# skip tarball creation and removal of dir with generated code
filter_file(r'^(export::.*)\s+tgz$', r'\1', 'export/Makefile')
@property
def optflags(self):
flags = '-O2'
# Optimizations for the Intel compiler, suggested by CP2K
# See ../libxc/package.py for rationale and doc.
if '%intel' in self.spec:
flags += ' -xSSE4.2 -axAVX,CORE-AVX2 -ipo'
return flags
def setup_build_environment(self, env):
# Set optimization flags
env.set('CFLAGS', self.optflags)
env.set('CXXFLAGS', self.optflags)
# Change AR to xiar if we compile with Intel and we
# find the executable
if '%intel' in self.spec and which('xiar'):
env.set('AR', 'xiar')
def configure_args(self):
config_args = ['--enable-shared']
optflags = self.optflags
# Optimization flag names have changed in libint 2
if self.version < Version('2.0.0'):
config_args.extend([
'--with-cc-optflags={0}'.format(optflags),
'--with-cxx-optflags={0}'.format(optflags)
])
else:
config_args.extend([
'--with-cxx-optflags={0}'.format(optflags),
'--with-cxxgen-optflags={0}'.format(optflags)
])
# Options required by CP2K, removed in libint 2
if self.version < Version('2.0.0'):
config_args.extend([
'--with-libint-max-am=5',
'--with-libderiv-max-am1=4'
])
if '@2.6.0:' in self.spec:
config_args += ['--with-libint-exportdir=generated']
tune_value = self.spec.variants['tune'].value
if tune_value.startswith('cp2k'):
lmax = int(tune_value.split('-lmax-')[1])
config_args += [
'--enable-eri=1',
'--enable-eri2=1',
'--enable-eri3=1',
'--with-max-am={0}'.format(lmax),
'--with-eri-max-am={0},{1}'.format(lmax, lmax - 1),
'--with-eri2-max-am={0},{1}'.format(lmax + 2, lmax + 1),
'--with-eri3-max-am={0},{1}'.format(lmax + 2, lmax + 1),
'--with-opt-am=3',
# keep code-size at an acceptable limit,
# cf. https://github.com/evaleev/libint/wiki#program-specific-notes:
'--enable-generic-code',
'--disable-unrolling',
]
if tune_value.startswith('molgw'):
lmax = int(tune_value.split('-lmax-')[1])
config_args += [
'--enable-1body=1',
'--enable-eri=0',
'--enable-eri2=0',
'--enable-eri3=0',
'--with-multipole-max-order=0',
'--with-max-am={0}'.format(lmax),
'--with-eri-max-am={0}'.format(lmax),
'--with-eri2-max-am={0}'.format(lmax),
'--with-eri3-max-am={0}'.format(lmax),
'--with-opt-am=2',
'--enable-contracted-ints',
# keep code-size at an acceptable limit,
# cf. https://github.com/evaleev/libint/wiki#program-specific-notes: | return config_args
@property
def build_targets(self):
if '@2.6.0:' in self.spec:
return ['export']
return []
@when('@2.6.0:')
def install(self, spec, prefix):
"""
Starting from libint 2.6.0 we're using the 2-stage build
to get support for the Fortran bindings, required by some
packages (CP2K notably).
"""
# upstream says that using configure/make for the generated code
# is deprecated and one should use CMake, but as of the recent
# 2.7.0.b1 release it still doesn't work
with working_dir(os.path.join(self.build_directory, 'generated')):
# straight from the AutotoolsPackage class:
options = [
'--prefix={0}'.format(prefix),
'--enable-shared',
'--with-cxx-optflags={0}'.format(self.optflags),
]
if '+fortran' in spec:
options += ['--enable-fortran']
configure = Executable('./configure')
configure(*options)
make()
make('install')
def patch(self):
# Use Fortran compiler to link the Fortran example, not the C++
# compiler
if '+fortran' in self.spec and self.spec.satisfies('%nvhpc'):
filter_file('$(CXX) $(CXXFLAGS)', '$(FC) $(FCFLAGS)',
'export/fortran/Makefile', string=True) | '--enable-generic-code',
'--disable-unrolling',
]
|
main.go | package main
import (
"fmt"
"net/http"
"os"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/prometheus/common/log"
"github.com/totvslabs/couchbase-exporter/client"
"github.com/totvslabs/couchbase-exporter/collector"
kingpin "gopkg.in/alecthomas/kingpin.v2"
)
// nolint: gochecknoglobals,lll
var (
version = "dev"
app = kingpin.New("couchbase-exporter", "exports couchbase metrics in the prometheus format")
listenAddress = app.Flag("web.listen-address", "Address to listen on for web interface and telemetry").Default(":9420").String()
metricsPath = app.Flag("web.telemetry-path", "Path under which to expose metrics").Default("/metrics").String()
couchbaseURL = app.Flag("couchbase.url", "Couchbase URL to scrape").Default("http://localhost:8091").String()
couchbaseUsername = app.Flag("couchbase.username", "Couchbase username").String()
couchbasePassword = app.Flag("couchbase.password", "Couchbase password").OverrideDefaultFromEnvar("COUCHBASE_PASSWORD").String()
tasks = app.Flag("collectors.tasks", "Whether to collect tasks metrics").Default("true").Bool()
buckets = app.Flag("collectors.buckets", "Whether to collect buckets metrics").Default("true").Bool()
nodes = app.Flag("collectors.nodes", "Whether to collect nodes metrics").Default("true").Bool()
cluster = app.Flag("collectors.cluster", "Whether to collect cluster metrics").Default("true").Bool()
)
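// (Illustrative note, not part of the original source: given the flags defined
// above, a typical invocation of this exporter might look like
//
//   COUCHBASE_PASSWORD=secret ./couchbase-exporter \
//     --couchbase.url=http://localhost:8091 \
//     --couchbase.username=admin \
//     --web.listen-address=:9420
//
// where the binary name, credentials and addresses are placeholders.)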
func | () {
log.AddFlags(app)
app.Version(version)
app.HelpFlag.Short('h')
kingpin.MustParse(app.Parse(os.Args[1:]))
log.Infof("starting couchbase-exporter %s...", version)
var client = client.New(*couchbaseURL, *couchbaseUsername, *couchbasePassword)
if *tasks {
prometheus.MustRegister(collector.NewTasksCollector(client))
}
if *buckets {
prometheus.MustRegister(collector.NewBucketsCollector(client))
}
if *nodes {
prometheus.MustRegister(collector.NewNodesCollector(client))
}
if *cluster {
prometheus.MustRegister(collector.NewClusterCollector(client))
}
http.Handle(*metricsPath, promhttp.Handler())
http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
fmt.Fprintf(w,
`
<html>
<head><title>Couchbase Exporter</title></head>
<body>
<h1>Couchbase Exporter</h1>
<p><a href="`+*metricsPath+`">Metrics</a></p>
</body>
</html>
`)
})
log.Infof("server listening on %s", *listenAddress)
if err := http.ListenAndServe(*listenAddress, nil); err != nil {
log.Fatalf("failed to start server: %v", err)
}
}
| main |
test_api_build.py | """
This module tests the build API. These are high-level integration tests.
"""
import base64
from collections import OrderedDict
from glob import glob
import logging
import os
import re
import subprocess
import sys
import json
import uuid
# for version
import conda
from conda_build.conda_interface import PY3, url_path, LinkError, CondaError, cc_conda_build
import conda_build
from binstar_client.commands import remove, show
from binstar_client.errors import NotFound
from pkg_resources import parse_version
import pytest
import yaml
import tarfile
from conda_build import api, exceptions, __version__
from conda_build.build import VersionOrder
from conda_build.render import finalize_metadata
from conda_build.utils import (copy_into, on_win, check_call_env, convert_path_for_cygwin_or_msys2,
package_has_file, check_output_env, get_conda_operation_locks)
from conda_build.os_utils.external import find_executable
from conda_build.exceptions import DependencyNeedsBuildingError
from .utils import is_valid_dir, metadata_dir, fail_dir, add_mangling, FileNotFoundError
# define a few commonly used recipes - use os.path.join(metadata_dir, recipe) elsewhere
empty_sections = os.path.join(metadata_dir, "empty_sections")
def represent_ordereddict(dumper, data):
value = []
for item_key, item_value in data.items():
node_key = dumper.represent_data(item_key)
node_value = dumper.represent_data(item_value)
value.append((node_key, node_value))
return yaml.nodes.MappingNode(u'tag:yaml.org,2002:map', value)
yaml.add_representer(OrderedDict, represent_ordereddict)
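# (Descriptive note: with PyYAML's default Dumper, an OrderedDict would otherwise be
# serialized with a python-specific !!python/object/apply tag; registering this
# representer makes the OrderedDict-based recipe data below dump as plain YAML
# mappings while preserving key order.)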
class AnacondaClientArgs(object):
def __init__(self, specs, token=None, site=None, log_level=logging.INFO, force=False):
from binstar_client.utils import parse_specs
self.specs = [parse_specs(specs)]
self.spec = self.specs[0]
self.token = token
self.site = site
self.log_level = log_level
self.force = force
def | (cwd=None):
if not cwd:
cwd = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
tag = check_output_env(["git", "describe", "--abbrev=0"], cwd=cwd).rstrip()
if PY3:
tag = tag.decode("utf-8")
return tag
@pytest.fixture(params=[dirname for dirname in os.listdir(metadata_dir)
if is_valid_dir(metadata_dir, dirname)])
def recipe(request):
return os.path.join(metadata_dir, request.param)
# This tests any of the folders in the test-recipes/metadata folder that don't start with _
def test_recipe_builds(recipe, testing_config, testing_workdir, monkeypatch):
# These variables are defined solely for testing purposes,
# so they can be checked within build scripts
monkeypatch.setenv("CONDA_TEST_VAR", "conda_test")
monkeypatch.setenv("CONDA_TEST_VAR_2", "conda_test_2")
api.build(recipe, config=testing_config)
def test_token_upload(testing_workdir, testing_metadata):
folder_uuid = uuid.uuid4().hex
# generated with conda_test_account user, command:
# anaconda auth --create --name CONDA_BUILD_UPLOAD_TEST --scopes 'api repos conda'
args = AnacondaClientArgs(specs="conda_build_test/test_token_upload_" + folder_uuid,
token="co-143399b8-276e-48db-b43f-4a3de839a024",
force=True)
with pytest.raises(NotFound):
show.main(args)
testing_metadata.meta['package']['name'] = '_'.join([testing_metadata.name(), folder_uuid])
testing_metadata.config.token = args.token
# the folder with the test recipe to upload
api.build(testing_metadata)
# make sure that the package is available (should raise if it doesn't)
show.main(args)
# clean up - we don't actually want this package to exist
remove.main(args)
# verify cleanup:
with pytest.raises(NotFound):
show.main(args)
@pytest.mark.parametrize("service_name", ["binstar", "anaconda"])
def test_no_anaconda_upload_condarc(service_name, testing_workdir, testing_config, capfd):
api.build(empty_sections, config=testing_config)
output, error = capfd.readouterr()
assert "Automatic uploading is disabled" in output, error
def test_git_describe_info_on_branch(testing_config):
recipe_path = os.path.join(metadata_dir, "_git_describe_number_branch")
m = api.render(recipe_path, config=testing_config)[0][0]
output = api.get_output_file_path(m)[0]
# missing hash because we set custom build string in meta.yaml
test_path = os.path.join(testing_config.croot, testing_config.host_subdir,
"git_describe_number_branch-1.20.2.0-1_g82c6ba6.tar.bz2")
assert test_path == output
def test_no_include_recipe_config_arg(testing_metadata):
"""Two ways to not include recipe: build/include_recipe: False in meta.yaml; or this.
The former is tested with a specific recipe."""
outputs = api.build(testing_metadata)
assert package_has_file(outputs[0], "info/recipe/meta.yaml")
# make sure that it is not there when the command line flag is passed
testing_metadata.config.include_recipe = False
testing_metadata.meta['build']['number'] = 2
# We cannot test packages without recipes as we cannot render them
output_file = api.build(testing_metadata, notest=True)[0]
assert not package_has_file(output_file, "info/recipe/meta.yaml")
def test_no_include_recipe_meta_yaml(testing_metadata, testing_config):
# first, make sure that the recipe is there by default. This test is copied from above
# as a sanity check here.
outputs = api.build(testing_metadata)
assert package_has_file(outputs[0], "info/recipe/meta.yaml")
output_file = api.build(os.path.join(metadata_dir, '_no_include_recipe'),
config=testing_config)[0]
assert not package_has_file(output_file, "info/recipe/meta.yaml")
def test_early_abort(testing_config, capfd):
"""There have been some problems with conda-build dropping out early.
Make sure we aren't causing them"""
api.build(os.path.join(metadata_dir, '_test_early_abort'), config=testing_config)
output, error = capfd.readouterr()
assert "Hello World" in output
def test_output_build_path_git_source(testing_workdir, testing_config):
recipe_path = os.path.join(metadata_dir, "source_git_jinja2")
m = api.render(recipe_path, config=testing_config)[0][0]
output = api.get_output_file_paths(m)[0]
_hash = m.hash_dependencies()
test_path = os.path.join(testing_config.croot, testing_config.host_subdir,
"conda-build-test-source-git-jinja2-1.20.2-py{}{}{}_0_g262d444.tar.bz2".format(
sys.version_info.major, sys.version_info.minor, _hash))
assert output == test_path
@pytest.mark.serial
def test_build_with_no_activate_does_not_activate():
api.build(os.path.join(metadata_dir, '_set_env_var_no_activate_build'), activate=False,
anaconda_upload=False)
@pytest.mark.serial
def test_build_with_activate_does_activate():
api.build(os.path.join(metadata_dir, '_set_env_var_activate_build'), activate=True,
anaconda_upload=False)
@pytest.mark.skipif(sys.platform == "win32",
reason="no binary prefix manipulation done on windows.")
def test_binary_has_prefix_files(testing_workdir, testing_config):
api.build(os.path.join(metadata_dir, '_binary_has_prefix_files'), config=testing_config)
def test_relative_path_git_versioning(testing_workdir, testing_config):
# Setting up conda_build_test_recipe is a manual step: clone it at the same
# level as your conda-build source.
cwd = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..',
'conda_build_test_recipe'))
tag = describe_root(cwd)
output = api.get_output_file_path(os.path.join(metadata_dir,
"_source_git_jinja2_relative_path"),
config=testing_config)[0]
assert tag in output
def test_relative_git_url_git_versioning(testing_workdir, testing_config):
cwd = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..',
'conda_build_test_recipe'))
tag = describe_root(cwd)
recipe = os.path.join(metadata_dir, "_source_git_jinja2_relative_git_url")
output = api.get_output_file_path(recipe, config=testing_config)[0]
assert tag in output
def test_dirty_variable_available_in_build_scripts(testing_workdir, testing_config):
recipe = os.path.join(metadata_dir, "_dirty_skip_section")
testing_config.dirty = True
api.build(recipe, config=testing_config)
with pytest.raises(subprocess.CalledProcessError):
testing_config.dirty = False
api.build(recipe, config=testing_config)
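# (Descriptive note: the helper below writes a fake, always-failing executable with
# the given name into `folder`; tests prepend that folder to PATH to "hide" the real
# tool and verify that conda-build picks up the checkout tool from the build
# environment instead.)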
def dummy_executable(folder, exename):
# empty prefix by default - extra bit at beginning of file
if sys.platform == "win32":
exename = exename + ".bat"
dummyfile = os.path.join(folder, exename)
if sys.platform == "win32":
prefix = "@echo off\n"
else:
prefix = "#!/bin/bash\nexec 1>&2\n"
with open(dummyfile, 'w') as f:
f.write(prefix + """
echo ******* You have reached the dummy {}. It is likely there is a bug in
echo ******* conda that makes it not add the _build/bin directory onto the
echo ******* PATH before running the source checkout tool
exit -1
""".format(exename))
if sys.platform != "win32":
import stat
st = os.stat(dummyfile)
os.chmod(dummyfile, st.st_mode | stat.S_IEXEC)
return exename
def test_checkout_tool_as_dependency(testing_workdir, testing_config, monkeypatch):
# "hide" svn by putting a known bad one on PATH
exename = dummy_executable(testing_workdir, "svn")
monkeypatch.setenv("PATH", testing_workdir, prepend=os.pathsep)
FNULL = open(os.devnull, 'w')
with pytest.raises(subprocess.CalledProcessError, message="Dummy svn was not executed"):
check_call_env([exename, '--version'], stderr=FNULL)
FNULL.close()
env = dict(os.environ)
env["PATH"] = os.pathsep.join([testing_workdir, env["PATH"]])
api.build(os.path.join(metadata_dir, '_checkout_tool_as_dependency'), config=testing_config)
platforms = ["64" if sys.maxsize > 2**32 else "32"]
if sys.platform == "win32":
platforms = set(["32", ] + platforms)
compilers = ["2.7", "3.4", "3.5"]
msvc_vers = ['9.0', '10.0', '14.0']
else:
msvc_vers = []
compilers = [".".join([str(sys.version_info.major), str(sys.version_info.minor)])]
@pytest.mark.skipif(sys.platform != "win32", reason="MSVC only on windows")
@pytest.mark.parametrize("msvc_ver", msvc_vers)
def test_build_msvc_compiler(msvc_ver, monkeypatch):
# verify that the correct compiler is available
cl_versions = {"9.0": 15,
"10.0": 16,
"11.0": 17,
"12.0": 18,
"14.0": 19}
monkeypatch.setenv('CONDATEST_MSVC_VER', msvc_ver)
monkeypatch.setenv('CL_EXE_VERSION', str(cl_versions[msvc_ver]))
try:
# Always build Python 2.7 - but set MSVC version manually via Jinja template
api.build(os.path.join(metadata_dir, '_build_msvc_compiler'), python="2.7")
except:
raise
finally:
del os.environ['CONDATEST_MSVC_VER']
del os.environ['CL_EXE_VERSION']
@pytest.mark.parametrize("platform", platforms)
@pytest.mark.parametrize("target_compiler", compilers)
def test_cmake_generator(platform, target_compiler, testing_workdir, testing_config):
testing_config.variant['python'] = target_compiler
api.build(os.path.join(metadata_dir, '_cmake_generator'), config=testing_config)
@pytest.mark.skipif(sys.platform == "win32",
reason="No windows symlinks")
def test_symlink_fail(testing_workdir, testing_config, capfd):
with pytest.raises((SystemExit, FileNotFoundError)):
api.build(os.path.join(fail_dir, "symlinks"), config=testing_config)
# output, error = capfd.readouterr()
# assert error.count("Error") == 6, "did not find appropriate count of Error in: " + error
def test_pip_in_meta_yaml_fail(testing_workdir, testing_config):
with pytest.raises(ValueError) as exc:
api.build(os.path.join(fail_dir, "pip_reqs_fail_informatively"), config=testing_config)
assert "environment.yml" in str(exc)
def test_recursive_fail(testing_workdir, testing_config):
with pytest.raises((RuntimeError, exceptions.DependencyNeedsBuildingError)) as exc:
api.build(os.path.join(fail_dir, "recursive-build"), config=testing_config)
# indentation critical here. If you indent this, and the exception is not raised, then
# the exc variable here isn't really completely created and shows really strange errors:
# AttributeError: 'ExceptionInfo' object has no attribute 'typename'
assert "recursive-build2" in str(exc.value)
def test_jinja_typo(testing_workdir, testing_config):
with pytest.raises(SystemExit) as exc:
api.build(os.path.join(fail_dir, "source_git_jinja2_oops"), config=testing_config)
assert "GIT_DSECRIBE_TAG" in exc.exconly()
@pytest.mark.serial
def test_skip_existing(testing_workdir, testing_config, capfd):
# build the recipe first
api.build(empty_sections, config=testing_config)
api.build(empty_sections, config=testing_config, skip_existing=True)
output, error = capfd.readouterr()
assert "are already built" in output
@pytest.mark.serial
def test_skip_existing_url(testing_metadata, testing_workdir, capfd):
# make sure that it is built
outputs = api.build(testing_metadata)
# Copy our package into some new folder
output_dir = os.path.join(testing_workdir, 'someoutput')
platform = os.path.join(output_dir, testing_metadata.config.host_subdir)
os.makedirs(platform)
copy_into(outputs[0], os.path.join(platform, os.path.basename(outputs[0])))
# create the index so conda can find the file
api.update_index(platform, config=testing_metadata.config)
# HACK: manually create noarch location there, so that conda 4.3.2+ considers it a valid channel
noarch = os.path.join(output_dir, 'noarch')
os.makedirs(noarch)
api.update_index(noarch, config=testing_metadata.config)
testing_metadata.config.skip_existing = True
testing_metadata.config.channel_urls = [url_path(output_dir)]
api.build(testing_metadata)
output, error = capfd.readouterr()
assert "are already built" in output
assert url_path(testing_metadata.config.croot) in output
def test_failed_tests_exit_build(testing_workdir, testing_config):
"""https://github.com/conda/conda-build/issues/1112"""
with pytest.raises(SystemExit) as exc:
api.build(os.path.join(metadata_dir, "_test_failed_test_exits"), config=testing_config)
assert 'TESTS FAILED' in str(exc)
def test_requirements_txt_for_run_reqs(testing_workdir, testing_config):
"""
If run reqs are blank, then conda-build looks for requirements.txt in the recipe folder.
There has been a report of an issue with unsatisfiable requirements at
https://github.com/Anaconda-Platform/anaconda-server/issues/2565
This test attempts to reproduce those conditions: a channel other than defaults with this
requirements.txt
"""
testing_config.channel_urls = ('conda_build_test', )
api.build(os.path.join(metadata_dir, "_requirements_txt_run_reqs"), config=testing_config)
@pytest.mark.serial
def test_compileall_compiles_all_good_files(testing_workdir, testing_config):
output = api.build(os.path.join(metadata_dir, "_compile-test"), config=testing_config)[0]
good_files = ['f1.py', 'f3.py']
bad_file = 'f2_bad.py'
for f in good_files:
assert package_has_file(output, f)
# look for the compiled file also
assert package_has_file(output, add_mangling(f))
assert package_has_file(output, bad_file)
assert not package_has_file(output, add_mangling(bad_file))
def test_render_setup_py_old_funcname(testing_workdir, testing_config, caplog):
api.build(os.path.join(metadata_dir, "_source_setuptools"), config=testing_config)
assert "Deprecation notice: the load_setuptools function has been renamed to " in caplog.text
@pytest.mark.skipif(not on_win, reason="only Windows is insane enough to have backslashes in paths")
def test_backslash_in_always_include_files_path(testing_config):
api.build(os.path.join(metadata_dir, '_backslash_in_include_files'))
with pytest.raises(RuntimeError):
api.build(os.path.join(fail_dir, 'backslash_in_include_files'))
def test_build_metadata_object(testing_metadata):
api.build(testing_metadata)
@pytest.mark.skipif(on_win, reason="fortran compilers on win are hard.")
def test_numpy_setup_py_data(testing_config):
recipe_path = os.path.join(metadata_dir, '_numpy_setup_py_data')
m = api.render(recipe_path, config=testing_config, numpy="1.11")[0][0]
_hash = m.hash_dependencies()
assert os.path.basename(api.get_output_file_path(m)[0]) == \
"load_setup_py_test-1.0a1-np111py{0}{1}{2}_1.tar.bz2".format(
sys.version_info.major, sys.version_info.minor, _hash)
def test_relative_git_url_submodule_clone(testing_workdir, testing_config, monkeypatch):
"""
A multi-part test encompassing the following checks:
1. That git submodules identified with both relative and absolute URLs can be mirrored
and cloned.
2. That changes pushed to the original repository are updated in the mirror and finally
reflected in the package version and filename via `GIT_DESCRIBE_TAG`.
3. That `source.py` is using `check_call_env` and `check_output_env` and that those
functions are using tools from the build env.
"""
toplevel = os.path.join(testing_workdir, 'toplevel')
os.mkdir(toplevel)
relative_sub = os.path.join(testing_workdir, 'relative_sub')
os.mkdir(relative_sub)
absolute_sub = os.path.join(testing_workdir, 'absolute_sub')
os.mkdir(absolute_sub)
sys_git_env = os.environ.copy()
sys_git_env['GIT_AUTHOR_NAME'] = 'conda-build'
sys_git_env['GIT_AUTHOR_EMAIL'] = '[email protected]'
sys_git_env['GIT_COMMITTER_NAME'] = 'conda-build'
sys_git_env['GIT_COMMITTER_EMAIL'] = '[email protected]'
# Find the git executable before putting our dummy one on PATH.
git = find_executable('git')
# Put the broken git on os.environ["PATH"]
exename = dummy_executable(testing_workdir, 'git')
monkeypatch.setenv("PATH", testing_workdir, prepend=os.pathsep)
# .. and ensure it gets run (and fails).
FNULL = open(os.devnull, 'w')
# Strangely ..
# stderr=FNULL suppresses the output from echo on OS X whereas
# stdout=FNULL suppresses the output from echo on Windows
with pytest.raises(subprocess.CalledProcessError, message="Dummy git was not executed"):
check_call_env([exename, '--version'], stdout=FNULL, stderr=FNULL)
FNULL.close()
for tag in range(2):
os.chdir(absolute_sub)
if tag == 0:
check_call_env([git, 'init'], env=sys_git_env)
with open('absolute', 'w') as f:
f.write(str(tag))
check_call_env([git, 'add', 'absolute'], env=sys_git_env)
check_call_env([git, 'commit', '-m', 'absolute{}'.format(tag)],
env=sys_git_env)
os.chdir(relative_sub)
if tag == 0:
check_call_env([git, 'init'], env=sys_git_env)
with open('relative', 'w') as f:
f.write(str(tag))
check_call_env([git, 'add', 'relative'], env=sys_git_env)
check_call_env([git, 'commit', '-m', 'relative{}'.format(tag)],
env=sys_git_env)
os.chdir(toplevel)
if tag == 0:
check_call_env([git, 'init'], env=sys_git_env)
with open('toplevel', 'w') as f:
f.write(str(tag))
check_call_env([git, 'add', 'toplevel'], env=sys_git_env)
check_call_env([git, 'commit', '-m', 'toplevel{}'.format(tag)],
env=sys_git_env)
if tag == 0:
check_call_env([git, 'submodule', 'add',
convert_path_for_cygwin_or_msys2(git, absolute_sub), 'absolute'],
env=sys_git_env)
check_call_env([git, 'submodule', 'add', '../relative_sub', 'relative'],
env=sys_git_env)
else:
# Once we use a more recent Git for Windows than 2.6.4 on Windows or m2-git we
# can change this to `git submodule update --recursive`.
check_call_env([git, 'submodule', 'foreach', git, 'pull'], env=sys_git_env)
check_call_env([git, 'commit', '-am', 'added submodules@{}'.format(tag)],
env=sys_git_env)
check_call_env([git, 'tag', '-a', str(tag), '-m', 'tag {}'.format(tag)],
env=sys_git_env)
# It is possible to use `Git for Windows` here too, though you *must* not use a different
# (type of) git than the one used above to add the absolute submodule, because .gitmodules
# stores the absolute path and that is not interchangeable between MSYS2 and native Win32.
#
# Also, git is set to False here because it needs to be rebuilt with the longer prefix. As
# things stand, my _b_env folder for this test contains more than 80 characters.
requirements = ('requirements', OrderedDict([
('build',
['git # [False]',
'm2-git # [win]',
'm2-filesystem # [win]'])]))
recipe_dir = os.path.join(testing_workdir, 'recipe')
if not os.path.exists(recipe_dir):
os.makedirs(recipe_dir)
filename = os.path.join(testing_workdir, 'recipe', 'meta.yaml')
data = OrderedDict([
('package', OrderedDict([
('name', 'relative_submodules'),
('version', '{{ GIT_DESCRIBE_TAG }}')])),
('source', OrderedDict([
('git_url', toplevel),
('git_tag', str(tag))])),
requirements,
('build', OrderedDict([
('script',
['git --no-pager submodule --quiet foreach git log -n 1 --pretty=format:%%s > '
'%PREFIX%\\summaries.txt # [win]',
'git --no-pager submodule --quiet foreach git log -n 1 --pretty=format:%s > '
'$PREFIX/summaries.txt # [not win]'])
])),
('test', OrderedDict([
('commands',
['echo absolute{}relative{} > %PREFIX%\\expected_summaries.txt # [win]'
.format(tag, tag),
'fc.exe /W %PREFIX%\\expected_summaries.txt %PREFIX%\\summaries.txt # [win]',
'echo absolute{}relative{} > $PREFIX/expected_summaries.txt # [not win]'
.format(tag, tag),
'diff -wuN ${PREFIX}/expected_summaries.txt ${PREFIX}/summaries.txt # [not win]'])
]))
])
with open(filename, 'w') as outfile:
outfile.write(yaml.dump(data, default_flow_style=False, width=999999999))
# Reset the path because our broken, dummy `git` would cause `render_recipe`
# to fail, while no `git` will cause the build_dependencies to be installed.
monkeypatch.undo()
# This will (after one spin round the loop) install and run 'git' with the
# build env prepended to os.environ['PATH']
metadata = api.render(testing_workdir, config=testing_config)[0][0]
output = api.get_output_file_path(metadata, config=testing_config)[0]
assert ("relative_submodules-{}-".format(tag) in output)
api.build(metadata, config=testing_config)
def test_noarch(testing_workdir):
filename = os.path.join(testing_workdir, 'meta.yaml')
for noarch in (False, True):
data = OrderedDict([
('package', OrderedDict([
('name', 'test'),
('version', '0.0.0')])),
('build', OrderedDict([
('noarch', str(noarch))]))
])
with open(filename, 'w') as outfile:
outfile.write(yaml.dump(data, default_flow_style=False, width=999999999))
output = api.get_output_file_path(testing_workdir)[0]
assert (os.path.sep + "noarch" + os.path.sep in output or not noarch)
assert (os.path.sep + "noarch" + os.path.sep not in output or noarch)
def test_disable_pip(testing_config, testing_metadata):
testing_metadata.config.disable_pip = True
testing_metadata.meta['build']['script'] = 'python -c "import pip; print(pip.__version__)"'
with pytest.raises(subprocess.CalledProcessError):
api.build(testing_metadata)
testing_metadata.meta['build']['script'] = ('python -c "import setuptools; '
'print(setuptools.__version__)"')
with pytest.raises(subprocess.CalledProcessError):
api.build(testing_metadata)
@pytest.mark.skipif(not sys.platform.startswith('linux'),
reason="rpath fixup only done on Linux so far.")
def test_rpath_linux(testing_config):
api.build(os.path.join(metadata_dir, "_rpath"), config=testing_config)
def test_noarch_none_value(testing_workdir, testing_config):
recipe = os.path.join(metadata_dir, "_noarch_none")
with pytest.raises(exceptions.CondaBuildException):
api.build(recipe, config=testing_config)
def test_noarch_foo_value(testing_config):
outputs = api.build(os.path.join(metadata_dir, "noarch_generic"), config=testing_config)
metadata = json.loads(package_has_file(outputs[0], 'info/index.json').decode())
assert metadata['noarch'] == "generic"
def test_about_json_content(testing_metadata):
outputs = api.build(testing_metadata)
about = json.loads(package_has_file(outputs[0], 'info/about.json').decode())
assert 'conda_version' in about and about['conda_version'] == conda.__version__
assert 'conda_build_version' in about and about['conda_build_version'] == __version__
assert 'channels' in about and about['channels']
try:
assert 'env_vars' in about and about['env_vars']
except AssertionError:
# new versions of conda support this, so we should raise errors.
if VersionOrder(conda.__version__) >= VersionOrder('4.2.10'):
raise
else:
pass
assert 'root_pkgs' in about and about['root_pkgs']
@pytest.mark.xfail(parse_version(conda.__version__) < parse_version("4.3.14"),
reason="new noarch supported starting with conda 4.3.14")
def test_noarch_python_with_tests(testing_config):
recipe = os.path.join(metadata_dir, "_noarch_python_with_tests")
api.build(recipe, config=testing_config)
def test_noarch_python_1(testing_config):
output = api.build(os.path.join(metadata_dir, "_noarch_python"), config=testing_config)[0]
assert package_has_file(output, 'info/files') != ''
extra = json.loads(package_has_file(output, 'info/link.json').decode())
assert 'noarch' in extra
assert 'entry_points' in extra['noarch']
assert 'type' in extra['noarch']
assert 'package_metadata_version' in extra
def test_legacy_noarch_python(testing_config):
output = api.build(os.path.join(metadata_dir, "_legacy_noarch_python"),
config=testing_config)[0]
# make sure that the package is going into the noarch folder
assert os.path.basename(os.path.dirname(output)) == 'noarch'
@pytest.mark.skipif(parse_version(conda.__version__) < parse_version("4.5"),
reason="full preferred env implementation deferred to conda 4.5")
def test_preferred_env(testing_config):
recipe = os.path.join(metadata_dir, "_preferred_env")
output = api.build(recipe, config=testing_config)[0]
extra = json.loads(package_has_file(output, 'info/link.json').decode())
assert 'preferred_env' in extra
assert 'name' in extra['preferred_env']
assert 'executable_paths' in extra['preferred_env']
exe_paths = extra['preferred_env']['executable_paths']
if on_win:
assert exe_paths == ['Scripts/exepath1.bat', 'Scripts/exepath2.bat']
else:
assert exe_paths == ['bin/exepath1', 'bin/exepath2']
assert 'package_metadata_version' in extra
@pytest.mark.serial
def test_skip_compile_pyc(testing_config):
outputs = api.build(os.path.join(metadata_dir, "skip_compile_pyc"), config=testing_config)
tf = tarfile.open(outputs[0])
pyc_count = 0
for f in tf.getmembers():
filename = os.path.basename(f.name)
_, ext = os.path.splitext(filename)
basename = filename.split('.', 1)[0]
if basename == 'skip_compile_pyc':
assert not ext == '.pyc', "a skip_compile_pyc .pyc was compiled: {}".format(filename)
if ext == '.pyc':
assert basename == 'compile_pyc', "an unexpected .pyc was compiled: {}".format(filename)
pyc_count = pyc_count + 1
assert pyc_count == 2, "there should be 2 .pyc files, instead there were {}".format(pyc_count)
def test_detect_binary_files_with_prefix(testing_config):
outputs = api.build(os.path.join(metadata_dir, "_detect_binary_files_with_prefix"),
config=testing_config)
matches = []
with tarfile.open(outputs[0]) as tf:
has_prefix = tf.extractfile('info/has_prefix')
contents = [p.strip().decode('utf-8') for p in
has_prefix.readlines()]
has_prefix.close()
matches = [entry for entry in contents if entry.endswith('binary-has-prefix') or
entry.endswith('"binary-has-prefix"')]
assert len(matches) == 1, "binary-has-prefix not recorded in info/has_prefix"
assert ' binary ' in matches[0], "binary-has-prefix not recorded as binary in info/has_prefix"
def test_skip_detect_binary_files_with_prefix(testing_config):
recipe = os.path.join(metadata_dir, "_skip_detect_binary_files_with_prefix")
outputs = api.build(recipe, config=testing_config)
matches = []
with tarfile.open(outputs[0]) as tf:
try:
has_prefix = tf.extractfile('info/has_prefix')
contents = [p.strip().decode('utf-8') for p in
has_prefix.readlines()]
has_prefix.close()
matches = [entry for entry in contents if entry.endswith('binary-has-prefix') or
entry.endswith('"binary-has-prefix"')]
except:
pass
assert len(matches) == 0, "binary-has-prefix recorded in info/has_prefix despite:" \
"build/detect_binary_files_with_prefix: false"
def test_fix_permissions(testing_config):
recipe = os.path.join(metadata_dir, "fix_permissions")
outputs = api.build(recipe, config=testing_config)
with tarfile.open(outputs[0]) as tf:
for f in tf.getmembers():
assert f.mode & 0o444 == 0o444, "tar member '{}' has invalid (read) mode".format(f.name)
@pytest.mark.skipif(not on_win, reason="windows-only functionality")
@pytest.mark.parametrize('recipe_name', ["_script_win_creates_exe",
"_script_win_creates_exe_garbled"])
def test_script_win_creates_exe(testing_config, recipe_name):
recipe = os.path.join(metadata_dir, recipe_name)
outputs = api.build(recipe, config=testing_config)
assert package_has_file(outputs[0], 'Scripts/test-script.exe')
assert package_has_file(outputs[0], 'Scripts/test-script-script.py')
def test_output_folder_moves_file(testing_metadata, testing_workdir):
testing_metadata.config.output_folder = testing_workdir
outputs = api.build(testing_metadata, no_test=True)
assert outputs[0].startswith(testing_workdir)
def test_info_files_json(testing_config):
outputs = api.build(os.path.join(metadata_dir, "ignore_some_prefix_files"),
config=testing_config)
assert package_has_file(outputs[0], "info/paths.json")
with tarfile.open(outputs[0]) as tf:
data = json.loads(tf.extractfile('info/paths.json').read().decode('utf-8'))
fields = ["_path", "sha256", "size_in_bytes", "path_type", "file_mode", "no_link",
"prefix_placeholder", "inode_paths"]
for key in data.keys():
assert key in ['paths', 'paths_version']
for paths in data.get('paths'):
for field in paths.keys():
assert field in fields
assert len(data.get('paths')) == 2
for file in data.get('paths'):
for key in file.keys():
assert key in fields
short_path = file.get("_path")
if short_path == "test.sh" or short_path == "test.bat":
assert file.get("prefix_placeholder") is not None
assert file.get("file_mode") is not None
else:
assert file.get("prefix_placeholder") is None
assert file.get("file_mode") is None
def test_build_expands_wildcards(mocker, testing_workdir):
build_tree = mocker.patch("conda_build.build.build_tree")
config = api.Config()
files = ['abc', 'acb']
for f in files:
os.makedirs(f)
with open(os.path.join(f, 'meta.yaml'), 'w') as fh:
fh.write('\n')
api.build(["a*"], config=config)
output = [os.path.join(os.getcwd(), path, 'meta.yaml') for path in files]
build_tree.assert_called_once_with(output, build_only=False, config=mocker.ANY,
need_source_download=True, notest=False,
post=None, variants=None)
@pytest.mark.serial
@pytest.mark.parametrize('set_build_id', [True, False])
def test_remove_workdir_default(testing_config, caplog, set_build_id):
recipe = os.path.join(metadata_dir, '_keep_work_dir')
# make a metadata object - otherwise the build folder is computed within the build, but does
# not alter the config object that is passed in. This is by design - we always make copies
# of the config object rather than edit it in place, so that variants don't clobber one
# another
metadata = api.render(recipe, config=testing_config)[0][0]
api.build(metadata, set_build_id=set_build_id)
assert not glob(os.path.join(metadata.config.work_dir, '*'))
@pytest.mark.serial
def test_keep_workdir_and_dirty_reuse(testing_config, capfd):
recipe = os.path.join(metadata_dir, '_keep_work_dir')
# make a metadata object - otherwise the build folder is computed within the build, but does
# not alter the config object that is passed in. This is by design - we always make copies
# of the config object rather than edit it in place, so that variants don't clobber one
# another
metadata = api.render(recipe, config=testing_config, dirty=True, remove_work_dir=False)[0][0]
workdir = metadata.config.work_dir
api.build(metadata)
out, err = capfd.readouterr()
assert glob(os.path.join(metadata.config.work_dir, '*'))
# test that --dirty reuses the same old folder
metadata = api.render(recipe, config=testing_config, dirty=True, remove_work_dir=False)[0][0]
assert workdir == metadata.config.work_dir
# test that without --dirty, we don't reuse the folder
metadata = api.render(recipe, config=testing_config)[0][0]
assert workdir != metadata.config.work_dir
testing_config.clean()
def test_workdir_removal_warning(testing_config, caplog):
recipe = os.path.join(metadata_dir, '_test_uses_src_dir')
with pytest.raises(ValueError) as exc:
api.build(recipe, config=testing_config)
assert "work dir is removed" in str(exc)
# @pytest.mark.serial
# @pytest.mark.skipif(not sys.platform.startswith('linux'),
# reason="cross compiler packages created only on Linux right now")
# @pytest.mark.xfail(VersionOrder(conda.__version__) < VersionOrder('4.3.2'),
# reason="not completely implemented yet")
# def test_cross_compiler(testing_workdir, testing_config, capfd):
# # TODO: testing purposes. Package from @mingwandroid's channel, copied to conda_build_test
# testing_config.channel_urls = ('conda_build_test', )
# # activation is necessary to set the appropriate toolchain env vars
# testing_config.activate = True
# # testing_config.debug = True
# recipe_dir = os.path.join(metadata_dir, '_cross_helloworld')
# output = api.build(recipe_dir, config=testing_config)[0]
# assert output.startswith(os.path.join(testing_config.croot, 'linux-imx351uc'))
@pytest.mark.skipif(sys.platform != 'darwin', reason="relevant to mac only")
def test_append_python_app_osx(testing_config):
"""Recipes that use osx_is_app need to have python.app in their runtime requirements.
conda-build will add it if it's missing."""
recipe = os.path.join(metadata_dir, '_osx_is_app_missing_python_app')
# tests will fail here if python.app is not added to the run reqs by conda-build, because
# without it, pythonw will be missing.
api.build(recipe, config=testing_config)
# Not sure about this behavior. Basically, people need to realize that if they
# start with a recipe from disk, they should not then alter the metadata
# object. Later reparsing will clobber their edits to the object. The
# complicated thing is that these edits are indistinguishable from Jinja2
# templating doing its normal thing.
# def test_clobbering_manually_set_metadata_raises(testing_metadata, testing_workdir):
# api.output_yaml(testing_metadata, 'meta.yaml')
# metadata = api.render(testing_workdir)[0][0]
# # make the package meta dict out of sync with file contents
# metadata.meta['package']['name'] = 'steve'
# # re-render happens as part of build. We should see an error about clobbering our customized
# # meta dict
# with pytest.raises(ValueError):
# api.build(metadata)
@pytest.mark.serial
def test_run_exports(testing_metadata, testing_config, testing_workdir):
api.build(os.path.join(metadata_dir, '_run_exports'), config=testing_config)
api.build(os.path.join(metadata_dir, '_run_exports_implicit_weak'), config=testing_config)
# run_exports is tricky. We mostly only ever want things in "host". Here are the conditions:
# 1. only build section present (legacy recipe). Here, use run_exports from build.
testing_metadata.meta['requirements']['build'] = ['test_has_run_exports']
api.output_yaml(testing_metadata, 'meta.yaml')
m = api.render(testing_workdir, config=testing_config)[0][0]
assert 'strong_pinned_package 1.0.*' in m.meta['requirements']['run']
assert 'weak_pinned_package 1.0.*' in m.meta['requirements']['run']
# 2. host present. Use run_exports from host, ignore 'weak' ones from build. All are
# weak by default.
testing_metadata.meta['requirements']['build'] = ['test_has_run_exports_implicit_weak']
testing_metadata.meta['requirements']['host'] = ['python']
api.output_yaml(testing_metadata, 'meta.yaml')
m = api.render(testing_workdir, config=testing_config)[0][0]
assert 'weak_pinned_package 2.0.*' not in m.meta['requirements']['run']
# 3. host present, and deps in build have "strong" run_exports section. use host, add
# in "strong" from build.
testing_metadata.meta['requirements']['build'] = ['test_has_run_exports']
testing_metadata.meta['requirements']['host'] = ['test_has_run_exports_implicit_weak']
api.output_yaml(testing_metadata, 'meta.yaml')
m = api.render(testing_workdir, config=testing_config)[0][0]
assert any('strong_pinned_package 1.0' in req for req in m.meta['requirements']['host'])
assert 'strong_pinned_package 1.0.*' in m.meta['requirements']['run']
# weak one from test_has_run_exports should be excluded, since it is a build dep
assert 'weak_pinned_package 1.0.*' not in m.meta['requirements']['run']
assert 'weak_pinned_package 2.0.*' in m.meta['requirements']['run']
@pytest.mark.serial
def test_ignore_run_exports(testing_metadata, testing_config):
# need to clear conda's index, or else we somehow pick up the test_run_exports folder
# above for our package here.
api.build(os.path.join(metadata_dir, '_run_exports'), config=testing_config)
testing_metadata.meta['requirements']['build'] = ['test_has_run_exports']
testing_metadata.meta['build']['ignore_run_exports'] = ['downstream_pinned_package']
testing_metadata.config.index = None
m = finalize_metadata(testing_metadata)
assert 'downstream_pinned_package 1.0' not in m.meta['requirements']['run']
def test_pin_subpackage_exact(testing_config):
recipe = os.path.join(metadata_dir, '_pin_subpackage_exact')
ms = api.render(recipe, config=testing_config)
assert any(re.match(r'run_exports_subpkg 1.0 h[a-f0-9]{%s}_0' % testing_config.hash_length,
req)
for (m, _, _) in ms for req in m.meta['requirements']['run'])
api.build(recipe, config=testing_config)
@pytest.mark.skipif(sys.platform != 'linux', reason="xattr code written here is specific to linux")
def test_copy_read_only_file_with_xattr(testing_config, testing_workdir):
src_recipe = os.path.join(metadata_dir, '_xattr_copy')
recipe = os.path.join(testing_workdir, '_xattr_copy')
copy_into(src_recipe, recipe)
# file is r/w for owner, but we change it to 400 after setting the attribute
ro_file = os.path.join(recipe, 'mode_400_file')
subprocess.check_call('setfattr -n user.attrib -v somevalue {}'.format(ro_file), shell=True)
subprocess.check_call('chmod 400 {}'.format(ro_file), shell=True)
api.build(recipe, config=testing_config)
@pytest.mark.serial
def test_env_creation_fail_exits_build(testing_config):
recipe = os.path.join(metadata_dir, '_post_link_exits_after_retry')
with pytest.raises((RuntimeError, LinkError, CondaError)):
api.build(recipe, config=testing_config)
recipe = os.path.join(metadata_dir, '_post_link_exits_tests')
with pytest.raises((RuntimeError, LinkError, CondaError)):
api.build(recipe, config=testing_config)
@pytest.mark.serial
def test_recursion_packages(testing_config):
"""Two packages that need to be built are listed in the recipe
make sure that both get built before the one needing them gets built."""
recipe = os.path.join(metadata_dir, '_recursive-build-two-packages')
api.build(recipe, config=testing_config)
@pytest.mark.serial
def test_recursion_layers(testing_config):
"""go two 'hops' - try to build a, but a needs b, so build b first, then come back to a"""
recipe = os.path.join(metadata_dir, '_recursive-build-two-layers')
api.build(recipe, config=testing_config)
@pytest.mark.skipif(sys.platform != 'win32', reason=("spaces break openssl prefix "
"replacement on *nix"))
def test_croot_with_spaces(testing_metadata, testing_workdir):
testing_metadata.config.croot = os.path.join(testing_workdir, "space path")
api.build(testing_metadata)
def test_unknown_selectors(testing_config):
recipe = os.path.join(metadata_dir, 'unknown_selector')
api.build(recipe, config=testing_config)
def test_extract_tarball_with_unicode_filename(testing_config):
"""See https://github.com/conda/conda-build/pull/1779"""
recipe = os.path.join(metadata_dir, '_unicode_in_tarball')
api.build(recipe, config=testing_config)
@pytest.mark.serial
def test_failed_recipe_leaves_folders(testing_config, testing_workdir):
recipe = os.path.join(fail_dir, 'recursive-build')
m = api.render(recipe, config=testing_config)[0][0]
locks = get_conda_operation_locks(m.config)
with pytest.raises((RuntimeError, exceptions.DependencyNeedsBuildingError)):
api.build(m)
assert os.path.isdir(m.config.build_folder), 'build folder was removed'
assert os.listdir(m.config.build_folder), 'build folder has no files'
# make sure that it does not leave lock files, though, as these cause permission errors on
# centralized installations
any_locks = False
locks_list = set()
for lock in locks:
if os.path.isfile(lock.lock_file):
any_locks = True
dest_path = base64.b64decode(os.path.basename(lock.lock_file))
if PY3 and hasattr(dest_path, 'decode'):
dest_path = dest_path.decode()
locks_list.add((lock.lock_file, dest_path))
assert not any_locks, "remaining locks:\n{}".format('\n'.join('->'.join((l, r))
for (l, r) in locks_list))
def test_only_r_env_vars_defined(testing_config):
recipe = os.path.join(metadata_dir, '_r_env_defined')
testing_config.channel_urls = ('r', )
api.build(recipe, config=testing_config)
def test_only_perl_env_vars_defined(testing_config):
recipe = os.path.join(metadata_dir, '_perl_env_defined')
testing_config.channel_urls = ('c3i_test', )
api.build(recipe, config=testing_config)
@pytest.mark.skipif(on_win, reason='no lua package on win')
def test_only_lua_env(testing_config):
recipe = os.path.join(metadata_dir, '_lua_env_defined')
testing_config.channel_urls = ('conda-forge', )
testing_config.prefix_length = 80
testing_config.set_build_id = False
api.build(recipe, config=testing_config)
def test_run_constrained_stores_constrains_info(testing_config):
recipe = os.path.join(metadata_dir, '_run_constrained')
out_file = api.build(recipe, config=testing_config)[0]
info_contents = json.loads(package_has_file(out_file, 'info/index.json'))
assert 'constrains' in info_contents
assert len(info_contents['constrains']) == 1
assert info_contents['constrains'][0] == 'bzip2 1.*'
@pytest.mark.serial
def test_no_locking(testing_config):
recipe = os.path.join(metadata_dir, 'source_git_jinja2')
api.update_index(os.path.join(testing_config.croot, testing_config.subdir),
config=testing_config)
api.build(recipe, config=testing_config, locking=False)
def test_test_dependencies(testing_workdir, testing_config):
recipe = os.path.join(fail_dir, 'check_test_dependencies')
with pytest.raises(exceptions.DependencyNeedsBuildingError) as e:
api.build(recipe, config=testing_config)
assert 'Unsatisfiable dependencies for platform ' in str(e.value)
assert 'pytest-package-does-not-exist' in str(e.value)
def test_runtime_dependencies(testing_workdir, testing_config):
recipe = os.path.join(fail_dir, 'check_runtime_dependencies')
with pytest.raises(exceptions.DependencyNeedsBuildingError) as e:
api.build(recipe, config=testing_config)
assert 'Unsatisfiable dependencies for platform ' in str(e.value)
assert 'some-nonexistent-package1' in str(e.value)
def test_no_force_upload_condarc_setting(mocker, testing_workdir, testing_metadata):
testing_metadata.config.anaconda_upload = True
del testing_metadata.meta['test']
api.output_yaml(testing_metadata, 'meta.yaml')
call = mocker.patch.object(conda_build.build.subprocess, 'call')
cc_conda_build['force_upload'] = False
pkg = api.build(testing_workdir)
assert call.called_once_with(['anaconda', 'upload', pkg])
del cc_conda_build['force_upload']
pkg = api.build(testing_workdir)
assert call.called_once_with(['anaconda', 'upload', '--force', pkg])
def test_setup_py_data_in_env(testing_config):
recipe = os.path.join(metadata_dir, '_setup_py_data_in_env')
# should pass with any modern python (just not 3.5)
api.build(recipe, config=testing_config)
# make sure it fails with our special python logic
with pytest.raises(subprocess.CalledProcessError):
api.build(recipe, config=testing_config, python='3.4')
def test_numpy_xx(testing_config):
recipe = os.path.join(metadata_dir, '_numpy_xx')
api.build(recipe, config=testing_config, numpy='1.12')
def test_numpy_xx_host(testing_config):
recipe = os.path.join(metadata_dir, '_numpy_xx_host')
api.build(recipe, config=testing_config, numpy='1.12')
def test_python_xx(testing_config):
recipe = os.path.join(metadata_dir, '_python_xx')
api.build(recipe, config=testing_config, python='3.4')
def test_indirect_numpy_dependency(testing_metadata):
testing_metadata.meta['requirements']['build'] = ['arrow-cpp 0.5.*']
testing_metadata.config.channel_urls = ['conda-forge']
api.build(testing_metadata, numpy=1.13)
def test_dependencies_with_notest(testing_workdir, testing_config):
recipe = os.path.join(metadata_dir, '_test_dependencies')
api.build(recipe, config=testing_config, notest=True)
with pytest.raises(DependencyNeedsBuildingError) as excinfo:
api.build(recipe, config=testing_config, notest=False)
assert 'Unsatisfiable dependencies for platform' in str(excinfo.value)
assert 'somenonexistentpackage1' in str(excinfo.value)
def test_source_cache_build(testing_workdir):
recipe = os.path.join(metadata_dir, 'source_git_jinja2')
config = api.Config(src_cache_root=testing_workdir)
api.build(recipe, notest=True, config=config)
    git_cache_directory = '{}/git_cache'.format(testing_workdir)
assert os.path.isdir(git_cache_directory)
files = [filename for _, _, filenames in os.walk(git_cache_directory)
for filename in filenames]
assert len(files) > 0
def test_copy_test_source_files(testing_config):
recipe = os.path.join(metadata_dir, '_test_test_source_files')
filenames = set()
for copy in (False, True):
testing_config.copy_test_source_files = copy
outputs = api.build(recipe, notest=False, config=testing_config)
filenames.add(os.path.basename(outputs[0]))
tf = tarfile.open(outputs[0])
found = False
for f in tf.getmembers():
if f.name.startswith('info/test/'):
found = True
break
if found:
assert copy, "'info/test/' found in tar.bz2 but not copying test source files"
else:
assert not copy, "'info/test/' not found in tar.bz2 but copying test source files"
assert len(filenames) == 2, "copy_test_source_files does not modify the build hash but should"
def test_pin_depends(testing_config):
"""purpose of 'record' argument is to put a 'requires' file that records pinned run
dependencies
"""
recipe = os.path.join(metadata_dir, '_pin_depends_record')
m = api.render(recipe, config=testing_config)[0][0]
# the recipe python is not pinned, and having pin_depends set to record will not show it in record
    assert not any(re.search(r'python\s+[23]\.', dep) for dep in m.meta['requirements']['run'])
output = api.build(m, config=testing_config)[0]
requires = package_has_file(output, 'info/requires')
assert requires
if PY3 and hasattr(requires, 'decode'):
requires = requires.decode()
    assert re.search(r'python=[23]\.', requires), "didn't find pinned python in info/requires"
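# Hedged illustration (not taken from this test's recipe): a recipe typically opts into this
# behaviour with a meta.yaml entry along the lines of
#
#   build:
#     pin_depends: record
#
# which asks conda-build to write the resolved, pinned run dependencies into the package's
# info/requires file, as asserted above.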
def test_failed_patch_exits_build(testing_config):
with pytest.raises(RuntimeError):
api.build(os.path.join(metadata_dir, '_bad_patch'), config=testing_config)
def test_version_mismatch_in_variant_does_not_infinitely_rebuild_folder(testing_config):
# unsatisfiable; also not buildable (test_a recipe version is 2.0)
testing_config.variant['test_a'] = "1.0"
recipe = os.path.join(metadata_dir, '_build_deps_no_infinite_loop', 'test_b')
with pytest.raises(DependencyNeedsBuildingError):
api.build(recipe, config=testing_config)
# passes now, because package can be built, or is already built. Doesn't matter which.
testing_config.variant['test_a'] = "2.0"
api.build(recipe, config=testing_config)
| describe_root |
config.rs | use std::{
default::Default,
net::{IpAddr, Ipv4Addr, SocketAddr},
path::{Path, PathBuf},
str::FromStr,
time::Duration,
};
use log::error;
use protocol::{types::Address, ProtocolResult};
use tentacle::{
multiaddr::{multiaddr, Multiaddr, Protocol},
secio::{PublicKey, SecioKeyPair},
};
use crate::{
common::socket_to_multi_addr,
connection::ConnectionConfig,
error::NetworkError,
peer_manager::{ArcPeer, PeerManagerConfig, SharedSessionsConfig},
selfcheck::SelfCheckConfig,
traits::MultiaddrExt,
};
// TODO: 0.0.0.0 expose? 127.0.0.1 doesn't work because of tentacle-discovery.
// Default listen address: 0.0.0.0:2337
pub const DEFAULT_LISTEN_IP_ADDR: IpAddr = IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0));
pub const DEFAULT_LISTEN_PORT: u16 = 2337;
// Default max connections
pub const DEFAULT_MAX_CONNECTIONS: usize = 40;
// Default connection stream frame window length
pub const DEFAULT_MAX_FRAME_LENGTH: usize = 4 * 1024 * 1024; // 4 MiB
pub const DEFAULT_BUFFER_SIZE: usize = 24 * 1024 * 1024; // same as tentacle
// Default max wait streams for accept
pub const DEFAULT_MAX_WAIT_STREAMS: usize = 256;
// Default write timeout
pub const DEFAULT_WRITE_TIMEOUT: u64 = 10; // seconds
// Default peer data persistent path
pub const DEFAULT_PEER_FILE_NAME: &str = "peers";
pub const DEFAULT_PEER_FILE_EXT: &str = "dat";
pub const DEFAULT_PEER_DAT_FILE: &str = "./peers.dat";
pub const DEFAULT_PING_INTERVAL: u64 = 15;
pub const DEFAULT_PING_TIMEOUT: u64 = 30;
pub const DEFAULT_DISCOVERY_SYNC_INTERVAL: u64 = 60 * 60; // 1 hour
pub const DEFAULT_PEER_MANAGER_HEART_BEAT_INTERVAL: u64 = 30;
pub const DEFAULT_SELF_HEART_BEAT_INTERVAL: u64 = 35;
pub const DEFAULT_RPC_TIMEOUT: u64 = 10;
// Selfcheck
pub const DEFAULT_SELF_CHECK_INTERVAL: u64 = 30;
pub type PublicKeyHexStr = String;
pub type PrivateKeyHexStr = String;
pub type PeerAddrStr = String;
// Example:
// example.com:2077
struct DnsAddr {
host: String,
port: u16,
}
impl FromStr for DnsAddr {
type Err = NetworkError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
use NetworkError::*;
let comps = s.split(':').collect::<Vec<_>>();
if comps.len() != 2 {
return Err(UnexpectedPeerAddr(s.to_owned()));
}
let port = comps[1]
.parse::<u16>()
.map_err(|_| UnexpectedPeerAddr(s.to_owned()))?;
Ok(DnsAddr {
host: comps[0].to_owned(),
port,
})
}
}
// TODO: support Dns6
impl From<DnsAddr> for Multiaddr {
fn from(addr: DnsAddr) -> Self {
multiaddr!(DNS4(&addr.host), TCP(addr.port))
}
}
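// A minimal sketch (not part of the original module) of how the example address above is
// expected to flow through DnsAddr:
//
//   let addr: DnsAddr = "example.com:2077".parse()?;
//   let maddr: Multiaddr = addr.into();
//   // maddr is equivalent to multiaddr!(DNS4("example.com"), TCP(2077u16))
//
// Anything that is not exactly `host:port` falls out as NetworkError::UnexpectedPeerAddr.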
#[derive(Debug)]
pub struct NetworkConfig {
// connection
pub default_listen: Multiaddr,
pub max_connections: usize,
pub max_frame_length: usize,
pub send_buffer_size: usize,
pub recv_buffer_size: usize,
pub max_wait_streams: usize,
pub write_timeout: u64,
// peer manager
pub bootstraps: Vec<ArcPeer>,
pub whitelist: Vec<Address>,
pub whitelist_peers_only: bool,
pub enable_save_restore: bool,
pub peer_dat_file: PathBuf,
// identity and encryption
pub secio_keypair: SecioKeyPair,
// protocol
pub ping_interval: Duration,
pub ping_timeout: Duration,
pub discovery_sync_interval: Duration,
// routine
pub peer_manager_heart_beat_interval: Duration,
pub heart_beat_interval: Duration,
// rpc
pub rpc_timeout: Duration,
// self check
pub selfcheck_interval: Duration,
}
impl NetworkConfig {
pub fn new() -> Self {
let mut listen_addr = Multiaddr::from(DEFAULT_LISTEN_IP_ADDR);
listen_addr.push(Protocol::TCP(DEFAULT_LISTEN_PORT));
let peer_manager_hb_interval =
Duration::from_secs(DEFAULT_PEER_MANAGER_HEART_BEAT_INTERVAL);
NetworkConfig {
default_listen: listen_addr,
max_connections: DEFAULT_MAX_CONNECTIONS,
max_frame_length: DEFAULT_MAX_FRAME_LENGTH,
send_buffer_size: DEFAULT_BUFFER_SIZE,
recv_buffer_size: DEFAULT_BUFFER_SIZE,
max_wait_streams: DEFAULT_MAX_WAIT_STREAMS,
write_timeout: DEFAULT_WRITE_TIMEOUT,
bootstraps: Default::default(),
whitelist: Default::default(),
whitelist_peers_only: false,
enable_save_restore: false,
peer_dat_file: PathBuf::from(DEFAULT_PEER_DAT_FILE.to_owned()),
secio_keypair: SecioKeyPair::secp256k1_generated(),
ping_interval: Duration::from_secs(DEFAULT_PING_INTERVAL),
ping_timeout: Duration::from_secs(DEFAULT_PING_TIMEOUT),
discovery_sync_interval: Duration::from_secs(DEFAULT_DISCOVERY_SYNC_INTERVAL),
peer_manager_heart_beat_interval: peer_manager_hb_interval,
heart_beat_interval: Duration::from_secs(DEFAULT_SELF_HEART_BEAT_INTERVAL),
rpc_timeout: Duration::from_secs(DEFAULT_RPC_TIMEOUT),
selfcheck_interval: Duration::from_secs(DEFAULT_SELF_CHECK_INTERVAL),
}
}
pub fn max_connections(mut self, max: Option<usize>) -> Self {
if let Some(max) = max {
self.max_connections = max;
}
self
}
pub fn max_frame_length(mut self, max: Option<usize>) -> Self {
if let Some(max) = max {
self.max_frame_length = max;
}
self
}
pub fn send_buffer_size(mut self, size: Option<usize>) -> Self {
if let Some(size) = size {
self.send_buffer_size = size;
}
self
}
pub fn recv_buffer_size(mut self, size: Option<usize>) -> Self {
if let Some(size) = size {
self.recv_buffer_size = size;
}
self
}
pub fn max_wait_streams(mut self, max: Option<usize>) -> Self {
if let Some(max) = max {
self.max_wait_streams = max;
}
self
}
pub fn write_timeout(mut self, timeout: Option<u64>) -> Self {
if let Some(timeout) = timeout {
self.write_timeout = timeout;
}
self
}
pub fn bootstraps(
mut self,
pairs: Vec<(PublicKeyHexStr, PeerAddrStr)>,
) -> ProtocolResult<Self> {
let to_peer = |(pk_hex, peer_addr): (PublicKeyHexStr, PeerAddrStr)| -> _ {
let pk = hex::decode(pk_hex)
.map(PublicKey::Secp256k1)
.map_err(|_| NetworkError::InvalidPublicKey)?;
let peer_id = pk.peer_id();
let mut multiaddr = Self::parse_peer_addr(peer_addr)?;
let peer = ArcPeer::from_pubkey(pk).map_err(NetworkError::from)?;
if let Some(id_bytes) = multiaddr.id_bytes() {
if id_bytes != peer_id.as_bytes() {
error!("network: pubkey doesn't match peer id in {}", multiaddr);
return Ok(peer);
}
}
if !multiaddr.has_id() {
multiaddr.push_id(peer_id);
}
peer.multiaddrs.insert_raw(multiaddr);
Ok(peer)
};
let bootstrap_peers = pairs
.into_iter()
.map(to_peer)
.collect::<ProtocolResult<Vec<_>>>()?;
self.bootstraps = bootstrap_peers;
Ok(self)
}
pub fn | (mut self, chain_addr_strs: Vec<String>) -> ProtocolResult<Self> {
let chain_addrs = chain_addr_strs
.into_iter()
.map(|s| Address::from_hex(&s))
.collect::<ProtocolResult<Vec<_>>>()?;
self.whitelist = chain_addrs;
Ok(self)
}
pub fn whitelist_peers_only(mut self, flag: Option<bool>) -> Self {
if let Some(flag) = flag {
self.whitelist_peers_only = flag;
}
self
}
pub fn peer_dat_file<P: AsRef<Path>>(mut self, path: P) -> Self {
let mut path = path.as_ref().to_owned();
path.push(DEFAULT_PEER_FILE_NAME);
path.set_extension(DEFAULT_PEER_FILE_EXT);
self.peer_dat_file = path;
self
}
pub fn secio_keypair(mut self, sk_hex: PrivateKeyHexStr) -> ProtocolResult<Self> {
let maybe_skp = hex::decode(sk_hex).map(SecioKeyPair::secp256k1_raw_key);
if let Ok(Ok(skp)) = maybe_skp {
self.secio_keypair = skp;
Ok(self)
} else {
Err(NetworkError::InvalidPrivateKey.into())
}
}
pub fn ping_interval(mut self, interval: u64) -> Self {
self.ping_interval = Duration::from_secs(interval);
self
}
pub fn ping_timeout(mut self, timeout: u64) -> Self {
self.ping_timeout = Duration::from_secs(timeout);
self
}
pub fn discovery_sync_interval(mut self, interval: u64) -> Self {
self.discovery_sync_interval = Duration::from_secs(interval);
self
}
pub fn peer_manager_heart_beat_interval(mut self, interval: u64) -> Self {
self.peer_manager_heart_beat_interval = Duration::from_secs(interval);
self
}
pub fn heart_beat_interval(mut self, interval: u64) -> Self {
self.heart_beat_interval = Duration::from_secs(interval);
self
}
pub fn rpc_timeout(mut self, timeout: Option<u64>) -> Self {
if let Some(timeout) = timeout {
self.rpc_timeout = Duration::from_secs(timeout);
}
self
}
pub fn selfcheck_interval(mut self, interval: Option<u64>) -> Self {
if let Some(interval) = interval {
self.selfcheck_interval = Duration::from_secs(interval);
}
self
}
fn parse_peer_addr(addr: PeerAddrStr) -> ProtocolResult<Multiaddr> {
if let Ok(socket_addr) = addr.parse::<SocketAddr>() {
Ok(socket_to_multi_addr(socket_addr))
} else if let Ok(dns_addr) = addr.parse::<DnsAddr>() {
Ok(Multiaddr::from(dns_addr))
} else {
Err(NetworkError::UnexpectedPeerAddr(addr).into())
}
}
}
impl Default for NetworkConfig {
fn default() -> Self {
NetworkConfig::new()
}
}
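// Hedged usage sketch (not part of the original file): the builder-style setters above are
// meant to be chained from the defaults, e.g.
//
//   let config = NetworkConfig::new()
//       .max_connections(Some(64))
//       .rpc_timeout(Some(20))
//       .ping_interval(30);
//
// Option-taking setters keep the default when passed None; the fallible ones (bootstraps,
// whitelist, secio_keypair) return ProtocolResult and need `?`.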
impl From<&NetworkConfig> for ConnectionConfig {
fn from(config: &NetworkConfig) -> ConnectionConfig {
ConnectionConfig {
secio_keypair: config.secio_keypair.clone(),
max_frame_length: Some(config.max_frame_length),
send_buffer_size: Some(config.send_buffer_size),
recv_buffer_size: Some(config.recv_buffer_size),
max_wait_streams: Some(config.max_wait_streams),
write_timeout: Some(config.write_timeout),
}
}
}
impl From<&NetworkConfig> for PeerManagerConfig {
fn from(config: &NetworkConfig) -> PeerManagerConfig {
PeerManagerConfig {
our_id: config.secio_keypair.peer_id(),
pubkey: config.secio_keypair.public_key(),
bootstraps: config.bootstraps.clone(),
whitelist_by_chain_addrs: config.whitelist.clone(),
whitelist_peers_only: config.whitelist_peers_only,
max_connections: config.max_connections,
routine_interval: config.peer_manager_heart_beat_interval,
peer_dat_file: config.peer_dat_file.clone(),
}
}
}
#[derive(Debug, Clone, Copy)]
pub struct TimeoutConfig {
pub rpc: Duration,
}
impl From<&NetworkConfig> for TimeoutConfig {
fn from(config: &NetworkConfig) -> TimeoutConfig {
TimeoutConfig {
rpc: config.rpc_timeout,
}
}
}
impl From<&NetworkConfig> for SelfCheckConfig {
fn from(config: &NetworkConfig) -> SelfCheckConfig {
SelfCheckConfig {
interval: config.selfcheck_interval,
}
}
}
// TODO: checkout max_frame_length
impl From<&NetworkConfig> for SharedSessionsConfig {
fn from(config: &NetworkConfig) -> Self {
SharedSessionsConfig {
write_timeout: config.write_timeout,
max_stream_window_size: config.max_frame_length,
}
}
}
| whitelist |
schema_test.go | // Copyright 2019 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package schema
import (
"reflect"
"strings"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/dolthub/dolt/go/libraries/doltcore/schema/typeinfo"
"github.com/dolthub/dolt/go/store/types"
)
const (
lnColName = "last"
fnColName = "first"
addrColName = "address"
ageColName = "age"
titleColName = "title"
reservedColName = "reserved"
lnColTag = 1
fnColTag = 0
addrColTag = 6
ageColTag = 4
titleColTag = 40
reservedColTag = 50
)
var lnVal = types.String("astley")
var fnVal = types.String("rick")
var addrVal = types.String("123 Fake St")
var ageVal = types.Uint(53)
var titleVal = types.NullValue
var pkCols = []Column{
{lnColName, lnColTag, types.StringKind, true, typeinfo.StringDefaultType, "", false, "", nil},
{fnColName, fnColTag, types.StringKind, true, typeinfo.StringDefaultType, "", false, "", nil},
}
var nonPkCols = []Column{
{addrColName, addrColTag, types.StringKind, false, typeinfo.StringDefaultType, "", false, "", nil},
{ageColName, ageColTag, types.UintKind, false, typeinfo.FromKind(types.UintKind), "", false, "", nil},
{titleColName, titleColTag, types.StringKind, false, typeinfo.StringDefaultType, "", false, "", nil},
{reservedColName, reservedColTag, types.StringKind, false, typeinfo.StringDefaultType, "", false, "", nil},
}
var allCols = append(append([]Column(nil), pkCols...), nonPkCols...)
func TestSchema(t *testing.T) {
colColl := NewColCollection(allCols...)
schFromCols, err := SchemaFromCols(colColl)
require.NoError(t, err)
testSchema("SchemaFromCols", schFromCols, t)
testKeyColColl := NewColCollection(pkCols...)
testNonKeyColsColl := NewColCollection(nonPkCols...)
schFromPKAndNonPKCols, _ := SchemaFromPKAndNonPKCols(testKeyColColl, testNonKeyColsColl)
testSchema("SchemaFromPKAndNonPKCols", schFromPKAndNonPKCols, t)
eq := SchemasAreEqual(schFromCols, schFromPKAndNonPKCols)
assert.True(t, eq, "schemas should be equal")
}
func TestSchemaWithNoPKs(t *testing.T) {
colColl := NewColCollection(nonPkCols...)
_, _ = SchemaFromCols(colColl)
assert.NotPanics(t, func() {
UnkeyedSchemaFromCols(colColl)
})
}
func TestSchemaOverlap(t *testing.T) {
colColl := NewColCollection(nonPkCols...)
sch, _ := SchemaFromCols(colColl)
names := []string{addrColName, ageColName}
kinds := []types.NomsKind{types.StringKind, types.UintKind}
res := GetSharedCols(sch, names, kinds)
cmp := map[string]uint64{
addrColName: addrColTag,
ageColName: ageColTag,
}
assert.Equal(t, res, cmp)
}
func TestIsKeyless(t *testing.T) {
cc := NewColCollection(allCols...)
pkSch, err := SchemaFromCols(cc)
require.NoError(t, err)
ok := IsKeyless(pkSch)
assert.False(t, ok)
cc = NewColCollection(nonPkCols...)
keylessSch, err := SchemaFromCols(cc)
assert.NoError(t, err)
ok = IsKeyless(keylessSch)
assert.True(t, ok)
}
func TestValidateForInsert(t *testing.T) {
t.Run("Validate good", func(t *testing.T) {
colColl := NewColCollection(allCols...)
assert.NoError(t, ValidateForInsert(colColl))
})
t.Run("Name collision", func(t *testing.T) {
cols := append(allCols, Column{titleColName, 100, types.StringKind, false, typeinfo.StringDefaultType, "", false, "", nil})
colColl := NewColCollection(cols...)
err := ValidateForInsert(colColl)
assert.Error(t, err)
assert.Equal(t, err, ErrColNameCollision)
})
t.Run("Case insensitive collision", func(t *testing.T) {
cols := append(allCols, Column{strings.ToUpper(titleColName), 100, types.StringKind, false, typeinfo.StringDefaultType, "", false, "", nil})
colColl := NewColCollection(cols...)
err := ValidateForInsert(colColl)
assert.Error(t, err)
assert.Equal(t, err, ErrColNameCollision)
})
t.Run("Tag collision", func(t *testing.T) {
cols := append(allCols, Column{"newCol", lnColTag, types.StringKind, false, typeinfo.StringDefaultType, "", false, "", nil})
colColl := NewColCollection(cols...)
err := ValidateForInsert(colColl)
assert.Error(t, err)
assert.Equal(t, err, ErrColTagCollision)
})
}
func testSchema(method string, sch Schema, t *testing.T) {
validateCols(t, allCols, sch.GetAllCols(), method+"GetAllCols")
validateCols(t, pkCols, sch.GetPKCols(), method+"GetPKCols")
validateCols(t, nonPkCols, sch.GetNonPKCols(), method+"GetNonPKCols")
extracted, err := ExtractAllColNames(sch)
assert.NoError(t, err)
expExt := map[uint64]string{
lnColTag: lnColName,
fnColTag: fnColName,
ageColTag: ageColName,
addrColTag: addrColName,
titleColTag: titleColName,
reservedColTag: reservedColName,
}
if !reflect.DeepEqual(extracted, expExt) {
t.Error("extracted columns did not match expectation")
}
if col, ok := ColFromName(sch, titleColName); !ok {
t.Error("Failed to get by name")
} else if col.Tag != titleColTag {
t.Error("Unexpected tag")
}
if col, ok := ColFromTag(sch, titleColTag); !ok {
t.Error("Failed to get by name")
} else if col.Name != titleColName {
t.Error("Unexpected tag")
}
}
func validateCols(t *testing.T, cols []Column, colColl *ColCollection, msg string) | {
if !reflect.DeepEqual(cols, colColl.cols) {
		t.Error(msg)
}
} |
|
schema.type.test.js | 'use strict';
/**
* Module dependencies.
*/
const mongoose = require('./common').mongoose;
const assert = require('assert');
const Schema = mongoose.Schema;
describe('schematype', function() {
it('honors the selected option', function(done) {
const s = new Schema({thought: {type: String, select: false}});
assert.ok(!s.path('thought').selected);
const a = new Schema({thought: {type: String, select: true}});
assert.ok(a.path('thought').selected);
done();
});
it('properly handles specifying index in combination with unique or sparse', function(done) {
let s = new Schema({name: {type: String, index: true, unique: true}});
assert.deepEqual(s.path('name')._index, {unique: true});
s = new Schema({name: {type: String, unique: true, index: true}});
assert.deepEqual(s.path('name')._index, {unique: true});
s = new Schema({name: {type: String, index: true, sparse: true}});
assert.deepEqual(s.path('name')._index, {sparse: true});
s = new Schema({name: {type: String, sparse: true, index: true}});
assert.deepEqual(s.path('name')._index, {sparse: true});
done();
});
describe('checkRequired()', function() {
it('with inherits (gh-7486)', function() {
const m = new mongoose.Mongoose();
function CustomNumber(path, options) {
m.Schema.Types.Number.call(this, path, options);
}
CustomNumber.prototype.cast = v => v;
require('util').inherits(CustomNumber, m.Schema.Types.Number);
mongoose.Schema.Types.CustomNumber = CustomNumber;
function | (path, options) {
m.Schema.Types.String.call(this, path, options);
}
CustomString.prototype.cast = v => v;
require('util').inherits(CustomString, m.Schema.Types.String);
mongoose.Schema.Types.CustomString = CustomString;
function CustomObjectId(path, options) {
m.Schema.Types.ObjectId.call(this, path, options);
}
CustomObjectId.prototype.cast = v => v;
require('util').inherits(CustomObjectId, m.Schema.Types.ObjectId);
mongoose.Schema.Types.CustomObjectId = CustomObjectId;
const s = new Schema({
foo: { type: CustomNumber, required: true },
bar: { type: CustomString, required: true },
baz: { type: CustomObjectId, required: true }
});
const M = m.model('Test', s);
const doc = new M({ foo: 1, bar: '2', baz: new mongoose.Types.ObjectId() });
const err = doc.validateSync();
assert.ifError(err);
});
});
});
| CustomString |
.eslintrc.js | module.exports = {
env: { | } | browser: true,
}, |
intenclr.rs | #[doc = "Reader of register INTENCLR"]
pub type R = crate::R<u32, super::INTENCLR>;
#[doc = "Writer for register INTENCLR"]
pub type W = crate::W<u32, super::INTENCLR>;
#[doc = "Register INTENCLR `reset()`'s with value 0"]
impl crate::ResetValue for super::INTENCLR {
type Type = u32;
#[inline(always)]
fn reset_value() -> Self::Type {
0
}
}
#[doc = "Disable interrupt on READY event.\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum READY_A {
#[doc = "0: Interrupt disabled."]
DISABLED,
#[doc = "1: Interrupt enabled."]
ENABLED,
}
impl From<READY_A> for bool {
#[inline(always)]
fn from(variant: READY_A) -> Self {
match variant {
READY_A::DISABLED => false,
READY_A::ENABLED => true,
}
}
}
#[doc = "Reader of field `READY`"]
pub type READY_R = crate::R<bool, READY_A>;
impl READY_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> READY_A {
match self.bits {
false => READY_A::DISABLED,
true => READY_A::ENABLED,
}
}
#[doc = "Checks if the value of the field is `DISABLED`"]
#[inline(always)]
pub fn is_disabled(&self) -> bool {
*self == READY_A::DISABLED
}
#[doc = "Checks if the value of the field is `ENABLED`"]
#[inline(always)]
pub fn is_enabled(&self) -> bool {
*self == READY_A::ENABLED
}
}
#[doc = "Disable interrupt on READY event.\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum READY_AW {
#[doc = "1: Disable interrupt on write."]
CLEAR,
}
impl From<READY_AW> for bool {
#[inline(always)]
fn from(variant: READY_AW) -> Self {
match variant {
READY_AW::CLEAR => true,
}
}
}
#[doc = "Write proxy for field `READY`"]
pub struct READY_W<'a> {
w: &'a mut W,
}
impl<'a> READY_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: READY_AW) -> &'a mut W {
{
self.bit(variant.into())
}
}
#[doc = "Disable interrupt on write."]
#[inline(always)]
pub fn clear(self) -> &'a mut W {
self.variant(READY_AW::CLEAR)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !0x01) | ((value as u32) & 0x01);
self.w
}
}
#[doc = "Disable interrupt on ADDRESS event.\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum ADDRESS_A {
#[doc = "0: Interrupt disabled."]
DISABLED,
#[doc = "1: Interrupt enabled."]
ENABLED,
}
impl From<ADDRESS_A> for bool {
#[inline(always)]
fn from(variant: ADDRESS_A) -> Self {
match variant {
ADDRESS_A::DISABLED => false,
ADDRESS_A::ENABLED => true,
}
}
}
#[doc = "Reader of field `ADDRESS`"]
pub type ADDRESS_R = crate::R<bool, ADDRESS_A>;
impl ADDRESS_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> ADDRESS_A {
match self.bits {
false => ADDRESS_A::DISABLED,
true => ADDRESS_A::ENABLED,
}
}
#[doc = "Checks if the value of the field is `DISABLED`"]
#[inline(always)]
pub fn is_disabled(&self) -> bool {
*self == ADDRESS_A::DISABLED
}
#[doc = "Checks if the value of the field is `ENABLED`"]
#[inline(always)]
pub fn is_enabled(&self) -> bool {
*self == ADDRESS_A::ENABLED
}
}
#[doc = "Disable interrupt on ADDRESS event.\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum ADDRESS_AW {
#[doc = "1: Disable interrupt on write."]
CLEAR,
}
impl From<ADDRESS_AW> for bool {
#[inline(always)]
fn from(variant: ADDRESS_AW) -> Self {
match variant {
ADDRESS_AW::CLEAR => true,
}
}
}
#[doc = "Write proxy for field `ADDRESS`"]
pub struct ADDRESS_W<'a> {
w: &'a mut W,
}
impl<'a> ADDRESS_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: ADDRESS_AW) -> &'a mut W {
{
self.bit(variant.into())
}
}
#[doc = "Disable interrupt on write."]
#[inline(always)]
pub fn clear(self) -> &'a mut W {
self.variant(ADDRESS_AW::CLEAR)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 1)) | (((value as u32) & 0x01) << 1);
self.w
}
}
#[doc = "Disable interrupt on PAYLOAD event.\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum PAYLOAD_A {
#[doc = "0: Interrupt disabled."]
DISABLED,
#[doc = "1: Interrupt enabled."]
ENABLED,
}
impl From<PAYLOAD_A> for bool {
#[inline(always)]
fn from(variant: PAYLOAD_A) -> Self {
match variant {
PAYLOAD_A::DISABLED => false,
PAYLOAD_A::ENABLED => true,
}
}
}
#[doc = "Reader of field `PAYLOAD`"]
pub type PAYLOAD_R = crate::R<bool, PAYLOAD_A>;
impl PAYLOAD_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> PAYLOAD_A {
match self.bits {
false => PAYLOAD_A::DISABLED,
true => PAYLOAD_A::ENABLED,
}
}
#[doc = "Checks if the value of the field is `DISABLED`"]
#[inline(always)]
pub fn is_disabled(&self) -> bool {
*self == PAYLOAD_A::DISABLED
}
#[doc = "Checks if the value of the field is `ENABLED`"]
#[inline(always)]
pub fn is_enabled(&self) -> bool {
*self == PAYLOAD_A::ENABLED
}
}
#[doc = "Disable interrupt on PAYLOAD event.\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum PAYLOAD_AW {
#[doc = "1: Disable interrupt on write."]
CLEAR,
}
impl From<PAYLOAD_AW> for bool {
#[inline(always)]
fn from(variant: PAYLOAD_AW) -> Self {
match variant {
PAYLOAD_AW::CLEAR => true,
}
}
}
#[doc = "Write proxy for field `PAYLOAD`"]
pub struct PAYLOAD_W<'a> {
w: &'a mut W,
}
impl<'a> PAYLOAD_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: PAYLOAD_AW) -> &'a mut W {
{
self.bit(variant.into())
}
}
#[doc = "Disable interrupt on write."]
#[inline(always)]
pub fn clear(self) -> &'a mut W {
self.variant(PAYLOAD_AW::CLEAR)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 2)) | (((value as u32) & 0x01) << 2);
self.w
}
}
#[doc = "Disable interrupt on END event.\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum END_A {
#[doc = "0: Interrupt disabled."]
DISABLED,
#[doc = "1: Interrupt enabled."]
ENABLED,
}
impl From<END_A> for bool {
#[inline(always)]
fn from(variant: END_A) -> Self {
match variant {
END_A::DISABLED => false,
END_A::ENABLED => true,
}
}
}
#[doc = "Reader of field `END`"]
pub type END_R = crate::R<bool, END_A>;
impl END_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> END_A {
match self.bits {
false => END_A::DISABLED,
true => END_A::ENABLED,
}
}
#[doc = "Checks if the value of the field is `DISABLED`"]
#[inline(always)]
pub fn is_disabled(&self) -> bool {
*self == END_A::DISABLED
}
#[doc = "Checks if the value of the field is `ENABLED`"]
#[inline(always)]
pub fn is_enabled(&self) -> bool {
*self == END_A::ENABLED
}
}
#[doc = "Disable interrupt on END event.\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum END_AW {
#[doc = "1: Disable interrupt on write."]
CLEAR,
}
impl From<END_AW> for bool {
#[inline(always)]
fn from(variant: END_AW) -> Self {
match variant {
END_AW::CLEAR => true,
}
}
}
#[doc = "Write proxy for field `END`"]
pub struct END_W<'a> {
w: &'a mut W,
}
impl<'a> END_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: END_AW) -> &'a mut W {
{
self.bit(variant.into())
}
}
#[doc = "Disable interrupt on write."]
#[inline(always)]
pub fn clear(self) -> &'a mut W {
self.variant(END_AW::CLEAR)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 3)) | (((value as u32) & 0x01) << 3);
self.w
}
}
#[doc = "Disable interrupt on DISABLED event.\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum DISABLED_A {
#[doc = "0: Interrupt disabled."]
DISABLED,
#[doc = "1: Interrupt enabled."]
ENABLED,
}
impl From<DISABLED_A> for bool {
#[inline(always)]
fn from(variant: DISABLED_A) -> Self {
match variant {
DISABLED_A::DISABLED => false,
DISABLED_A::ENABLED => true,
}
}
}
#[doc = "Reader of field `DISABLED`"]
pub type DISABLED_R = crate::R<bool, DISABLED_A>;
impl DISABLED_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> DISABLED_A {
match self.bits {
false => DISABLED_A::DISABLED,
true => DISABLED_A::ENABLED,
}
}
#[doc = "Checks if the value of the field is `DISABLED`"]
#[inline(always)]
pub fn is_disabled(&self) -> bool {
*self == DISABLED_A::DISABLED
}
#[doc = "Checks if the value of the field is `ENABLED`"]
#[inline(always)]
pub fn is_enabled(&self) -> bool {
*self == DISABLED_A::ENABLED
}
}
#[doc = "Disable interrupt on DISABLED event.\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum DISABLED_AW {
#[doc = "1: Disable interrupt on write."]
CLEAR,
}
impl From<DISABLED_AW> for bool {
#[inline(always)]
fn from(variant: DISABLED_AW) -> Self {
match variant {
DISABLED_AW::CLEAR => true,
}
}
}
#[doc = "Write proxy for field `DISABLED`"]
pub struct DISABLED_W<'a> {
w: &'a mut W,
}
impl<'a> DISABLED_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: DISABLED_AW) -> &'a mut W {
{
self.bit(variant.into())
}
}
#[doc = "Disable interrupt on write."]
#[inline(always)]
pub fn clear(self) -> &'a mut W {
self.variant(DISABLED_AW::CLEAR)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 4)) | (((value as u32) & 0x01) << 4);
self.w
}
}
#[doc = "Disable interrupt on DEVMATCH event.\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum DEVMATCH_A {
#[doc = "0: Interrupt disabled."]
DISABLED,
#[doc = "1: Interrupt enabled."]
ENABLED,
}
impl From<DEVMATCH_A> for bool {
#[inline(always)]
fn from(variant: DEVMATCH_A) -> Self {
match variant {
DEVMATCH_A::DISABLED => false,
DEVMATCH_A::ENABLED => true,
}
}
}
#[doc = "Reader of field `DEVMATCH`"]
pub type DEVMATCH_R = crate::R<bool, DEVMATCH_A>;
impl DEVMATCH_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> DEVMATCH_A {
match self.bits {
false => DEVMATCH_A::DISABLED,
true => DEVMATCH_A::ENABLED,
}
}
#[doc = "Checks if the value of the field is `DISABLED`"]
#[inline(always)]
pub fn is_disabled(&self) -> bool {
*self == DEVMATCH_A::DISABLED
}
#[doc = "Checks if the value of the field is `ENABLED`"]
#[inline(always)]
pub fn is_enabled(&self) -> bool {
*self == DEVMATCH_A::ENABLED
}
}
#[doc = "Disable interrupt on DEVMATCH event.\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum DEVMATCH_AW {
#[doc = "1: Disable interrupt on write."]
CLEAR,
}
impl From<DEVMATCH_AW> for bool {
#[inline(always)]
fn from(variant: DEVMATCH_AW) -> Self {
match variant {
DEVMATCH_AW::CLEAR => true,
}
}
}
#[doc = "Write proxy for field `DEVMATCH`"]
pub struct DEVMATCH_W<'a> {
w: &'a mut W,
}
impl<'a> DEVMATCH_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: DEVMATCH_AW) -> &'a mut W {
|
}
#[doc = "Disable interrupt on write."]
#[inline(always)]
pub fn clear(self) -> &'a mut W {
self.variant(DEVMATCH_AW::CLEAR)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 5)) | (((value as u32) & 0x01) << 5);
self.w
}
}
#[doc = "Disable interrupt on DEVMISS event.\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum DEVMISS_A {
#[doc = "0: Interrupt disabled."]
DISABLED,
#[doc = "1: Interrupt enabled."]
ENABLED,
}
impl From<DEVMISS_A> for bool {
#[inline(always)]
fn from(variant: DEVMISS_A) -> Self {
match variant {
DEVMISS_A::DISABLED => false,
DEVMISS_A::ENABLED => true,
}
}
}
#[doc = "Reader of field `DEVMISS`"]
pub type DEVMISS_R = crate::R<bool, DEVMISS_A>;
impl DEVMISS_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> DEVMISS_A {
match self.bits {
false => DEVMISS_A::DISABLED,
true => DEVMISS_A::ENABLED,
}
}
#[doc = "Checks if the value of the field is `DISABLED`"]
#[inline(always)]
pub fn is_disabled(&self) -> bool {
*self == DEVMISS_A::DISABLED
}
#[doc = "Checks if the value of the field is `ENABLED`"]
#[inline(always)]
pub fn is_enabled(&self) -> bool {
*self == DEVMISS_A::ENABLED
}
}
#[doc = "Disable interrupt on DEVMISS event.\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum DEVMISS_AW {
#[doc = "1: Disable interrupt on write."]
CLEAR,
}
impl From<DEVMISS_AW> for bool {
#[inline(always)]
fn from(variant: DEVMISS_AW) -> Self {
match variant {
DEVMISS_AW::CLEAR => true,
}
}
}
#[doc = "Write proxy for field `DEVMISS`"]
pub struct DEVMISS_W<'a> {
w: &'a mut W,
}
impl<'a> DEVMISS_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: DEVMISS_AW) -> &'a mut W {
{
self.bit(variant.into())
}
}
#[doc = "Disable interrupt on write."]
#[inline(always)]
pub fn clear(self) -> &'a mut W {
self.variant(DEVMISS_AW::CLEAR)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 6)) | (((value as u32) & 0x01) << 6);
self.w
}
}
#[doc = "Disable interrupt on RSSIEND event.\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum RSSIEND_A {
#[doc = "0: Interrupt disabled."]
DISABLED,
#[doc = "1: Interrupt enabled."]
ENABLED,
}
impl From<RSSIEND_A> for bool {
#[inline(always)]
fn from(variant: RSSIEND_A) -> Self {
match variant {
RSSIEND_A::DISABLED => false,
RSSIEND_A::ENABLED => true,
}
}
}
#[doc = "Reader of field `RSSIEND`"]
pub type RSSIEND_R = crate::R<bool, RSSIEND_A>;
impl RSSIEND_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> RSSIEND_A {
match self.bits {
false => RSSIEND_A::DISABLED,
true => RSSIEND_A::ENABLED,
}
}
#[doc = "Checks if the value of the field is `DISABLED`"]
#[inline(always)]
pub fn is_disabled(&self) -> bool {
*self == RSSIEND_A::DISABLED
}
#[doc = "Checks if the value of the field is `ENABLED`"]
#[inline(always)]
pub fn is_enabled(&self) -> bool {
*self == RSSIEND_A::ENABLED
}
}
#[doc = "Disable interrupt on RSSIEND event.\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum RSSIEND_AW {
#[doc = "1: Disable interrupt on write."]
CLEAR,
}
impl From<RSSIEND_AW> for bool {
#[inline(always)]
fn from(variant: RSSIEND_AW) -> Self {
match variant {
RSSIEND_AW::CLEAR => true,
}
}
}
#[doc = "Write proxy for field `RSSIEND`"]
pub struct RSSIEND_W<'a> {
w: &'a mut W,
}
impl<'a> RSSIEND_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: RSSIEND_AW) -> &'a mut W {
{
self.bit(variant.into())
}
}
#[doc = "Disable interrupt on write."]
#[inline(always)]
pub fn clear(self) -> &'a mut W {
self.variant(RSSIEND_AW::CLEAR)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 7)) | (((value as u32) & 0x01) << 7);
self.w
}
}
#[doc = "Disable interrupt on BCMATCH event.\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum BCMATCH_A {
#[doc = "0: Interrupt disabled."]
DISABLED,
#[doc = "1: Interrupt enabled."]
ENABLED,
}
impl From<BCMATCH_A> for bool {
#[inline(always)]
fn from(variant: BCMATCH_A) -> Self {
match variant {
BCMATCH_A::DISABLED => false,
BCMATCH_A::ENABLED => true,
}
}
}
#[doc = "Reader of field `BCMATCH`"]
pub type BCMATCH_R = crate::R<bool, BCMATCH_A>;
impl BCMATCH_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> BCMATCH_A {
match self.bits {
false => BCMATCH_A::DISABLED,
true => BCMATCH_A::ENABLED,
}
}
#[doc = "Checks if the value of the field is `DISABLED`"]
#[inline(always)]
pub fn is_disabled(&self) -> bool {
*self == BCMATCH_A::DISABLED
}
#[doc = "Checks if the value of the field is `ENABLED`"]
#[inline(always)]
pub fn is_enabled(&self) -> bool {
*self == BCMATCH_A::ENABLED
}
}
#[doc = "Disable interrupt on BCMATCH event.\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum BCMATCH_AW {
#[doc = "1: Disable interrupt on write."]
CLEAR,
}
impl From<BCMATCH_AW> for bool {
#[inline(always)]
fn from(variant: BCMATCH_AW) -> Self {
match variant {
BCMATCH_AW::CLEAR => true,
}
}
}
#[doc = "Write proxy for field `BCMATCH`"]
pub struct BCMATCH_W<'a> {
w: &'a mut W,
}
impl<'a> BCMATCH_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: BCMATCH_AW) -> &'a mut W {
{
self.bit(variant.into())
}
}
#[doc = "Disable interrupt on write."]
#[inline(always)]
pub fn clear(self) -> &'a mut W {
self.variant(BCMATCH_AW::CLEAR)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 10)) | (((value as u32) & 0x01) << 10);
self.w
}
}
impl R {
#[doc = "Bit 0 - Disable interrupt on READY event."]
#[inline(always)]
pub fn ready(&self) -> READY_R {
READY_R::new((self.bits & 0x01) != 0)
}
#[doc = "Bit 1 - Disable interrupt on ADDRESS event."]
#[inline(always)]
pub fn address(&self) -> ADDRESS_R {
ADDRESS_R::new(((self.bits >> 1) & 0x01) != 0)
}
#[doc = "Bit 2 - Disable interrupt on PAYLOAD event."]
#[inline(always)]
pub fn payload(&self) -> PAYLOAD_R {
PAYLOAD_R::new(((self.bits >> 2) & 0x01) != 0)
}
#[doc = "Bit 3 - Disable interrupt on END event."]
#[inline(always)]
pub fn end(&self) -> END_R {
END_R::new(((self.bits >> 3) & 0x01) != 0)
}
#[doc = "Bit 4 - Disable interrupt on DISABLED event."]
#[inline(always)]
pub fn disabled(&self) -> DISABLED_R {
DISABLED_R::new(((self.bits >> 4) & 0x01) != 0)
}
#[doc = "Bit 5 - Disable interrupt on DEVMATCH event."]
#[inline(always)]
pub fn devmatch(&self) -> DEVMATCH_R {
DEVMATCH_R::new(((self.bits >> 5) & 0x01) != 0)
}
#[doc = "Bit 6 - Disable interrupt on DEVMISS event."]
#[inline(always)]
pub fn devmiss(&self) -> DEVMISS_R {
DEVMISS_R::new(((self.bits >> 6) & 0x01) != 0)
}
#[doc = "Bit 7 - Disable interrupt on RSSIEND event."]
#[inline(always)]
pub fn rssiend(&self) -> RSSIEND_R {
RSSIEND_R::new(((self.bits >> 7) & 0x01) != 0)
}
#[doc = "Bit 10 - Disable interrupt on BCMATCH event."]
#[inline(always)]
pub fn bcmatch(&self) -> BCMATCH_R {
BCMATCH_R::new(((self.bits >> 10) & 0x01) != 0)
}
}
impl W {
#[doc = "Bit 0 - Disable interrupt on READY event."]
#[inline(always)]
pub fn ready(&mut self) -> READY_W {
READY_W { w: self }
}
#[doc = "Bit 1 - Disable interrupt on ADDRESS event."]
#[inline(always)]
pub fn address(&mut self) -> ADDRESS_W {
ADDRESS_W { w: self }
}
#[doc = "Bit 2 - Disable interrupt on PAYLOAD event."]
#[inline(always)]
pub fn payload(&mut self) -> PAYLOAD_W {
PAYLOAD_W { w: self }
}
#[doc = "Bit 3 - Disable interrupt on END event."]
#[inline(always)]
pub fn end(&mut self) -> END_W {
END_W { w: self }
}
#[doc = "Bit 4 - Disable interrupt on DISABLED event."]
#[inline(always)]
pub fn disabled(&mut self) -> DISABLED_W {
DISABLED_W { w: self }
}
#[doc = "Bit 5 - Disable interrupt on DEVMATCH event."]
#[inline(always)]
pub fn devmatch(&mut self) -> DEVMATCH_W {
DEVMATCH_W { w: self }
}
#[doc = "Bit 6 - Disable interrupt on DEVMISS event."]
#[inline(always)]
pub fn devmiss(&mut self) -> DEVMISS_W {
DEVMISS_W { w: self }
}
#[doc = "Bit 7 - Disable interrupt on RSSIEND event."]
#[inline(always)]
pub fn rssiend(&mut self) -> RSSIEND_W {
RSSIEND_W { w: self }
}
#[doc = "Bit 10 - Disable interrupt on BCMATCH event."]
#[inline(always)]
pub fn bcmatch(&mut self) -> BCMATCH_W {
BCMATCH_W { w: self }
}
}
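// Hedged usage sketch (not part of the generated PAC file): with a peripheral handle that
// exposes this register (the field name `intenclr` below is an assumption), disabling the
// READY and END interrupts would typically be written as
//
//   periph.intenclr.write(|w| w.ready().clear().end().clear());
//
// since writing 1 to a bit of INTENCLR disables the corresponding interrupt.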
| {
self.bit(variant.into())
} |
create-calculation.js | const Dispatcher = require('../../../core/http/server/dispatcher')
/**
* @memberof Api
* @extends {superhero/core/http/server/dispatcher}
*/
class | extends Dispatcher
{
dispatch()
{
const
calculator = this.locator.locate('domain/aggregate/calculator'),
calculationId = calculator.createCalculation()
this.view.body.id = calculationId
}
}
module.exports = CreateCalculationEndpoint
| CreateCalculationEndpoint |
zeroconf_factory.py | # Copyright 2017 SrMouraSilva
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest.mock import MagicMock
from webservice.search.pybonjour_service import PybonjourService
from webservice.search.zeroconf_service import ZeroconfService
class ZeroconfFactory(object):
@staticmethod
def generate(name, port):
if PybonjourService.has_support():
|
elif ZeroconfService.has_support():
return ZeroconfService(name, port)
else:
return MagicMock()
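# Hedged usage note (not part of the original module): callers are expected to do something
# like `service = ZeroconfFactory.generate("my-service", 8888)` regardless of platform
# support; the MagicMock fallback keeps that call site working when neither backend is
# available. The name and port shown are made-up example values.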
| return PybonjourService(name, port) |
_virtual_machines_operations.py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._virtual_machines_operations import build_assess_patches_request_initial, build_capture_request_initial, build_convert_to_managed_disks_request_initial, build_create_or_update_request_initial, build_deallocate_request_initial, build_delete_request_initial, build_generalize_request, build_get_request, build_install_patches_request_initial, build_instance_view_request, build_list_all_request, build_list_available_sizes_request, build_list_by_location_request, build_list_request, build_perform_maintenance_request_initial, build_power_off_request_initial, build_reapply_request_initial, build_redeploy_request_initial, build_reimage_request_initial, build_restart_request_initial, build_retrieve_boot_diagnostics_data_request, build_run_command_request_initial, build_simulate_eviction_request, build_start_request_initial, build_update_request_initial
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class VirtualMachinesOperations:
"""VirtualMachinesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2021_04_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list_by_location(
self,
location: str,
**kwargs: Any
) -> AsyncIterable["_models.VirtualMachineListResult"]:
"""Gets all the virtual machines under the specified subscription for the specified location.
:param location: The location for which virtual machines under the subscription are queried.
:type location: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either VirtualMachineListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2021_04_01.models.VirtualMachineListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachineListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_location_request(
location=location,
subscription_id=self._config.subscription_id,
template_url=self.list_by_location.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_location_request(
location=location,
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("VirtualMachineListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_location.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/virtualMachines'} # type: ignore
async def _capture_initial(
self,
resource_group_name: str,
vm_name: str,
parameters: "_models.VirtualMachineCaptureParameters",
**kwargs: Any
) -> Optional["_models.VirtualMachineCaptureResult"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.VirtualMachineCaptureResult"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'VirtualMachineCaptureParameters')
request = build_capture_request_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self._capture_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VirtualMachineCaptureResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_capture_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/capture'} # type: ignore
@distributed_trace_async
async def begin_capture(
self,
resource_group_name: str,
vm_name: str,
parameters: "_models.VirtualMachineCaptureParameters",
**kwargs: Any
) -> AsyncLROPoller["_models.VirtualMachineCaptureResult"]:
"""Captures the VM by copying virtual hard disks of the VM and outputs a template that can be used
to create similar VMs.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_name: The name of the virtual machine.
:type vm_name: str
:param parameters: Parameters supplied to the Capture Virtual Machine operation.
:type parameters: ~azure.mgmt.compute.v2021_04_01.models.VirtualMachineCaptureParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either VirtualMachineCaptureResult or the
result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2021_04_01.models.VirtualMachineCaptureResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachineCaptureResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._capture_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
parameters=parameters,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('VirtualMachineCaptureResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_capture.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/capture'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
vm_name: str,
parameters: "_models.VirtualMachine",
**kwargs: Any
) -> "_models.VirtualMachine":
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachine"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'VirtualMachine')
request = build_create_or_update_request_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self._create_or_update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('VirtualMachine', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('VirtualMachine', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}'} # type: ignore
@distributed_trace_async
async def begin_create_or_update(
self,
resource_group_name: str,
vm_name: str,
parameters: "_models.VirtualMachine",
**kwargs: Any
) -> AsyncLROPoller["_models.VirtualMachine"]:
"""The operation to create or update a virtual machine. Please note some properties can be set
only during virtual machine creation.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_name: The name of the virtual machine.
:type vm_name: str
:param parameters: Parameters supplied to the Create Virtual Machine operation.
:type parameters: ~azure.mgmt.compute.v2021_04_01.models.VirtualMachine
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either VirtualMachine or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2021_04_01.models.VirtualMachine]
:raises: ~azure.core.exceptions.HttpResponseError
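Example (illustrative sketch): ``client`` is an assumed authenticated async
``ComputeManagementClient``; the existing VM model is fetched, modified, and written back,
with hypothetical names and tags::

    vm = await client.virtual_machines.get("my-rg", "my-vm")
    vm.tags = {"environment": "test"}
    poller = await client.virtual_machines.begin_create_or_update(
        resource_group_name="my-rg",
        vm_name="my-vm",
        parameters=vm,
    )
    updated_vm = await poller.result()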
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachine"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
parameters=parameters,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('VirtualMachine', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}'} # type: ignore
async def _update_initial(
self,
resource_group_name: str,
vm_name: str,
parameters: "_models.VirtualMachineUpdate",
**kwargs: Any
) -> "_models.VirtualMachine":
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachine"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'VirtualMachineUpdate')
request = build_update_request_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self._update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VirtualMachine', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}'} # type: ignore
@distributed_trace_async
async def begin_update(
self,
resource_group_name: str,
vm_name: str,
parameters: "_models.VirtualMachineUpdate",
**kwargs: Any
) -> AsyncLROPoller["_models.VirtualMachine"]:
"""The operation to update a virtual machine.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_name: The name of the virtual machine.
:type vm_name: str
:param parameters: Parameters supplied to the Update Virtual Machine operation.
:type parameters: ~azure.mgmt.compute.v2021_04_01.models.VirtualMachineUpdate
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either VirtualMachine or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2021_04_01.models.VirtualMachine]
:raises: ~azure.core.exceptions.HttpResponseError
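Example (illustrative sketch, assuming an authenticated async ``client`` and hypothetical
names; ``VirtualMachineUpdate`` carries only the properties to patch)::

    from azure.mgmt.compute.v2021_04_01.models import VirtualMachineUpdate

    poller = await client.virtual_machines.begin_update(
        resource_group_name="my-rg",
        vm_name="my-vm",
        parameters=VirtualMachineUpdate(tags={"owner": "team-a"}),
    )
    vm = await poller.result()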
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachine"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
parameters=parameters,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('VirtualMachine', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
vm_name: str,
force_deletion: Optional[bool] = None,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None)  # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
subscription_id=self._config.subscription_id,
force_deletion=force_deletion,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}'} # type: ignore
@distributed_trace_async
async def begin_delete(
self,
resource_group_name: str,
vm_name: str,
force_deletion: Optional[bool] = None,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""The operation to delete a virtual machine.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_name: The name of the virtual machine.
:type vm_name: str
:param force_deletion: Optional parameter to force delete virtual machines.
:type force_deletion: bool
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
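Example (illustrative sketch; ``client`` is an assumed authenticated async
``ComputeManagementClient`` and resource names are hypothetical)::

    poller = await client.virtual_machines.begin_delete(
        resource_group_name="my-rg",
        vm_name="my-vm",
        force_deletion=True,  # optional: force delete the virtual machine
    )
    await poller.result()     # wait until the service reports completion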
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
force_deletion=force_deletion,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}'} # type: ignore
@distributed_trace_async
async def get(
self,
resource_group_name: str,
vm_name: str,
expand: Optional[Union[str, "_models.InstanceViewTypes"]] = None,
**kwargs: Any
) -> "_models.VirtualMachine":
"""Retrieves information about the model view or the instance view of a virtual machine.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_name: The name of the virtual machine.
:type vm_name: str
:param expand: The expand expression to apply on the operation. 'InstanceView' retrieves a
snapshot of the runtime properties of the virtual machine that is managed by the platform and
can change outside of control plane operations. 'UserData' retrieves the UserData property as
part of the VM model view that was provided by the user during the VM Create/Update operation.
:type expand: str or ~azure.mgmt.compute.v2021_04_01.models.InstanceViewTypes
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VirtualMachine, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2021_04_01.models.VirtualMachine
:raises: ~azure.core.exceptions.HttpResponseError
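Example (illustrative sketch with hypothetical names; ``client`` is an assumed
authenticated async ``ComputeManagementClient``)::

    vm = await client.virtual_machines.get(
        resource_group_name="my-rg",
        vm_name="my-vm",
        expand="instanceView",  # include runtime status in the returned model
    )
    print(vm.name, vm.location)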
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachine"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
resource_group_name=resource_group_name,
vm_name=vm_name,
subscription_id=self._config.subscription_id,
expand=expand,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VirtualMachine', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}'} # type: ignore
@distributed_trace_async
async def instance_view(
self,
resource_group_name: str,
vm_name: str,
**kwargs: Any
) -> "_models.VirtualMachineInstanceView":
"""Retrieves information about the run-time state of a virtual machine.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_name: The name of the virtual machine.
:type vm_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VirtualMachineInstanceView, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2021_04_01.models.VirtualMachineInstanceView
:raises: ~azure.core.exceptions.HttpResponseError
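Example (illustrative sketch; names are hypothetical and ``client`` is an assumed
authenticated async ``ComputeManagementClient``)::

    view = await client.virtual_machines.instance_view("my-rg", "my-vm")
    for status in view.statuses:  # e.g. provisioning and power state
        print(status.code)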
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachineInstanceView"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_instance_view_request(
resource_group_name=resource_group_name,
vm_name=vm_name,
subscription_id=self._config.subscription_id,
template_url=self.instance_view.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VirtualMachineInstanceView', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
instance_view.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/instanceView'} # type: ignore
async def _convert_to_managed_disks_initial(
self,
resource_group_name: str,
vm_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_convert_to_managed_disks_request_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
subscription_id=self._config.subscription_id,
template_url=self._convert_to_managed_disks_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_convert_to_managed_disks_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/convertToManagedDisks'} # type: ignore
@distributed_trace_async
async def begin_convert_to_managed_disks(
self,
resource_group_name: str,
vm_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Converts virtual machine disks from blob-based to managed disks. Virtual machine must be
stop-deallocated before invoking this operation.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_name: The name of the virtual machine.
:type vm_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
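Example (illustrative sketch with hypothetical names; the VM is stop-deallocated first, as
this operation requires)::

    poller = await client.virtual_machines.begin_deallocate("my-rg", "my-vm")
    await poller.result()
    poller = await client.virtual_machines.begin_convert_to_managed_disks("my-rg", "my-vm")
    await poller.result()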
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._convert_to_managed_disks_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_convert_to_managed_disks.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/convertToManagedDisks'} # type: ignore
async def _deallocate_initial(
self,
resource_group_name: str,
vm_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_deallocate_request_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
subscription_id=self._config.subscription_id,
template_url=self._deallocate_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_deallocate_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/deallocate'} # type: ignore
@distributed_trace_async
async def begin_deallocate(
self,
resource_group_name: str,
vm_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Shuts down the virtual machine and releases the compute resources. You are not billed for the
compute resources that this virtual machine uses.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_name: The name of the virtual machine.
:type vm_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
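Example (illustrative sketch; ``client`` is an assumed authenticated async
``ComputeManagementClient`` and names are hypothetical)::

    poller = await client.virtual_machines.begin_deallocate("my-rg", "my-vm")
    await poller.result()  # compute resources are released when this returns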
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._deallocate_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_deallocate.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/deallocate'} # type: ignore
@distributed_trace_async
async def generalize(
self,
resource_group_name: str,
vm_name: str,
**kwargs: Any
) -> None:
"""Sets the OS state of the virtual machine to generalized. It is recommended to sysprep the
virtual machine before performing this operation. :code:`<br>`For Windows, please refer to
`Create a managed image of a generalized VM in Azure
<https://docs.microsoft.com/azure/virtual-machines/windows/capture-image-resource>`_.:code:`<br>`For
Linux, please refer to `How to create an image of a virtual machine or VHD
<https://docs.microsoft.com/azure/virtual-machines/linux/capture-image>`_.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_name: The name of the virtual machine.
:type vm_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
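Example (illustrative sketch with hypothetical names; the guest OS is assumed to have been
sysprepped or deprovisioned as described above)::

    await client.virtual_machines.generalize("my-rg", "my-vm")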
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_generalize_request(
resource_group_name=resource_group_name,
vm_name=vm_name,
subscription_id=self._config.subscription_id,
template_url=self.generalize.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
generalize.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/generalize'} # type: ignore
@distributed_trace
def list(
self,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.VirtualMachineListResult"]:
"""Lists all of the virtual machines in the specified resource group. Use the nextLink property in
the response to get the next page of virtual machines.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either VirtualMachineListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2021_04_01.models.VirtualMachineListResult]
:raises: ~azure.core.exceptions.HttpResponseError
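Example (illustrative sketch; iteration is asynchronous and pages are fetched lazily,
with a hypothetical resource group name)::

    async for vm in client.virtual_machines.list("my-rg"):
        print(vm.name)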
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachineListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("VirtualMachineListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines'} # type: ignore
@distributed_trace
def list_all(
self,
status_only: Optional[str] = None,
**kwargs: Any
) -> AsyncIterable["_models.VirtualMachineListResult"]:
"""Lists all of the virtual machines in the specified subscription. Use the nextLink property in
the response to get the next page of virtual machines.
:param status_only: statusOnly=true enables fetching run time status of all Virtual Machines in
the subscription.
:type status_only: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either VirtualMachineListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2021_04_01.models.VirtualMachineListResult]
:raises: ~azure.core.exceptions.HttpResponseError
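Example (illustrative sketch; ``status_only`` is passed as the string ``"true"`` and
``client`` is an assumed authenticated async ``ComputeManagementClient``)::

    async for vm in client.virtual_machines.list_all(status_only="true"):
        print(vm.name)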
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachineListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_all_request(
subscription_id=self._config.subscription_id,
status_only=status_only,
template_url=self.list_all.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_all_request(
subscription_id=self._config.subscription_id,
status_only=status_only,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("VirtualMachineListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/virtualMachines'} # type: ignore
@distributed_trace
def list_available_sizes(
self,
resource_group_name: str,
vm_name: str,
**kwargs: Any
) -> AsyncIterable["_models.VirtualMachineSizeListResult"]:
"""Lists all available virtual machine sizes to which the specified virtual machine can be
resized.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_name: The name of the virtual machine.
:type vm_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either VirtualMachineSizeListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2021_04_01.models.VirtualMachineSizeListResult]
:raises: ~azure.core.exceptions.HttpResponseError
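Example (illustrative sketch with hypothetical names)::

    async for size in client.virtual_machines.list_available_sizes("my-rg", "my-vm"):
        print(size.name, size.number_of_cores)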
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachineSizeListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_available_sizes_request(
resource_group_name=resource_group_name,
vm_name=vm_name,
subscription_id=self._config.subscription_id,
template_url=self.list_available_sizes.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_available_sizes_request(
resource_group_name=resource_group_name,
vm_name=vm_name,
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("VirtualMachineSizeListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_available_sizes.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/vmSizes'} # type: ignore
async def _power_off_initial(
self,
resource_group_name: str,
vm_name: str,
skip_shutdown: Optional[bool] = False,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_power_off_request_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
subscription_id=self._config.subscription_id,
skip_shutdown=skip_shutdown,
template_url=self._power_off_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_power_off_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/powerOff'} # type: ignore
@distributed_trace_async
async def begin_power_off(
self,
resource_group_name: str,
vm_name: str,
skip_shutdown: Optional[bool] = False,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""The operation to power off (stop) a virtual machine. The virtual machine can be restarted with
the same provisioned resources. You are still charged for this virtual machine.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_name: The name of the virtual machine.
:type vm_name: str
:param skip_shutdown: The parameter to request non-graceful VM shutdown. True value for this
flag indicates non-graceful shutdown whereas false indicates otherwise. Default value for this
flag is false if not specified.
:type skip_shutdown: bool
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
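Example (illustrative sketch; names are hypothetical and ``client`` is an assumed
authenticated async ``ComputeManagementClient``)::

    poller = await client.virtual_machines.begin_power_off(
        resource_group_name="my-rg",
        vm_name="my-vm",
        skip_shutdown=False,  # request a graceful guest shutdown
    )
    await poller.result()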
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._power_off_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
skip_shutdown=skip_shutdown,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_power_off.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/powerOff'} # type: ignore
async def _reapply_initial(
self,
resource_group_name: str,
vm_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_reapply_request_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
subscription_id=self._config.subscription_id,
template_url=self._reapply_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_reapply_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/reapply'} # type: ignore
@distributed_trace_async
async def begin_reapply(
self,
resource_group_name: str,
vm_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""The operation to reapply a virtual machine's state.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_name: The name of the virtual machine.
:type vm_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._reapply_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_reapply.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/reapply'} # type: ignore
async def _restart_initial(
self,
resource_group_name: str,
vm_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_restart_request_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
subscription_id=self._config.subscription_id,
template_url=self._restart_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_restart_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/restart'} # type: ignore
@distributed_trace_async
async def begin_restart(
self,
resource_group_name: str,
vm_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""The operation to restart a virtual machine.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_name: The name of the virtual machine.
:type vm_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._restart_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_restart.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/restart'} # type: ignore
async def _start_initial(
self,
resource_group_name: str,
vm_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_start_request_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
subscription_id=self._config.subscription_id,
template_url=self._start_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_start_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/start'} # type: ignore
@distributed_trace_async
async def begin_start(
self,
resource_group_name: str,
vm_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""The operation to start a virtual machine.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_name: The name of the virtual machine.
:type vm_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._start_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_start.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/start'} # type: ignore
async def _redeploy_initial(
self,
resource_group_name: str,
vm_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_redeploy_request_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
subscription_id=self._config.subscription_id,
template_url=self._redeploy_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_redeploy_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/redeploy'} # type: ignore
@distributed_trace_async
async def begin_redeploy(
self,
resource_group_name: str,
vm_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Shuts down the virtual machine, moves it to a new node, and powers it back on.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_name: The name of the virtual machine.
:type vm_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._redeploy_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_redeploy.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/redeploy'} # type: ignore
async def _reimage_initial(
self,
resource_group_name: str,
vm_name: str,
parameters: Optional["_models.VirtualMachineReimageParameters"] = None,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
if parameters is not None:
_json = self._serialize.body(parameters, 'VirtualMachineReimageParameters')
else:
_json = None
request = build_reimage_request_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self._reimage_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_reimage_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/reimage'} # type: ignore
@distributed_trace_async
async def begin_reimage(
self,
resource_group_name: str,
vm_name: str,
parameters: Optional["_models.VirtualMachineReimageParameters"] = None,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Reimages the virtual machine which has an ephemeral OS disk back to its initial state.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_name: The name of the virtual machine.
:type vm_name: str
:param parameters: Parameters supplied to the Reimage Virtual Machine operation.
:type parameters: ~azure.mgmt.compute.v2021_04_01.models.VirtualMachineReimageParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
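Example (illustrative sketch; field names follow the ``VirtualMachineReimageParameters``
model and resource names are hypothetical)::

    from azure.mgmt.compute.v2021_04_01.models import VirtualMachineReimageParameters

    poller = await client.virtual_machines.begin_reimage(
        resource_group_name="my-rg",
        vm_name="my-vm",
        parameters=VirtualMachineReimageParameters(temp_disk=True),
    )
    await poller.result()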
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._reimage_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
parameters=parameters,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_reimage.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/reimage'} # type: ignore
@distributed_trace_async
async def retrieve_boot_diagnostics_data(
self,
resource_group_name: str,
vm_name: str,
sas_uri_expiration_time_in_minutes: Optional[int] = None,
**kwargs: Any
) -> "_models.RetrieveBootDiagnosticsDataResult":
"""The operation to retrieve SAS URIs for a virtual machine's boot diagnostic logs.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_name: The name of the virtual machine.
:type vm_name: str
:param sas_uri_expiration_time_in_minutes: Expiration duration in minutes for the SAS URIs with
a value between 1 and 1440 minutes. :code:`<br>`:code:`<br>`NOTE: If not specified, SAS URIs
will be generated with a default expiration duration of 120 minutes.
:type sas_uri_expiration_time_in_minutes: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RetrieveBootDiagnosticsDataResult, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2021_04_01.models.RetrieveBootDiagnosticsDataResult
:raises: ~azure.core.exceptions.HttpResponseError
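Example (illustrative sketch; attribute names follow the
``RetrieveBootDiagnosticsDataResult`` model and resource names are hypothetical)::

    data = await client.virtual_machines.retrieve_boot_diagnostics_data(
        resource_group_name="my-rg",
        vm_name="my-vm",
        sas_uri_expiration_time_in_minutes=30,
    )
    print(data.console_screenshot_blob_uri)
    print(data.serial_console_log_blob_uri)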
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RetrieveBootDiagnosticsDataResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_retrieve_boot_diagnostics_data_request(
resource_group_name=resource_group_name,
vm_name=vm_name,
subscription_id=self._config.subscription_id,
sas_uri_expiration_time_in_minutes=sas_uri_expiration_time_in_minutes,
template_url=self.retrieve_boot_diagnostics_data.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('RetrieveBootDiagnosticsDataResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
retrieve_boot_diagnostics_data.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/retrieveBootDiagnosticsData'} # type: ignore
async def _perform_maintenance_initial(
self,
resource_group_name: str,
vm_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_perform_maintenance_request_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
subscription_id=self._config.subscription_id,
template_url=self._perform_maintenance_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_perform_maintenance_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/performMaintenance'} # type: ignore
@distributed_trace_async
async def begin_perform_maintenance(
self,
resource_group_name: str,
vm_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""The operation to perform maintenance on a virtual machine.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_name: The name of the virtual machine.
:type vm_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._perform_maintenance_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_perform_maintenance.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/performMaintenance'} # type: ignore
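# Illustrative usage sketch (not part of the generated client): the begin_* methods
# return an AsyncLROPoller, so callers typically await the poller and then its result:
#
#     poller = await compute_client.virtual_machines.begin_perform_maintenance(
#         resource_group_name="my-rg", vm_name="my-vm")
#     await poller.result()  # completes once the maintenance operation finishes
#
# (`compute_client` is an assumed, already-authenticated async management client.)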
@distributed_trace_async
async def simulate_eviction(
self,
resource_group_name: str,
vm_name: str,
**kwargs: Any
) -> None:
"""The operation to simulate the eviction of spot virtual machine.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_name: The name of the virtual machine.
:type vm_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_simulate_eviction_request(
resource_group_name=resource_group_name,
vm_name=vm_name,
subscription_id=self._config.subscription_id,
template_url=self.simulate_eviction.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
simulate_eviction.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/simulateEviction'} # type: ignore
async def _assess_patches_initial(
self,
resource_group_name: str,
vm_name: str,
**kwargs: Any
) -> Optional["_models.VirtualMachineAssessPatchesResult"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.VirtualMachineAssessPatchesResult"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_assess_patches_request_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
subscription_id=self._config.subscription_id,
template_url=self._assess_patches_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VirtualMachineAssessPatchesResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_assess_patches_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/assessPatches'} # type: ignore
@distributed_trace_async
async def begin_assess_patches(
self,
resource_group_name: str,
vm_name: str,
**kwargs: Any
) -> AsyncLROPoller["_models.VirtualMachineAssessPatchesResult"]:
"""Assess patches on the VM.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_name: The name of the virtual machine.
:type vm_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either VirtualMachineAssessPatchesResult or
the result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2021_04_01.models.VirtualMachineAssessPatchesResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachineAssessPatchesResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._assess_patches_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('VirtualMachineAssessPatchesResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_assess_patches.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/assessPatches'} # type: ignore
async def _install_patches_initial(
self,
resource_group_name: str,
vm_name: str,
install_patches_input: "_models.VirtualMachineInstallPatchesParameters",
**kwargs: Any
) -> Optional["_models.VirtualMachineInstallPatchesResult"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.VirtualMachineInstallPatchesResult"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(install_patches_input, 'VirtualMachineInstallPatchesParameters')
request = build_install_patches_request_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self._install_patches_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VirtualMachineInstallPatchesResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_install_patches_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/installPatches'} # type: ignore
@distributed_trace_async
async def begin_install_patches(
self,
resource_group_name: str,
vm_name: str,
install_patches_input: "_models.VirtualMachineInstallPatchesParameters",
**kwargs: Any
) -> AsyncLROPoller["_models.VirtualMachineInstallPatchesResult"]:
"""Installs patches on the VM.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_name: The name of the virtual machine.
:type vm_name: str
:param install_patches_input: Input for InstallPatches as directly received by the API.
:type install_patches_input:
~azure.mgmt.compute.v2021_04_01.models.VirtualMachineInstallPatchesParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either VirtualMachineInstallPatchesResult
or the result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2021_04_01.models.VirtualMachineInstallPatchesResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachineInstallPatchesResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._install_patches_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
install_patches_input=install_patches_input,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('VirtualMachineInstallPatchesResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_install_patches.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/installPatches'} # type: ignore
async def _run_command_initial(
self,
resource_group_name: str,
vm_name: str,
parameters: "_models.RunCommandInput",
**kwargs: Any
) -> Optional["_models.RunCommandResult"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.RunCommandResult"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'RunCommandInput')
request = build_run_command_request_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self._run_command_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('RunCommandResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_run_command_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/runCommand'} # type: ignore
@distributed_trace_async
async def begin_run_command(
self,
resource_group_name: str,
vm_name: str,
parameters: "_models.RunCommandInput",
**kwargs: Any
) -> AsyncLROPoller["_models.RunCommandResult"]:
"""Run command on the VM.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_name: The name of the virtual machine.
:type vm_name: str
:param parameters: Parameters supplied to the Run command operation.
:type parameters: ~azure.mgmt.compute.v2021_04_01.models.RunCommandInput
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either RunCommandResult or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2021_04_01.models.RunCommandResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.RunCommandResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._run_command_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
parameters=parameters,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('RunCommandResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_run_command.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/runCommand'} # type: ignore
| cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
subscription_id=self._config.subscription_id,
force_deletion=force_deletion,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {}) |
fileMock.js | module.exports = 'this is a file mock' |
||
root.go | /*
Copyright © 2022 Alexander Block <[email protected]>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package commands
import (
"context"
"fmt"
"github.com/kluctl/kluctl/v2/pkg/status"
"github.com/kluctl/kluctl/v2/pkg/utils"
"github.com/kluctl/kluctl/v2/pkg/utils/uo"
"github.com/kluctl/kluctl/v2/pkg/utils/versions"
"github.com/kluctl/kluctl/v2/pkg/version"
"github.com/kluctl/kluctl/v2/pkg/yaml"
"github.com/mattn/go-isatty"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"k8s.io/klog/v2"
"log"
"net/http"
"os"
"path/filepath"
"strings"
"time"
)
const latestReleaseUrl = "https://api.github.com/repos/kluctl/kluctl/releases/latest"
type cli struct {
Debug bool `group:"global" help:"Enable debug logging"`
NoUpdateCheck bool `group:"global" help:"Disable update check on startup"`
CheckImageUpdates checkImageUpdatesCmd `cmd:"" help:"Render deployment and check if any images have new tags available"`
Delete deleteCmd `cmd:"" help:"Delete a target (or parts of it) from the corresponding cluster"`
Deploy deployCmd `cmd:"" help:"Deploys a target to the corresponding cluster"`
Diff diffCmd `cmd:"" help:"Perform a diff between the locally rendered target and the already deployed target"` | HelmPull helmPullCmd `cmd:"" help:"Recursively searches for 'helm-chart.yaml' files and pulls the specified Helm charts"`
HelmUpdate helmUpdateCmd `cmd:"" help:"Recursively searches for 'helm-chart.yaml' files and checks for new available versions"`
ListImages listImagesCmd `cmd:"" help:"Renders the target and outputs all images used via 'images.get_image(...)'"`
ListTargets listTargetsCmd `cmd:"" help:"Outputs a yaml list with all targets, including dynamic targets"`
PokeImages pokeImagesCmd `cmd:"" help:"Replace all images in target"`
Prune pruneCmd `cmd:"" help:"Searches the target cluster for prunable objects and deletes them"`
Render renderCmd `cmd:"" help:"Renders all resources and configuration files"`
Seal sealCmd `cmd:"" help:"Seal secrets based on target's sealingConfig"`
Validate validateCmd `cmd:"" help:"Validates the already deployed deployment"`
Version versionCmd `cmd:"" help:"Print kluctl version"`
}
var flagGroups = []groupInfo{
{group: "global", title: "Global arguments:"},
{group: "project", title: "Project arguments:", description: "Define where and how to load the kluctl project and its components from."},
{group: "images", title: "Image arguments:", description: "Control fixed images and update behaviour."},
{group: "inclusion", title: "Inclusion/Exclusion arguments:", description: "Control inclusion/exclusion."},
{group: "misc", title: "Misc arguments:", description: "Command specific arguments."},
}
var cliCtx = context.Background()
func setupStatusHandler() {
var sh status.StatusHandler
if isatty.IsTerminal(os.Stderr.Fd()) {
sh = status.NewMultiLineStatusHandler(cliCtx, os.Stderr, false)
} else {
sh = status.NewSimpleStatusHandler(func(message string) {
_, _ = fmt.Fprintf(os.Stderr, "%s\n", message)
}, false)
}
cliCtx = status.NewContext(cliCtx, sh)
klog.LogToStderr(false)
klog.SetOutput(status.NewLineRedirector(sh.Info))
log.SetOutput(status.NewLineRedirector(sh.Info))
}
type VersionCheckState struct {
LastVersionCheck time.Time `yaml:"lastVersionCheck"`
}
func (c *cli) checkNewVersion() {
if c.NoUpdateCheck {
return
}
if version.GetVersion() == "0.0.0" {
return
}
versionCheckPath := filepath.Join(utils.GetTmpBaseDir(), "version_check.yaml")
var versionCheckState VersionCheckState
err := yaml.ReadYamlFile(versionCheckPath, &versionCheckState)
if err == nil {
if time.Now().Sub(versionCheckState.LastVersionCheck) < time.Hour {
return
}
}
versionCheckState.LastVersionCheck = time.Now()
_ = yaml.WriteYamlFile(versionCheckPath, &versionCheckState)
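// The state file written above acts as a once-per-hour rate limit for this check;
// it only stores the timestamp of the last check, roughly (illustrative):
// lastVersionCheck: "2022-01-02T15:04:05Z"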
s := status.Start(cliCtx, "Checking for new kluctl version")
defer s.Failed()
r, err := http.Get(latestReleaseUrl)
if err != nil {
return
}
defer r.Body.Close()
var release uo.UnstructuredObject
err = yaml.ReadYamlStream(r.Body, &release)
if err != nil {
return
}
latestVersionStr, ok, _ := release.GetNestedString("tag_name")
if !ok {
return
}
if strings.HasPrefix(latestVersionStr, "v") {
latestVersionStr = latestVersionStr[1:]
}
latestVersion := versions.LooseVersion(latestVersionStr)
localVersion := versions.LooseVersion(version.GetVersion())
if localVersion.Less(latestVersion, true) {
s.Update(fmt.Sprintf("You are using an outdated version (%v) of kluctl. You should update soon to version %v", localVersion, latestVersion))
} else {
s.Update("Your kluctl version is up-to-date")
}
s.Success()
}
func (c *cli) preRun() error {
status.FromContext(cliCtx).SetTrace(c.Debug)
c.checkNewVersion()
return nil
}
func (c *cli) Run() error {
return nil
}
func initViper() {
viper.SetConfigName("config")
viper.SetConfigType("yaml")
viper.AddConfigPath("/etc/kluctl/")
viper.AddConfigPath("$HOME/.kluctl")
if err := viper.ReadInConfig(); err != nil {
if _, ok := err.(viper.ConfigFileNotFoundError); !ok {
status.Error(cliCtx, err.Error())
os.Exit(1)
}
}
viper.SetEnvPrefix("kluctl")
viper.SetEnvKeyReplacer(strings.NewReplacer("-", "_"))
viper.AutomaticEnv()
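// With the setup above, configuration can come from /etc/kluctl/config.yaml or
// $HOME/.kluctl/config.yaml, and every flag can also be supplied via the environment
// using the KLUCTL_ prefix with dashes mapped to underscores (for example, an assumed
// flag --no-update-check would map to KLUCTL_NO_UPDATE_CHECK).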
}
func Execute() {
root := cli{}
rootCmd, err := buildRootCobraCmd(&root, "kluctl",
"Deploy and manage complex deployments on Kubernetes",
`The missing glue to put together large Kubernetes deployments,
composed of multiple smaller parts (Helm/Kustomize/...) in a manageable and unified way.`,
flagGroups)
if err != nil {
panic(err)
}
rootCmd.Version = version.GetVersion()
rootCmd.SilenceUsage = true
rootCmd.SilenceErrors = true
rootCmd.PersistentPreRunE = func(cmd *cobra.Command, args []string) error {
err := copyViperValuesToCobraCmd(cmd)
if err != nil {
return err
}
return root.preRun()
}
setupStatusHandler()
initViper()
err = rootCmd.ExecuteContext(cliCtx)
sh := status.FromContext(cliCtx)
if err != nil {
status.Error(cliCtx, err.Error())
sh.Stop()
os.Exit(1)
}
sh.Stop()
} | Downscale downscaleCmd `cmd:"" help:"Downscale all deployments"` |
1.4 palindromePermutation.py | # Given a string, check whether it is a permutation of a palindrome
| def solution(s):
s = s.replace(' ','')
if len(s) == 0 or len (s) == 1:
return True
if len(s) == 2:
return s[0] == s[1]
hashTable = [0] * 128
for i in xrange(0,len(s)):
tmp = ord(s[i])
if hashTable[tmp] == 0:
hashTable[tmp] = 1
else:
hashTable[tmp] = 0
return sum(hashTable) < 2
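# How this works: a string can be rearranged into a palindrome iff at most one
# character occurs an odd number of times. The 128-entry table (ASCII only) toggles
# a parity bit per character, so the final sum counts characters with an odd count
# and must be 0 or 1.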
def test():
assert (solution('dfjklsva ') == False),"wrong"
assert (solution('') == True),"wrong"
assert (solution('abcde fg') == False),"wrong"
assert (solution('dfsjk ls va') == False),"wrong"
assert (solution('acd eacd e') == True),"wrong"
print 'All Passed'
if __name__ == "__main__":
test() | |
amp-subscriptions.js | /**
* Copyright 2018 The AMP HTML Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS-IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import {CSS} from '../../../build/amp-subscriptions-0.1.css';
import {Dialog} from './dialog';
import {DocImpl} from './doc-impl';
import {Entitlement} from './entitlement';
import {LocalSubscriptionPlatform} from './local-subscription-platform';
import {PageConfig, PageConfigResolver} from '../../../third_party/subscriptions-project/config';
import {PlatformStore} from './platform-store';
import {Renderer} from './renderer';
import {ServiceAdapter} from './service-adapter';
import {Services} from '../../../src/services';
import {SubscriptionAnalytics, SubscriptionAnalyticsEvents} from './analytics';
import {SubscriptionPlatform} from './subscription-platform';
import {ViewerSubscriptionPlatform} from './viewer-subscription-platform';
import {ViewerTracker} from './viewer-tracker';
import {dev, user} from '../../../src/log';
import {getMode} from '../../../src/mode';
import {getWinOrigin} from '../../../src/url';
import {installStylesForDoc} from '../../../src/style-installer';
import {tryParseJson} from '../../../src/json';
/** @const */
const TAG = 'amp-subscriptions';
/** @const */
const SERVICE_TIMEOUT = 3000;
export class SubscriptionService {
/**
* @param {!../../../src/service/ampdoc-impl.AmpDoc} ampdoc
*/
constructor(ampdoc) {
const configElement = ampdoc.getElementById(TAG);
/** @const @private */
this.ampdoc_ = ampdoc;
// Install styles.
installStylesForDoc(ampdoc, CSS, () => {}, false, TAG);
/** @private {?Promise} */
this.initialized_ = null;
/** @private @const {!Renderer} */
this.renderer_ = new Renderer(ampdoc);
/** @private {?PageConfig} */
this.pageConfig_ = null;
/** @private {?JsonObject} */
this.platformConfig_ = null;
/** @private {?PlatformStore} */
this.platformStore_ = null;
/** @const @private {!Element} */
this.configElement_ = user().assertElement(configElement);
/** @private {!SubscriptionAnalytics} */
this.subscriptionAnalytics_ =
new SubscriptionAnalytics(this.configElement_);
/** @private {!ServiceAdapter} */
this.serviceAdapter_ = new ServiceAdapter(this);
/** @private {!Dialog} */
this.dialog_ = new Dialog(ampdoc);
/** @private {!ViewerTracker} */
this.viewerTracker_ = new ViewerTracker(ampdoc);
/** @private @const {!../../../src/service/viewer-impl.Viewer} */
this.viewer_ = Services.viewerForDoc(ampdoc);
/** @private {?Promise} */
this.viewTrackerPromise_ = null;
/** @const @private {!../../../src/service/timer-impl.Timer} */
this.timer_ = Services.timerFor(ampdoc.win);
/** @private @const {boolean} */
this.doesViewerProvideAuth_ = this.viewer_.hasCapability('auth');
}
/**
* @return {!Promise}
* @private
*/
initialize_() {
if (!this.initialized_) {
const doc = new DocImpl(this.ampdoc_);
const pageConfigResolver = new PageConfigResolver(doc);
this.initialized_ = Promise.all([
this.getPlatformConfig_(),
pageConfigResolver.resolveConfig(),
]).then(promiseValues => {
/** @type {!JsonObject} */
this.platformConfig_ = promiseValues[0];
/** @type {!PageConfig} */
this.pageConfig_ = promiseValues[1];
});
}
return this.initialized_;
}
/**
* @param {!JsonObject} serviceConfig
* @private
*/
initializeLocalPlatforms_(serviceConfig) {
if ((serviceConfig['serviceId'] || 'local') == 'local') {
this.platformStore_.resolvePlatform('local',
new LocalSubscriptionPlatform(
this.ampdoc_,
serviceConfig,
this.serviceAdapter_,
this.subscriptionAnalytics_
)
);
}
}
/**
* @private
* @return {!Promise<!JsonObject>}
*/
getPlatformConfig_() {
return new Promise((resolve, reject) => {
const rawContent = tryParseJson(this.configElement_.textContent, e => {
reject('Failed to parse "amp-subscriptions" JSON: ' + e);
});
resolve(rawContent);
});
}
/**
* This method registers an auto-initialized subscription platform with this
* service.
*
* @param {string} serviceId
* @param {function(!JsonObject, !ServiceAdapter):!SubscriptionPlatform} subscriptionPlatformFactory
*/
registerPlatform(serviceId, subscriptionPlatformFactory) {
return this.initialize_().then(() => {
if (this.doesViewerProvideAuth_) {
return; // External platforms should not register if viewer provides auth
}
const matchedServices = this.platformConfig_['services'].filter(
service => (service.serviceId || 'local') === serviceId);
const matchedServiceConfig = user().assert(matchedServices[0],
'No matching services for the ID found');
const subscriptionPlatform = subscriptionPlatformFactory(
matchedServiceConfig,
this.serviceAdapter_);
this.platformStore_.resolvePlatform(subscriptionPlatform.getServiceId(),
subscriptionPlatform);
this.subscriptionAnalytics_.serviceEvent(
SubscriptionAnalyticsEvents.PLATFORM_REGISTERED,
subscriptionPlatform.getServiceId()
);
this.fetchEntitlements_(subscriptionPlatform);
});
}
/**
* @param {boolean} grantState
* @private
*/
processGrantState_(grantState) {
this.renderer_.toggleLoading(false);
this.renderer_.setGrantState(grantState);
this.viewTrackerPromise_ = this.viewerTracker_.scheduleView(2000);
if (grantState === false) {
// TODO(@prateekbh): Show UI that no eligible entitlement was found
return;
}
}
/**
* @param {string} serviceId
* @param {!./entitlement.Entitlement} entitlement
* @private
*/
resolveEntitlementsToStore_(serviceId, entitlement) {
this.platformStore_.resolveEntitlement(serviceId, entitlement);
this.subscriptionAnalytics_.serviceEvent(
SubscriptionAnalyticsEvents.ENTITLEMENT_RESOLVED,
serviceId
);
}
/**
* @param {!SubscriptionPlatform} subscriptionPlatform
* @return {!Promise}
*/
fetchEntitlements_(subscriptionPlatform) {
let timeout = SERVICE_TIMEOUT;
if (getMode().development || getMode().localDev) {
timeout = SERVICE_TIMEOUT * 2;
}
return this.viewer_.whenFirstVisible().then(() => {
return this.timer_.timeoutPromise(
timeout,
subscriptionPlatform.getEntitlements()
).then(entitlement => {
entitlement = entitlement || Entitlement.empty(
subscriptionPlatform.getServiceId());
this.resolveEntitlementsToStore_(subscriptionPlatform.getServiceId(),
entitlement);
return entitlement;
}).catch(reason => {
const serviceId = subscriptionPlatform.getServiceId();
this.platformStore_.reportPlatformFailure(serviceId);
throw user().createError(
`fetch entitlements failed for ${serviceId}`, reason
);
});
});
}
/**
* Starts the amp-subscription Service
* @return {SubscriptionService}
*/
start() {
this.initialize_().then(() => {
this.subscriptionAnalytics_.event(SubscriptionAnalyticsEvents.STARTED);
this.renderer_.toggleLoading(true);
user().assert(this.pageConfig_, 'Page config is null');
if (this.doesViewerProvideAuth_) {
this.delegateAuthToViewer_();
this.startAuthorizationFlow_(false);
return;
} else if (this.platformConfig_['alwaysGrant']) {
// If the service config has the `alwaysGrant` key set to true, the publisher wants
// it to always be open unless a viewer decides otherwise.
this.processGrantState_(true);
return;
}
user().assert(this.platformConfig_['services'],
'Services not configured in service config');
const serviceIds = this.platformConfig_['services'].map(service =>
service['serviceId'] || 'local');
this.initializePlatformStore_(serviceIds);
this.platformConfig_['services'].forEach(service => {
this.initializeLocalPlatforms_(service);
});
this.platformStore_.getAvailablePlatforms().forEach(
subscriptionPlatform => {
this.fetchEntitlements_(subscriptionPlatform);
}
);
this.startAuthorizationFlow_();
});
return this;
}
/**
* Initializes the PlatformStore with the service ids.
* @param {!Array<string>} serviceIds
*/
initializePlatformStore_(serviceIds) {
const fallbackEntitlement = this.platformConfig_['fallbackEntitlement'] ?
Entitlement.parseFromJson(this.platformConfig_['fallbackEntitlement']) :
Entitlement.empty('local');
this.platformStore_ = new PlatformStore(serviceIds,
this.platformConfig_['score'],
fallbackEntitlement);
}
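/*
 * Illustrative sketch (not from the source) of the platform config shape read above;
 * only keys that this file looks up are shown and all values are placeholders:
 *
 *   {
 *     "services": [{ "serviceId": "local" }, { "serviceId": "some-vendor" }],
 *     "score": { ... },
 *     "fallbackEntitlement": { ... },
 *     "alwaysGrant": false
 *   }
 */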
/**
* Delegates authentication to viewer | */
delegateAuthToViewer_() {
const serviceIds = ['local'];
const origin = getWinOrigin(this.ampdoc_.win);
this.initializePlatformStore_(serviceIds);
this.platformConfig_['services'].forEach(service => {
if ((service['serviceId'] || 'local') == 'local') {
const viewerPlatform = new ViewerSubscriptionPlatform(
this.ampdoc_,
service,
this.serviceAdapter_,
origin,
this.subscriptionAnalytics_
);
this.platformStore_.resolvePlatform('local', viewerPlatform);
viewerPlatform.getEntitlements().then(entitlement => {
dev().assert(entitlement, 'Entitlement is null');
// Viewer authorization is redirected to use local platform instead.
this.platformStore_.resolveEntitlement('local',
/** @type {!./entitlement.Entitlement}*/ (entitlement));
});
}
});
}
/**
* Returns the singleton Dialog instance
* @return {!Dialog}
*/
getDialog() {
return this.dialog_;
}
/**
* Unblock document based on grant state and selected platform
* @param {boolean=} doPlatformSelection
* @private
*/
startAuthorizationFlow_(doPlatformSelection = true) {
this.platformStore_.getGrantStatus().then(grantState => {
this.processGrantState_(grantState);
this.performPingback_();
});
if (doPlatformSelection) {
this.selectAndActivatePlatform_();
}
}
/** @private */
selectAndActivatePlatform_() {
const requireValuesPromise = Promise.all([
this.platformStore_.getGrantStatus(),
this.platformStore_.selectPlatform(),
]);
return requireValuesPromise.then(resolvedValues => {
const selectedPlatform = resolvedValues[1];
const selectedEntitlement = this.platformStore_.getResolvedEntitlementFor(
selectedPlatform.getServiceId());
selectedPlatform.activate(selectedEntitlement);
this.subscriptionAnalytics_.serviceEvent(
SubscriptionAnalyticsEvents.PLATFORM_ACTIVATED,
selectedPlatform.getServiceId()
);
});
}
/**
* Performs pingback on local platform.
* @return {?Promise}
* @private
*/
performPingback_() {
if (this.viewTrackerPromise_) {
return this.viewTrackerPromise_.then(() => {
return this.platformStore_.getGrantEntitlement();
}).then(grantStateEntitlement => {
const localPlatform = this.platformStore_.getLocalPlatform();
if (localPlatform.isPingbackEnabled()) {
localPlatform.pingback(grantStateEntitlement
|| Entitlement.empty('local'));
}
});
}
return null;
}
/**
* Returns Page config
* @return {!PageConfig}
*/
getPageConfig() {
const pageConfig = dev().assert(this.pageConfig_,
'Page config is not yet fetched');
return /** @type {!PageConfig} */(pageConfig);
}
/**
* Re authorizes a platform
* @param {!SubscriptionPlatform} subscriptionPlatform
* @return {!Promise}
*/
reAuthorizePlatform(subscriptionPlatform) {
return this.fetchEntitlements_(subscriptionPlatform).then(() => {
this.subscriptionAnalytics_.serviceEvent(
SubscriptionAnalyticsEvents.PLATFORM_REAUTHORIZED,
subscriptionPlatform.getServiceId()
);
this.platformStore_.reset();
this.startAuthorizationFlow_();
});
}
/**
* Delegates an action to local platform.
* @param {string} action
* @return {!Promise<boolean>}
*/
delegateActionToLocal(action) {
return this.delegateActionToService(action, 'local');
}
/**
* Delegates an action to specified platform.
* @param {string} action
* @param {string} serviceId
* @return {!Promise<boolean>}
*/
delegateActionToService(action, serviceId) {
return new Promise(resolve => {
this.platformStore_.onPlatformResolves(serviceId, platform => {
dev().assert(platform, 'Platform is not registered');
this.subscriptionAnalytics_.event(
SubscriptionAnalyticsEvents.ACTION_DELEGATED,
{
action,
serviceId,
}
);
resolve(platform.executeAction(action));
});
});
}
/**
* Delegate UI decoration to another service.
* @param {!Element} element
* @param {string} serviceId
* @param {string} action
* @param {?JsonObject} options
*/
decorateServiceAction(element, serviceId, action, options) {
this.platformStore_.onPlatformResolves(serviceId, platform => {
dev().assert(platform, 'Platform is not registered');
platform.decorateUI(element, action, options);
});
}
/**
* Evaluates platforms and select the one to be selected for login.
* @return {!./subscription-platform.SubscriptionPlatform}
*/
selectPlatformForLogin() {
return this.platformStore_.selectPlatformForLogin();
}
}
/** @package @VisibleForTesting */
export function getPlatformClassForTesting() {
return SubscriptionPlatform;
}
/**
* TODO(dvoytenko): remove once compiler type checking is fixed for third_party.
* @package @VisibleForTesting
*/
export function getPageConfigClassForTesting() {
return PageConfig;
}
// Register the extension services.
AMP.extension(TAG, '0.1', function(AMP) {
AMP.registerServiceForDoc('subscriptions', function(ampdoc) {
return new SubscriptionService(ampdoc).start();
});
}); | |
init.ts | // (C) Copyright 2015 Interactive Group Pvt Ltd.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import { Component } from '@angular/core';
import { IonicPage, NavController } from 'ionic-angular';
import { SplashScreen } from '@ionic-native/splash-screen';
import { CoreAppProvider } from '@providers/app';
import { CoreInitDelegate } from '@providers/init';
import { CoreSitesProvider } from '@providers/sites';
import { CoreConstants } from '../../../constants';
import { CoreLoginHelperProvider } from '../../providers/helper';
/**
* Page that displays a "splash screen" while the app is being initialized.
*/
@IonicPage({ segment: 'core-login-init' })
@Component({
selector: 'page-core-login-init',
templateUrl: 'init.html',
})
export class | {
constructor(private navCtrl: NavController, private appProvider: CoreAppProvider, private initDelegate: CoreInitDelegate,
private sitesProvider: CoreSitesProvider, private loginHelper: CoreLoginHelperProvider,
private splashScreen: SplashScreen) { }
/**
* View loaded.
*/
ionViewDidLoad(): void {
// Wait for the app to be ready.
this.initDelegate.ready().then(() => {
// Check if there was a pending redirect.
const redirectData = this.appProvider.getRedirect();
if (redirectData.siteId) {
// Unset redirect data.
this.appProvider.storeRedirect('', '', '');
// Only accept the redirect if it was stored less than 20 seconds ago.
if (Date.now() - redirectData.timemodified < 20000) {
if (redirectData.siteId != CoreConstants.NO_SITE_ID) {
// The redirect is pointing to a site, load it.
return this.sitesProvider.loadSite(redirectData.siteId, redirectData.page, redirectData.params)
.then((loggedIn) => {
if (loggedIn) {
return this.loginHelper.goToSiteInitialPage(this.navCtrl, redirectData.page, redirectData.params,
{ animate: false });
}
}).catch(() => {
// Site doesn't exist.
return this.loadPage();
});
} else {
// No site to load, open the page.
return this.loginHelper.goToNoSitePage(this.navCtrl, redirectData.page, redirectData.params);
}
}
}
return this.loadPage();
}).then(() => {
// If we hide the splash screen now, the init view is still seen for an instant. Wait a bit to make sure it isn't seen.
setTimeout(() => {
this.splashScreen.hide();
}, 100);
});
}
/**
* Load the right page.
*
* @return Promise resolved when done.
*/
protected loadPage(): Promise<any> {
if (this.sitesProvider.isLoggedIn()) {
if (this.loginHelper.isSiteLoggedOut()) {
return this.sitesProvider.logout().then(() => {
return this.loadPage();
});
}
return this.loginHelper.goToSiteInitialPage();
}
return this.navCtrl.setRoot('CoreLoginSitesPage');
}
}
| CoreLoginInitPage |
polyline-manager.js | import { Injectable, NgZone } from '@angular/core';
import { Observable } from 'rxjs/Observable';
import { GoogleMapsAPIWrapper } from '../google-maps-api-wrapper';
export var PolylineManager = (function () {
function PolylineManager(_mapsWrapper, _zone) {
this._mapsWrapper = _mapsWrapper;
this._zone = _zone;
this._polylines = new Map();
}
PolylineManager._convertPoints = function (line) {
var path = line._getPoints().map(function (point) {
return { lat: point.latitude, lng: point.longitude };
});
return path;
};
PolylineManager.prototype.addPolyline = function (line) {
var path = PolylineManager._convertPoints(line);
var polylinePromise = this._mapsWrapper.createPolyline({
clickable: line.clickable,
draggable: line.draggable,
editable: line.editable,
geodesic: line.geodesic,
strokeColor: line.strokeColor,
strokeOpacity: line.strokeOpacity,
strokeWeight: line.strokeWeight,
visible: line.visible,
zIndex: line.zIndex,
path: path
});
this._polylines.set(line, polylinePromise);
};
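// Note: each created polyline is stored as a promise keyed by the requesting line
// object, so the update/delete methods below can await creation before touching it.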
PolylineManager.prototype.updatePolylinePoints = function (line) {
var _this = this;
var path = PolylineManager._convertPoints(line);
var m = this._polylines.get(line);
if (m == null) {
return Promise.resolve();
}
return m.then(function (l) { return _this._zone.run(function () { l.setPath(path); }); });
};
PolylineManager.prototype.setPolylineOptions = function (line, options) {
return this._polylines.get(line).then(function (l) { l.setOptions(options); });
};
PolylineManager.prototype.deletePolyline = function (line) {
var _this = this;
var m = this._polylines.get(line);
if (m == null) {
return Promise.resolve();
}
return m.then(function (l) {
return _this._zone.run(function () {
l.setMap(null);
_this._polylines.delete(line);
});
});
};
PolylineManager.prototype.createEventObservable = function (eventName, line) {
var _this = this;
return Observable.create(function (observer) {
_this._polylines.get(line).then(function (l) { | PolylineManager.decorators = [
{ type: Injectable },
];
/** @nocollapse */
PolylineManager.ctorParameters = function () { return [
{ type: GoogleMapsAPIWrapper, },
{ type: NgZone, },
]; };
return PolylineManager;
}());
//# sourceMappingURL=polyline-manager.js.map | l.addListener(eventName, function (e) { return _this._zone.run(function () { return observer.next(e); }); });
});
});
}; |
fileError.d.ts | /** | */
export declare class FileError extends Error {
/**
* Thrown when an operation involving files fails.
* @param message Custom message for this error.
*/
constructor(message?: string);
} | * Related to a file operation error. |
machineLearningCompute.go | // *** WARNING: this file was generated by the Pulumi SDK Generator. ***
// *** Do not edit by hand unless you're certain you know what you are doing! ***
package v20200901preview
import (
"context"
"reflect"
"github.com/pkg/errors"
"github.com/pulumi/pulumi/sdk/v2/go/pulumi"
)
// Machine Learning compute object wrapped into ARM resource envelope.
type MachineLearningCompute struct {
pulumi.CustomResourceState
// The identity of the resource.
Identity IdentityResponsePtrOutput `pulumi:"identity"`
// Specifies the location of the resource.
Location pulumi.StringPtrOutput `pulumi:"location"`
// Specifies the name of the resource.
Name pulumi.StringOutput `pulumi:"name"`
// Compute properties
Properties pulumi.AnyOutput `pulumi:"properties"`
// The sku of the workspace.
Sku SkuResponsePtrOutput `pulumi:"sku"`
// Contains resource tags defined as key/value pairs.
Tags pulumi.StringMapOutput `pulumi:"tags"`
// Specifies the type of the resource.
Type pulumi.StringOutput `pulumi:"type"`
}
// NewMachineLearningCompute registers a new resource with the given unique name, arguments, and options.
func NewMachineLearningCompute(ctx *pulumi.Context,
name string, args *MachineLearningComputeArgs, opts ...pulumi.ResourceOption) (*MachineLearningCompute, error) {
if args == nil {
return nil, errors.New("missing one or more required arguments")
}
if args.ResourceGroupName == nil {
return nil, errors.New("invalid value for required argument 'ResourceGroupName'")
}
if args.WorkspaceName == nil {
return nil, errors.New("invalid value for required argument 'WorkspaceName'")
}
aliases := pulumi.Aliases([]pulumi.Alias{
{
Type: pulumi.String("azure-nextgen:machinelearningservices/v20200901preview:MachineLearningCompute"),
},
{
Type: pulumi.String("azure-native:machinelearningservices:MachineLearningCompute"),
},
{
Type: pulumi.String("azure-nextgen:machinelearningservices:MachineLearningCompute"),
},
{
Type: pulumi.String("azure-native:machinelearningservices/latest:MachineLearningCompute"),
},
{
Type: pulumi.String("azure-nextgen:machinelearningservices/latest:MachineLearningCompute"),
},
{
Type: pulumi.String("azure-native:machinelearningservices/v20180301preview:MachineLearningCompute"),
},
{
Type: pulumi.String("azure-nextgen:machinelearningservices/v20180301preview:MachineLearningCompute"),
},
{
Type: pulumi.String("azure-native:machinelearningservices/v20181119:MachineLearningCompute"),
},
{
Type: pulumi.String("azure-nextgen:machinelearningservices/v20181119:MachineLearningCompute"),
},
{
Type: pulumi.String("azure-native:machinelearningservices/v20190501:MachineLearningCompute"),
},
{
Type: pulumi.String("azure-nextgen:machinelearningservices/v20190501:MachineLearningCompute"),
},
{
Type: pulumi.String("azure-native:machinelearningservices/v20190601:MachineLearningCompute"),
},
{
Type: pulumi.String("azure-nextgen:machinelearningservices/v20190601:MachineLearningCompute"),
},
{
Type: pulumi.String("azure-native:machinelearningservices/v20191101:MachineLearningCompute"),
},
{
Type: pulumi.String("azure-nextgen:machinelearningservices/v20191101:MachineLearningCompute"),
},
{
Type: pulumi.String("azure-native:machinelearningservices/v20200101:MachineLearningCompute"),
},
{
Type: pulumi.String("azure-nextgen:machinelearningservices/v20200101:MachineLearningCompute"),
},
{
Type: pulumi.String("azure-native:machinelearningservices/v20200218preview:MachineLearningCompute"),
},
{
Type: pulumi.String("azure-nextgen:machinelearningservices/v20200218preview:MachineLearningCompute"),
},
{
Type: pulumi.String("azure-native:machinelearningservices/v20200301:MachineLearningCompute"),
},
{
Type: pulumi.String("azure-nextgen:machinelearningservices/v20200301:MachineLearningCompute"),
},
{
Type: pulumi.String("azure-native:machinelearningservices/v20200401:MachineLearningCompute"),
},
{
Type: pulumi.String("azure-nextgen:machinelearningservices/v20200401:MachineLearningCompute"),
},
{
Type: pulumi.String("azure-native:machinelearningservices/v20200501preview:MachineLearningCompute"),
},
{
Type: pulumi.String("azure-nextgen:machinelearningservices/v20200501preview:MachineLearningCompute"),
},
{
Type: pulumi.String("azure-native:machinelearningservices/v20200515preview:MachineLearningCompute"),
},
{
Type: pulumi.String("azure-nextgen:machinelearningservices/v20200515preview:MachineLearningCompute"),
},
{
Type: pulumi.String("azure-native:machinelearningservices/v20200601:MachineLearningCompute"),
},
{
Type: pulumi.String("azure-nextgen:machinelearningservices/v20200601:MachineLearningCompute"),
},
{
Type: pulumi.String("azure-native:machinelearningservices/v20200801:MachineLearningCompute"),
},
{
Type: pulumi.String("azure-nextgen:machinelearningservices/v20200801:MachineLearningCompute"),
},
{
Type: pulumi.String("azure-native:machinelearningservices/v20210101:MachineLearningCompute"),
},
{
Type: pulumi.String("azure-nextgen:machinelearningservices/v20210101:MachineLearningCompute"),
},
})
opts = append(opts, aliases)
var resource MachineLearningCompute
err := ctx.RegisterResource("azure-native:machinelearningservices/v20200901preview:MachineLearningCompute", name, args, &resource, opts...)
if err != nil |
return &resource, nil
}
// GetMachineLearningCompute gets an existing MachineLearningCompute resource's state with the given name, ID, and optional
// state properties that are used to uniquely qualify the lookup (nil if not required).
func GetMachineLearningCompute(ctx *pulumi.Context,
name string, id pulumi.IDInput, state *MachineLearningComputeState, opts ...pulumi.ResourceOption) (*MachineLearningCompute, error) {
var resource MachineLearningCompute
err := ctx.ReadResource("azure-native:machinelearningservices/v20200901preview:MachineLearningCompute", name, id, state, &resource, opts...)
if err != nil {
return nil, err
}
return &resource, nil
}
// Input properties used for looking up and filtering MachineLearningCompute resources.
type machineLearningComputeState struct {
// The identity of the resource.
Identity *IdentityResponse `pulumi:"identity"`
// Specifies the location of the resource.
Location *string `pulumi:"location"`
// Specifies the name of the resource.
Name *string `pulumi:"name"`
// Compute properties
Properties interface{} `pulumi:"properties"`
// The sku of the workspace.
Sku *SkuResponse `pulumi:"sku"`
// Contains resource tags defined as key/value pairs.
Tags map[string]string `pulumi:"tags"`
// Specifies the type of the resource.
Type *string `pulumi:"type"`
}
type MachineLearningComputeState struct {
// The identity of the resource.
Identity IdentityResponsePtrInput
// Specifies the location of the resource.
Location pulumi.StringPtrInput
// Specifies the name of the resource.
Name pulumi.StringPtrInput
// Compute properties
Properties pulumi.Input
// The sku of the workspace.
Sku SkuResponsePtrInput
// Contains resource tags defined as key/value pairs.
Tags pulumi.StringMapInput
// Specifies the type of the resource.
Type pulumi.StringPtrInput
}
func (MachineLearningComputeState) ElementType() reflect.Type {
return reflect.TypeOf((*machineLearningComputeState)(nil)).Elem()
}
type machineLearningComputeArgs struct {
// Name of the Azure Machine Learning compute.
ComputeName *string `pulumi:"computeName"`
// The identity of the resource.
Identity *Identity `pulumi:"identity"`
// Specifies the location of the resource.
Location *string `pulumi:"location"`
// Compute properties
Properties interface{} `pulumi:"properties"`
// Name of the resource group in which workspace is located.
ResourceGroupName string `pulumi:"resourceGroupName"`
// The sku of the workspace.
Sku *Sku `pulumi:"sku"`
// Contains resource tags defined as key/value pairs.
Tags map[string]string `pulumi:"tags"`
// Name of Azure Machine Learning workspace.
WorkspaceName string `pulumi:"workspaceName"`
}
// The set of arguments for constructing a MachineLearningCompute resource.
type MachineLearningComputeArgs struct {
// Name of the Azure Machine Learning compute.
ComputeName pulumi.StringPtrInput
// The identity of the resource.
Identity IdentityPtrInput
// Specifies the location of the resource.
Location pulumi.StringPtrInput
// Compute properties
Properties pulumi.Input
// Name of the resource group in which workspace is located.
ResourceGroupName pulumi.StringInput
// The sku of the workspace.
Sku SkuPtrInput
// Contains resource tags defined as key/value pairs.
Tags pulumi.StringMapInput
// Name of Azure Machine Learning workspace.
WorkspaceName pulumi.StringInput
}
func (MachineLearningComputeArgs) ElementType() reflect.Type {
return reflect.TypeOf((*machineLearningComputeArgs)(nil)).Elem()
}
type MachineLearningComputeInput interface {
pulumi.Input
ToMachineLearningComputeOutput() MachineLearningComputeOutput
ToMachineLearningComputeOutputWithContext(ctx context.Context) MachineLearningComputeOutput
}
func (*MachineLearningCompute) ElementType() reflect.Type {
return reflect.TypeOf((*MachineLearningCompute)(nil))
}
func (i *MachineLearningCompute) ToMachineLearningComputeOutput() MachineLearningComputeOutput {
return i.ToMachineLearningComputeOutputWithContext(context.Background())
}
func (i *MachineLearningCompute) ToMachineLearningComputeOutputWithContext(ctx context.Context) MachineLearningComputeOutput {
return pulumi.ToOutputWithContext(ctx, i).(MachineLearningComputeOutput)
}
type MachineLearningComputeOutput struct {
*pulumi.OutputState
}
func (MachineLearningComputeOutput) ElementType() reflect.Type {
return reflect.TypeOf((*MachineLearningCompute)(nil))
}
func (o MachineLearningComputeOutput) ToMachineLearningComputeOutput() MachineLearningComputeOutput {
return o
}
func (o MachineLearningComputeOutput) ToMachineLearningComputeOutputWithContext(ctx context.Context) MachineLearningComputeOutput {
return o
}
func init() {
pulumi.RegisterOutputType(MachineLearningComputeOutput{})
}
| {
return nil, err
} |
user.go | // Copyright 2015 Keybase, Inc. All rights reserved. Use of
// this source code is governed by the included BSD license.
package libkb
import (
"encoding/hex"
"errors"
"fmt"
"io"
"regexp"
"time"
keybase1 "github.com/keybase/client/go/protocol/keybase1"
stellar1 "github.com/keybase/client/go/protocol/stellar1"
jsonw "github.com/keybase/go-jsonw"
)
type UserBasic interface {
GetUID() keybase1.UID
GetName() string
}
var _ UserBasic = keybase1.UserPlusKeys{}
type User struct {
// Raw JSON element read from the server or our local DB.
basics *jsonw.Wrapper
publicKeys *jsonw.Wrapper
pictures *jsonw.Wrapper
// Processed fields
id keybase1.UID
name string
sigChainMem *SigChain
idTable *IdentityTable
sigHints *SigHints
status keybase1.StatusCode
leaf MerkleUserLeaf
// Loaded from publicKeys
keyFamily *KeyFamily
// Available on partially-copied clones of the User object
ckfShallowCopy *ComputedKeyFamily
dirty bool
Contextified
}
func NewUserThin(name string, uid keybase1.UID) *User {
return &User{name: name, id: uid}
}
func newUser(g *GlobalContext, o *jsonw.Wrapper, fromStorage bool) (*User, error) {
uid, err := GetUID(o.AtKey("id"))
if err != nil {
return nil, fmt.Errorf("user object lacks an ID: %s", err)
}
name, err := o.AtKey("basics").AtKey("username").GetString()
if err != nil {
return nil, fmt.Errorf("user object for %s lacks a name", uid)
}
// This field was a late addition, so cached objects might not have it.
// If we load from storage and it wasn't there, then it's safe to assume
// it's a 0. All server replies should have this field though.
status, err := o.AtPath("basics.status").GetInt()
if err != nil {
if fromStorage {
status = SCOk
} else {
return nil, fmt.Errorf("user object for %s lacks a status field", uid)
}
}
kf, err := ParseKeyFamily(g, o.AtKey("public_keys"))
if err != nil {
return nil, err
}
return &User{
basics: o.AtKey("basics"),
publicKeys: o.AtKey("public_keys"),
pictures: o.AtKey("pictures"),
keyFamily: kf,
id: uid,
name: name,
status: keybase1.StatusCode(status),
dirty: false,
Contextified: NewContextified(g),
}, nil
}
func NewUserFromServer(g *GlobalContext, o *jsonw.Wrapper) (*User, error) {
u, e := newUser(g, o, false)
if e == nil {
u.dirty = true
}
return u, e
}
func NewUserFromLocalStorage(g *GlobalContext, o *jsonw.Wrapper) (*User, error) {
u, err := newUser(g, o, true)
return u, err
}
func (u *User) GetNormalizedName() NormalizedUsername { return NewNormalizedUsername(u.name) }
func (u *User) GetName() string { return u.name }
func (u *User) GetUID() keybase1.UID { return u.id }
func (u *User) GetStatus() keybase1.StatusCode { return u.status }
func (u *User) GetSalt() (salt []byte, err error) {
saltHex, err := u.basics.AtKey("salt").GetString()
if err != nil {
return nil, err
}
salt, err = hex.DecodeString(saltHex)
if err != nil {
return nil, err
}
return salt, nil
}
func (u *User) GetIDVersion() (int64, error) {
return u.basics.AtKey("id_version").GetInt64()
}
func (u *User) GetSigChainLastKnownSeqno() keybase1.Seqno {
if u.sigChain() == nil {
return 0
}
return u.sigChain().GetLastKnownSeqno()
}
func (u *User) GetSigChainLastKnownID() LinkID {
if u.sigChain() == nil {
return nil
}
return u.sigChain().GetLastKnownID()
}
func (u *User) GetCurrentEldestSeqno() keybase1.Seqno {
if u.sigChain() == nil {
// Note that NameWithEldestSeqno will return an error if you call it with zero.
return 0
}
return u.sigChain().currentSubchainStart
}
func (u *User) GetExpectedNextHighSkip(mctx MetaContext) (HighSkip, error) {
if u.sigChain() == nil {
return NewInitialHighSkip(), nil
}
return u.sigChain().GetExpectedNextHighSkip(mctx, u.GetUID())
}
func (u *User) GetLastLink() *ChainLink {
if u.sigChain() == nil {
return nil
}
return u.sigChain().GetLastLink()
}
func (u *User) ToUserVersion() keybase1.UserVersion {
return keybase1.UserVersion{
Uid: u.GetUID(),
EldestSeqno: u.GetCurrentEldestSeqno(),
}
}
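// IsNewerThan reports whether u looks strictly newer than v by comparing
// ID versions together with last-known sigchain seqnos.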
func (u *User) IsNewerThan(v *User) (bool, error) {
var idvU, idvV int64
var err error
idvU, err = u.GetIDVersion()
if err != nil {
return false, err
}
idvV, err = v.GetIDVersion()
if err != nil {
return false, err
}
return ((idvU > idvV && u.GetSigChainLastKnownSeqno() >= v.GetSigChainLastKnownSeqno()) ||
(idvU >= idvV && u.GetSigChainLastKnownSeqno() > v.GetSigChainLastKnownSeqno())), nil
}
func (u *User) GetKeyFamily() *KeyFamily {
return u.keyFamily
}
func (u *User) GetComputedKeyInfos() *ComputedKeyInfos {
if u.sigChain() == nil {
return nil
}
return u.sigChain().GetComputedKeyInfos()
}
func (u *User) GetSigHintsVersion() int {
if u.sigHints == nil {
return 0
}
return u.sigHints.version
}
func (u *User) GetComputedKeyFamily() (ret *ComputedKeyFamily) {
if u.sigChain() != nil && u.keyFamily != nil {
cki := u.sigChain().GetComputedKeyInfos()
if cki == nil {
return nil
}
ret = &ComputedKeyFamily{cki: cki, kf: u.keyFamily, Contextified: u.Contextified}
} else if u.ckfShallowCopy != nil {
ret = u.ckfShallowCopy
}
return ret
}
// GetActivePGPKeys looks into the user's ComputedKeyFamily and
// returns only the active PGP keys. If you want only sibkeys, then
// specify sibkey=true.
func (u *User) GetActivePGPKeys(sibkey bool) (ret []*PGPKeyBundle) {
if ckf := u.GetComputedKeyFamily(); ckf != nil {
ret = ckf.GetActivePGPKeys(sibkey)
}
return
}
// FilterActivePGPKeys returns the active pgp keys that match
// query.
func (u *User) FilterActivePGPKeys(sibkey bool, query string) []*PGPKeyBundle {
keys := u.GetActivePGPKeys(sibkey)
var res []*PGPKeyBundle
for _, k := range keys {
if KeyMatchesQuery(k, query, false) {
res = append(res, k)
}
}
return res
}
// GetActivePGPFingerprints looks into the user's ComputedKeyFamily and
// returns only the fingerprints of the active PGP keys.
// If you want only sibkeys, then specify sibkey=true.
func (u *User) GetActivePGPFingerprints(sibkey bool) (ret []PGPFingerprint) {
for _, pgp := range u.GetActivePGPKeys(sibkey) {
ret = append(ret, pgp.GetFingerprint())
}
return
}
func (u *User) GetActivePGPKIDs(sibkey bool) (ret []keybase1.KID) {
for _, pgp := range u.GetActivePGPKeys(sibkey) {
ret = append(ret, pgp.GetKID())
}
return
}
func (u *User) GetDeviceSibkey() (GenericKey, error) {
did := u.G().Env.GetDeviceIDForUsername(u.GetNormalizedName())
if did.IsNil() {
return nil, NotProvisionedError{}
}
ckf := u.GetComputedKeyFamily()
if ckf == nil {
return nil, KeyFamilyError{"no key family available"}
}
return ckf.GetSibkeyForDevice(did)
}
func (u *User) GetDeviceSubkey() (subkey GenericKey, err error) {
ckf := u.GetComputedKeyFamily()
if ckf == nil {
err = KeyFamilyError{"no key family available"}
return
}
did := u.G().Env.GetDeviceIDForUsername(u.GetNormalizedName())
if did.IsNil() {
err = NotProvisionedError{}
return
}
return ckf.GetEncryptionSubkeyForDevice(did)
}
func (u *User) HasEncryptionSubkey() bool {
if ckf := u.GetComputedKeyFamily(); ckf != nil {
return ckf.HasActiveEncryptionSubkey()
}
return false
}
func (u *User) CheckBasicsFreshness(server int64) (current bool, reason string, err error) {
var stored int64
if stored, err = u.GetIDVersion(); err != nil {
return false, "", err
}
if stored >= server {
u.G().Log.Debug("| Local basics version is up-to-date @ version %d", stored)
return true, "", nil
}
u.G().Log.Debug("| Local basics version is out-of-date: %d < %d", stored, server)
return false, fmt.Sprintf("idv %v < %v", stored, server), nil
}
func (u *User) StoreSigChain(m MetaContext) error {
var err error
if u.sigChain() != nil {
err = u.sigChain().Store(m)
}
return err
}
func (u *User) LoadSigChains(m MetaContext, f *MerkleUserLeaf, self bool, stubMode StubMode) (err error) {
defer TimeLog(fmt.Sprintf("LoadSigChains: %s", u.name), u.G().Clock().Now(), u.G().Log.Debug)
loader := SigChainLoader{
user: u,
self: self,
leaf: f,
chainType: PublicChain,
preload: u.sigChain(),
stubMode: stubMode,
MetaContextified: NewMetaContextified(m),
}
u.sigChainMem, err = loader.Load()
// Eventually load the others, but for now, this one is good enough
return err
}
func (u *User) Store(m MetaContext) error {
m.Debug("+ Store user %s", u.name)
	// These might be dirty, in which case we can write them back
	// to local storage. Note, they can be dirty even if the user is clean.
if err := u.sigHints.Store(m); err != nil {
return err
}
if !u.dirty {
m.Debug("- Store for %s skipped; user wasn't dirty", u.name)
return nil
}
if err := u.StoreSigChain(m); err != nil {
return err
}
if err := u.StoreTopLevel(m); err != nil {
return err
}
u.dirty = false
m.Debug("- Store user %s -> OK", u.name)
return nil
}
func (u *User) StoreTopLevel(m MetaContext) error {
jw := jsonw.NewDictionary()
err := jw.SetKey("id", UIDWrapper(u.id))
if err != nil {
return err
}
err = jw.SetKey("basics", u.basics)
if err != nil {
return err
}
err = jw.SetKey("public_keys", u.publicKeys)
if err != nil {
return err
}
err = jw.SetKey("pictures", u.pictures)
if err != nil {
return err
}
err = u.G().LocalDb.Put(
DbKeyUID(DBUser, u.id),
[]DbKey{{Typ: DBLookupUsername, Key: u.name}},
jw,
)
if err != nil {
m.Debug("StoreTopLevel -> %s", ErrToOk(err))
}
return err
}
func (u *User) SyncedSecretKey(m MetaContext) (ret *SKB, err error) {
if lctx := m.LoginContext(); lctx != nil {
return u.getSyncedSecretKeyLogin(m, lctx)
}
return u.GetSyncedSecretKey(m)
}
func (u *User) getSyncedSecretKeyLogin(m MetaContext, lctx LoginContext) (ret *SKB, err error) {
defer m.Trace("User#getSyncedSecretKeyLogin", func() error { return err })()
if err = lctx.RunSecretSyncer(m, u.id); err != nil {
return
}
ckf := u.GetComputedKeyFamily()
if ckf == nil {
m.Debug("| short-circuit; no Computed key family")
return
}
ret, err = lctx.SecretSyncer().FindActiveKey(ckf)
return
}
func (u *User) SyncedSecretKeyWithSka(m MetaContext, ska SecretKeyArg) (ret *SKB, err error) {
keys, err := u.GetSyncedSecretKeys(m)
if err != nil {
return nil, err
}
var errors []error
for _, key := range keys {
pub, err := key.GetPubKey()
if err != nil {
errors = append(errors, err)
continue
}
if KeyMatchesQuery(pub, ska.KeyQuery, ska.ExactMatch) {
return key, nil
}
}
if len(errors) > 0 {
// No matching key found and we hit errors.
return nil, CombineErrors(errors...)
}
return nil, NoSecretKeyError{}
}
func (u *User) GetSyncedSecretKey(m MetaContext) (ret *SKB, err error) {
defer m.Trace("User#GetSyncedSecretKey", func() error { return err })()
skbs, err := u.GetSyncedSecretKeys(m)
if err != nil {
return nil, err
}
if len(skbs) == 0 {
return nil, nil
}
m.Debug("NOTE: using GetSyncedSecretKey, returning first secret key from randomly ordered map")
return skbs[0], nil
}
func (u *User) GetSyncedSecretKeys(m MetaContext) (ret []*SKB, err error) {
defer m.Trace("User#GetSyncedSecretKeys", func() error { return err })()
if err = u.SyncSecrets(m); err != nil {
return
}
ckf := u.GetComputedKeyFamily()
if ckf == nil {
m.Debug("| short-circuit; no Computed key family")
return
}
syncer, err := m.SyncSecrets()
if err != nil {
return nil, err
}
ret, err = syncer.FindActiveKeys(ckf)
return ret, err
}
// AllSyncedSecretKeys returns all the PGP key blocks that were
// synced to the API server. LoginContext can be nil if this isn't
// used while logging in or signing up.
func (u *User) AllSyncedSecretKeys(m MetaContext) (keys []*SKB, err error) {
defer m.Trace("User#AllSyncedSecretKeys", func() error { return err })()
m.Dump()
ss, err := m.SyncSecretsForUID(u.GetUID())
if err != nil {
return nil, err
}
ckf := u.GetComputedKeyFamily()
if ckf == nil {
m.Debug("| short-circuit; no Computed key family")
return nil, nil
}
keys = ss.AllActiveKeys(ckf)
return keys, nil
}
func (u *User) SyncSecrets(m MetaContext) error {
_, err := m.SyncSecretsForUID(u.GetUID())
return err
}
// May return an empty KID
func (u *User) GetEldestKID() (ret keybase1.KID) {
return u.leaf.eldest
}
func (u *User) GetPublicChainTail() *MerkleTriple {
if u.sigChainMem == nil {
return nil
}
return u.sigChain().GetCurrentTailTriple()
}
func (u *User) IDTable() *IdentityTable {
return u.idTable
}
// Return the active stellar public address for a user.
// Returns nil if there is none or it has not been loaded.
func (u *User) StellarAccountID() *stellar1.AccountID {
if u.idTable == nil {
return nil
}
return u.idTable.StellarAccountID()
}
func (u *User) sigChain() *SigChain {
return u.sigChainMem
}
func (u *User) MakeIDTable(m MetaContext) error {
kid := u.GetEldestKID()
if kid.IsNil() {
return NoKeyError{"Expected a key but didn't find one"}
}
idt, err := NewIdentityTable(m, kid, u.sigChain(), u.sigHints)
if err != nil {
return err
}
u.idTable = idt
return nil
}
// GetHighLinkSeqnos gets the list of all high links in the user's sigchain, in ascending order.
func (u *User) GetHighLinkSeqnos(mctx MetaContext) (res []keybase1.Seqno, err error) {
sigChain := u.sigChain() | for _, c := range sigChain.chainLinks {
high, err := c.IsHighUserLink(mctx, u.GetUID())
if err != nil {
return nil, fmt.Errorf("error determining link %v", c.GetSeqno())
}
if high {
res = append(res, c.GetSeqno())
}
}
return res, nil
}
func (u *User) VerifySelfSig() error {
u.G().Log.Debug("+ VerifySelfSig for user %s", u.name)
if u.IDTable().VerifySelfSig(u.GetNormalizedName(), u.id) {
u.G().Log.Debug("- VerifySelfSig via SigChain")
return nil
}
if u.VerifySelfSigByKey() {
u.G().Log.Debug("- VerifySelfSig via Key")
return nil
}
u.G().Log.Debug("- VerifySelfSig failed")
return fmt.Errorf("Failed to find a self-signature for %s", u.name)
}
func (u *User) VerifySelfSigByKey() (ret bool) {
name := u.GetName()
if ckf := u.GetComputedKeyFamily(); ckf != nil {
ret = ckf.FindKeybaseName(name)
}
return
}
func (u *User) HasActiveKey() (ret bool) {
u.G().Log.Debug("+ HasActiveKey")
defer func() {
u.G().Log.Debug("- HasActiveKey -> %v", ret)
}()
if u.GetEldestKID().IsNil() {
u.G().Log.Debug("| no eldest KID; must have reset or be new")
ret = false
return
}
if ckf := u.GetComputedKeyFamily(); ckf != nil {
u.G().Log.Debug("| Checking user's ComputedKeyFamily")
ret = ckf.HasActiveKey()
return
}
if u.sigChain() == nil {
u.G().Log.Debug("User HasActiveKey: sig chain is nil")
} else if u.sigChain().GetComputedKeyInfos() == nil {
u.G().Log.Debug("User HasActiveKey: comp key infos is nil")
}
if u.keyFamily == nil {
u.G().Log.Debug("User HasActiveKey: keyFamily is nil")
}
return false
}
func (u *User) Equal(other *User) bool {
return u.id == other.id
}
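// TmpTrackChainLinkFor returns any temporary tracking statement stored locally
// for the given user, if one exists.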
func (u *User) TmpTrackChainLinkFor(m MetaContext, username string, uid keybase1.UID) (tcl *TrackChainLink, err error) {
return TmpTrackChainLinkFor(m, u.id, uid)
}
func TmpTrackChainLinkFor(m MetaContext, me keybase1.UID, them keybase1.UID) (tcl *TrackChainLink, err error) {
m.Debug("+ TmpTrackChainLinkFor for %s", them)
tcl, err = LocalTmpTrackChainLinkFor(m, me, them)
m.Debug("- TmpTrackChainLinkFor for %s -> %v, %v", them, (tcl != nil), err)
return tcl, err
}
func (u *User) TrackChainLinkFor(m MetaContext, username NormalizedUsername, uid keybase1.UID) (*TrackChainLink, error) {
u.G().Log.Debug("+ TrackChainLinkFor for %s", uid)
defer u.G().Log.Debug("- TrackChainLinkFor for %s", uid)
remote, e1 := u.remoteTrackChainLinkFor(username, uid)
return TrackChainLinkFor(m, u.id, uid, remote, e1)
}
func TrackChainLinkFor(m MetaContext, me keybase1.UID, them keybase1.UID, remote *TrackChainLink, remoteErr error) (*TrackChainLink, error) {
local, e2 := LocalTrackChainLinkFor(m, me, them)
m.Debug("| Load remote -> %v", (remote != nil))
m.Debug("| Load local -> %v", (local != nil))
if remoteErr != nil && e2 != nil {
return nil, remoteErr
}
if local == nil && remote == nil {
return nil, nil
}
if local == nil && remote != nil {
return remote, nil
}
if remote == nil && local != nil {
m.Debug("local expire %v: %s", local.tmpExpireTime.IsZero(), local.tmpExpireTime)
return local, nil
}
if remote.GetCTime().After(local.GetCTime()) {
m.Debug("| Returning newer remote")
return remote, nil
}
return local, nil
}
func (u *User) remoteTrackChainLinkFor(username NormalizedUsername, uid keybase1.UID) (*TrackChainLink, error) {
if u.IDTable() == nil {
return nil, nil
}
return u.IDTable().TrackChainLinkFor(username, uid)
}
// BaseProofSet creates a basic proof set for a user with their
// keybase and uid proofs and any pgp fingerprint proofs.
func (u *User) BaseProofSet() *ProofSet {
proofs := []Proof{
{Key: "keybase", Value: u.name},
{Key: "uid", Value: u.id.String()},
}
for _, fp := range u.GetActivePGPFingerprints(true) {
proofs = append(proofs, Proof{Key: PGPAssertionKey, Value: fp.String()})
}
return NewProofSet(proofs)
}
// localDelegateKey takes the given GenericKey and provisions it locally so that
// we can use the key without needing a refresh from the server. The eventual
// refresh we do get from the server will clobber our work here.
func (u *User) localDelegateKey(key GenericKey, sigID keybase1.SigID, kid keybase1.KID, isSibkey bool, isEldest bool, merkleHashMeta keybase1.HashMeta, firstAppearedUnverified keybase1.Seqno) (err error) {
if err = u.keyFamily.LocalDelegate(key); err != nil {
return
}
if u.sigChain() == nil {
err = NoSigChainError{}
return
}
err = u.sigChain().LocalDelegate(u.keyFamily, key, sigID, kid, isSibkey, merkleHashMeta, firstAppearedUnverified)
if isEldest {
eldestKID := key.GetKID()
u.leaf.eldest = eldestKID
}
return
}
func (u *User) localDelegatePerUserKey(perUserKey keybase1.PerUserKey) error {
// Don't update the u.keyFamily. It doesn't manage per-user-keys.
// Update sigchain which will update ckf/cki
err := u.sigChain().LocalDelegatePerUserKey(perUserKey)
if err != nil {
return err
}
u.G().Log.Debug("User LocalDelegatePerUserKey gen:%v seqno:%v sig:%v enc:%v",
perUserKey.Gen, perUserKey.Seqno, perUserKey.SigKID.String(), perUserKey.EncKID.String())
return nil
}
// SigChainBump is called during a multikey post to update the correct seqno, hash, and
// high skip. When a delegator posts a high link, they specify isHighDelegator=true
// in order to set the new high skip pointer to the delegator's link, so subsequent
// keys in the multikey will supply the correct high skip.
func (u *User) SigChainBump(linkID LinkID, sigID keybase1.SigID, isHighDelegator bool) {
u.SigChainBumpMT(MerkleTriple{LinkID: linkID, SigID: sigID}, isHighDelegator)
}
func (u *User) SigChainBumpMT(mt MerkleTriple, isHighDelegator bool) {
u.sigChain().Bump(mt, isHighDelegator)
}
func (u *User) GetDevice(id keybase1.DeviceID) (*Device, error) {
if u.GetComputedKeyFamily() == nil {
return nil, fmt.Errorf("no computed key family")
}
device, exists := u.GetComputedKeyFamily().cki.Devices[id]
if !exists {
return nil, fmt.Errorf("device %s doesn't exist", id)
}
return device, nil
}
func (u *User) DeviceNames() ([]string, error) {
ckf := u.GetComputedKeyFamily()
if ckf == nil {
return nil, fmt.Errorf("no computed key family")
}
if ckf.cki == nil {
return nil, fmt.Errorf("no computed key infos")
}
var names []string
for _, device := range ckf.cki.Devices {
if device.Description == nil {
continue
}
names = append(names, *device.Description)
}
return names, nil
}
// Returns whether or not the current install has an active device
// sibkey.
func (u *User) HasDeviceInCurrentInstall(did keybase1.DeviceID) bool {
ckf := u.GetComputedKeyFamily()
if ckf == nil {
return false
}
_, err := ckf.GetSibkeyForDevice(did)
return err == nil
}
func (u *User) HasCurrentDeviceInCurrentInstall() bool {
did := u.G().Env.GetDeviceIDForUsername(u.GetNormalizedName())
if did.IsNil() {
return false
}
return u.HasDeviceInCurrentInstall(did)
}
func (u *User) SigningKeyPub() (GenericKey, error) {
// Get our key that we're going to sign with.
arg := SecretKeyArg{
Me: u,
KeyType: DeviceSigningKeyType,
}
key := u.G().ActiveDevice.SigningKeyForUID(u.GetUID())
if key != nil {
return key, nil
}
lockedKey, err := u.G().Keyrings.GetSecretKeyLocked(NewMetaContextTODO(u.G()), arg)
if err != nil {
return nil, err
}
pubKey, err := lockedKey.GetPubKey()
if err != nil {
return nil, err
}
return pubKey, nil
}
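// GetSigIDFromSeqno maps a sigchain seqno to its sig ID, returning an empty
// SigID if the chain or the link cannot be found.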
func (u *User) GetSigIDFromSeqno(seqno keybase1.Seqno) keybase1.SigID {
if u.sigChain() == nil {
return ""
}
link := u.sigChain().GetLinkFromSeqno(seqno)
if link == nil {
return ""
}
return link.GetSigID()
}
func (u *User) IsSigIDActive(sigID keybase1.SigID) (bool, error) {
if u.sigChain() == nil {
return false, fmt.Errorf("User's sig chain is nil.")
}
link := u.sigChain().GetLinkFromSigID(sigID)
if link == nil {
return false, fmt.Errorf("Signature with ID '%s' does not exist.", sigID)
}
if link.revoked {
return false, fmt.Errorf("Signature ID '%s' is already revoked.", sigID)
}
return true, nil
}
func (u *User) SigIDSearch(query string) (keybase1.SigID, error) {
if u.sigChain() == nil {
return "", fmt.Errorf("User's sig chain is nil.")
}
link := u.sigChain().GetLinkFromSigIDQuery(query)
if link == nil {
return "", fmt.Errorf("Signature matching query %q does not exist.", query)
}
if link.revoked {
return "", fmt.Errorf("Signature ID '%s' is already revoked.", link.GetSigID())
}
return link.GetSigID(), nil
}
func (u *User) LinkFromSigID(sigID keybase1.SigID) *ChainLink {
return u.sigChain().GetLinkFromSigID(sigID)
}
func (u *User) SigChainDump(w io.Writer) {
u.sigChain().Dump(w)
}
func (u *User) IsCachedIdentifyFresh(upk *keybase1.UserPlusKeysV2AllIncarnations) bool {
idv, _ := u.GetIDVersion()
if upk.Uvv.Id == 0 || idv != upk.Uvv.Id {
return false
}
shv := u.GetSigHintsVersion()
if upk.Uvv.SigHints == 0 || shv != upk.Uvv.SigHints {
return false
}
scv := u.GetSigChainLastKnownSeqno()
if upk.Uvv.SigChain == 0 || int64(scv) != upk.Uvv.SigChain {
return false
}
return true
}
func LoadHasRandomPw(mctx MetaContext, arg keybase1.LoadHasRandomPwArg) (res bool, err error) {
mctx = mctx.WithLogTag("HASRPW")
defer mctx.TraceTimed(fmt.Sprintf("User#LoadHasRandomPw(forceRepoll=%t)", arg.ForceRepoll), func() error { return err })()
currentUID := mctx.CurrentUID()
cacheKey := DbKey{
Typ: DBHasRandomPW,
Key: currentUID.String(),
}
var cachedValue, hasCache bool
if !arg.ForceRepoll {
if hasCache, err = mctx.G().GetKVStore().GetInto(&cachedValue, cacheKey); err == nil {
if hasCache && !cachedValue {
mctx.Debug("Returning HasRandomPW=false from KVStore cache")
return false, nil
}
			// If it was never cached or the user *IS* RandomPW right now, pass through
// and call the API.
} else {
mctx.Debug("Unable to get cached value for HasRandomPW: %v", err)
}
}
var initialTimeout time.Duration
if !arg.ForceRepoll && !arg.NoShortTimeout {
		// If we do not need an accurate response from the API server, make
		// the request with a timeout for a quicker overall RPC response time
		// when the network is bad or unavailable.
initialTimeout = 3 * time.Second
}
var ret struct {
AppStatusEmbed
RandomPW bool `json:"random_pw"`
}
err = mctx.G().API.GetDecode(mctx, APIArg{
Endpoint: "user/has_random_pw",
SessionType: APISessionTypeREQUIRED,
InitialTimeout: initialTimeout,
}, &ret)
if err != nil {
if !arg.ForceRepoll {
if hasCache {
// We are allowed to return cache if we have any.
mctx.Warning("Unable to make a network request to has_random_pw. Returning cached value: %t. Error: %s.", cachedValue, err)
return cachedValue, nil
}
mctx.Warning("Unable to make a network request to has_random_pw and there is no cache. Erroring out: %s.", err)
}
return res, err
}
if !hasCache || cachedValue != ret.RandomPW {
// Cache current state. If we put `randomPW=false` in the cache, we will never
		// ever have to call the network from this device, because it's not possible
// to become `randomPW=true` again. If we cache `randomPW=true` we are going to
// keep asking the network, but we will be resilient to bad network conditions
// because we will have this cached state to fall back on.
if err := mctx.G().GetKVStore().PutObj(cacheKey, nil, ret.RandomPW); err == nil {
mctx.Debug("Adding HasRandomPW=%t to KVStore", ret.RandomPW)
} else {
mctx.Debug("Unable to add HasRandomPW state to KVStore")
}
}
return ret.RandomPW, err
}
func CanLogout(mctx MetaContext) (res keybase1.CanLogoutRes) {
if !mctx.G().ActiveDevice.Valid() {
mctx.Debug("CanLogout: looks like user is not logged in")
res.CanLogout = true
return res
}
if err := CheckCurrentUIDDeviceID(mctx); err != nil {
switch err.(type) {
case DeviceNotFoundError, UserNotFoundError,
KeyRevokedError, NoDeviceError, NoUIDError:
mctx.Debug("CanLogout: allowing logout because of CheckCurrentUIDDeviceID returning: %s", err.Error())
return keybase1.CanLogoutRes{CanLogout: true}
default:
// Unexpected error like network connectivity issue, fall through.
// Even if we are offline here, we may be able to get cached value
// `false` from LoadHasRandomPw and be allowed to log out.
mctx.Debug("CanLogout: CheckCurrentUIDDeviceID returned: %q, falling through", err.Error())
}
}
hasRandomPW, err := LoadHasRandomPw(mctx, keybase1.LoadHasRandomPwArg{
ForceRepoll: false,
})
if err != nil {
return keybase1.CanLogoutRes{
CanLogout: false,
Reason: fmt.Sprintf("We couldn't ensure that your account has a passphrase: %s", err.Error()),
}
}
if hasRandomPW {
return keybase1.CanLogoutRes{
CanLogout: false,
SetPassphrase: true,
Reason: "You signed up without a password and need to set a password first",
}
}
res.CanLogout = true
return res
}
// PartialCopy copies some fields of the User object, but not all.
// For instance, it doesn't copy the SigChain or IDTable, and it only
// makes a shallow copy of the ComputedKeyFamily.
func (u User) PartialCopy() *User {
ret := &User{
Contextified: NewContextified(u.G()),
id: u.id,
name: u.name,
leaf: u.leaf,
dirty: false,
}
if ckf := u.GetComputedKeyFamily(); ckf != nil {
ret.ckfShallowCopy = ckf.ShallowCopy()
ret.keyFamily = ckf.kf
} else if u.keyFamily != nil {
ret.keyFamily = u.keyFamily.ShallowCopy()
}
return ret
}
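// ValidateNormalizedUsername checks that a username is 2-16 characters of
// lowercase letters, digits, and underscores (underscores neither leading nor
// doubled), and returns it as a NormalizedUsername.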
func ValidateNormalizedUsername(username string) (NormalizedUsername, error) {
res := NormalizedUsername(username)
if len(username) < 2 {
return res, errors.New("username too short")
}
if len(username) > 16 {
return res, errors.New("username too long")
}
// underscores allowed, just not first or doubled
re := regexp.MustCompile(`^([a-z0-9][a-z0-9_]?)+$`)
if !re.MatchString(username) {
return res, errors.New("invalid username")
}
return res, nil
}
type UserForSignatures struct {
uid keybase1.UID
name NormalizedUsername
eldestKID keybase1.KID
eldestSeqno keybase1.Seqno
latestPUK *keybase1.PerUserKey
}
func (u UserForSignatures) GetUID() keybase1.UID { return u.uid }
func (u UserForSignatures) GetName() string { return u.name.String() }
func (u UserForSignatures) GetEldestKID() keybase1.KID { return u.eldestKID }
func (u UserForSignatures) GetEldestSeqno() keybase1.Seqno { return u.eldestSeqno }
func (u UserForSignatures) GetNormalizedName() NormalizedUsername { return u.name }
func (u UserForSignatures) ToUserVersion() keybase1.UserVersion {
return keybase1.UserVersion{Uid: u.uid, EldestSeqno: u.eldestSeqno}
}
func (u UserForSignatures) GetLatestPerUserKey() *keybase1.PerUserKey { return u.latestPUK }
func (u *User) ToUserForSignatures() (ret UserForSignatures) {
if u == nil {
return ret
}
ret.uid = u.GetUID()
ret.name = u.GetNormalizedName()
ret.eldestKID = u.GetEldestKID()
ret.eldestSeqno = u.GetCurrentEldestSeqno()
ret.latestPUK = u.GetComputedKeyFamily().GetLatestPerUserKey()
return ret
}
var _ UserBasic = UserForSignatures{} | if sigChain == nil {
return nil, fmt.Errorf("no user sigchain")
} |
md_rollout.go | /*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"context"
"fmt"
"os"
"path/filepath"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
"k8s.io/utils/pointer"
"sigs.k8s.io/cluster-api/test/framework"
"sigs.k8s.io/cluster-api/test/framework/clusterctl"
"sigs.k8s.io/cluster-api/util"
)
// MachineDeploymentRolloutSpecInput is the input for MachineDeploymentRolloutSpec.
type MachineDeploymentRolloutSpecInput struct {
E2EConfig *clusterctl.E2EConfig
ClusterctlConfigPath string
BootstrapClusterProxy framework.ClusterProxy
ArtifactFolder string
SkipCleanup bool
Flavor string
}
// MachineDeploymentRolloutSpec implements a test that verifies that MachineDeployment rolling updates are successful.
func | (ctx context.Context, inputGetter func() MachineDeploymentRolloutSpecInput) {
var (
specName = "md-rollout"
input MachineDeploymentRolloutSpecInput
namespace *corev1.Namespace
cancelWatches context.CancelFunc
clusterResources *clusterctl.ApplyClusterTemplateAndWaitResult
)
BeforeEach(func() {
Expect(ctx).NotTo(BeNil(), "ctx is required for %s spec", specName)
input = inputGetter()
Expect(input.E2EConfig).ToNot(BeNil(), "Invalid argument. input.E2EConfig can't be nil when calling %s spec", specName)
Expect(input.ClusterctlConfigPath).To(BeAnExistingFile(), "Invalid argument. input.ClusterctlConfigPath must be an existing file when calling %s spec", specName)
Expect(input.BootstrapClusterProxy).ToNot(BeNil(), "Invalid argument. input.BootstrapClusterProxy can't be nil when calling %s spec", specName)
Expect(os.MkdirAll(input.ArtifactFolder, 0750)).To(Succeed(), "Invalid argument. input.ArtifactFolder can't be created for %s spec", specName)
Expect(input.E2EConfig.Variables).To(HaveKey(KubernetesVersion))
Expect(input.E2EConfig.Variables).To(HaveValidVersion(input.E2EConfig.GetVariable(KubernetesVersion)))
// Setup a Namespace where to host objects for this spec and create a watcher for the namespace events.
namespace, cancelWatches = setupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder)
clusterResources = new(clusterctl.ApplyClusterTemplateAndWaitResult)
})
It("Should successfully upgrade Machines upon changes in relevant MachineDeployment fields", func() {
By("Creating a workload cluster")
clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{
ClusterProxy: input.BootstrapClusterProxy,
ConfigCluster: clusterctl.ConfigClusterInput{
LogFolder: filepath.Join(input.ArtifactFolder, "clusters", input.BootstrapClusterProxy.GetName()),
ClusterctlConfigPath: input.ClusterctlConfigPath,
KubeconfigPath: input.BootstrapClusterProxy.GetKubeconfigPath(),
InfrastructureProvider: clusterctl.DefaultInfrastructureProvider,
Flavor: input.Flavor,
Namespace: namespace.Name,
ClusterName: fmt.Sprintf("%s-%s", specName, util.RandomString(6)),
KubernetesVersion: input.E2EConfig.GetVariable(KubernetesVersion),
ControlPlaneMachineCount: pointer.Int64Ptr(1),
WorkerMachineCount: pointer.Int64Ptr(1),
},
WaitForClusterIntervals: input.E2EConfig.GetIntervals(specName, "wait-cluster"),
WaitForControlPlaneIntervals: input.E2EConfig.GetIntervals(specName, "wait-control-plane"),
WaitForMachineDeployments: input.E2EConfig.GetIntervals(specName, "wait-worker-nodes"),
}, clusterResources)
By("Upgrading MachineDeployment Infrastructure ref and wait for rolling upgrade")
framework.UpgradeMachineDeploymentInfrastructureRefAndWait(ctx, framework.UpgradeMachineDeploymentInfrastructureRefAndWaitInput{
ClusterProxy: input.BootstrapClusterProxy,
Cluster: clusterResources.Cluster,
WaitForMachinesToBeUpgraded: input.E2EConfig.GetIntervals(specName, "wait-machine-upgrade"),
MachineDeployments: clusterResources.MachineDeployments,
})
By("PASSED!")
})
AfterEach(func() {
// Dumps all the resources in the spec namespace, then cleanups the cluster object and the spec namespace itself.
dumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, clusterResources.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup)
})
}
| MachineDeploymentRolloutSpec |
urls.py | """profiles_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) | """
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path("api/", include('profiles_api.urls')),
] | |
bootstrap-datetimepicker.pt.js | /**
* Portuguese translation for bootstrap-datetimepicker
* Original code: Cauan Cabral <[email protected]>
* Tiago Melo <[email protected]>
*/ | daysShort: ["Dom", "Seg", "Ter", "Qua", "Qui", "Sex", "Sáb", "Dom"],
daysMin: ["Do", "Se", "Te", "Qu", "Qu", "Se", "Sa", "Do"],
months: ["Janeiro", "Fevereiro", "Março", "Abril", "Maio", "Junho", "Julho", "Agosto", "Setembro", "Outubro", "Novembro", "Dezembro"],
monthsShort: ["Jan", "Fev", "Mar", "Abr", "Mai", "Jun", "Jul", "Ago", "Set", "Out", "Nov", "Dez"],
suffix: [],
meridiem: ["am", "pm"],
today: "Hoje"
};
}(jQuery)); | ;(function ($) {
$.fn.datetimepicker.dates['pt'] = {
days: ["Domingo", "Segunda", "Terça", "Quarta", "Quinta", "Sexta", "Sábado", "Domingo"], |
test_hhslib.py | """
@package: jsonutils
@script: test_JsonUtils.py
@purpose: Test Suite for JsonUtils.
@created: Aug 26, 2017
@author: <B>H</B>ugo <B>S</B>aporetti <B>J</B>unior
@mailto: [email protected]
@site: https://github.com/yorevs/homesetup
@license: Please refer to <https://opensource.org/licenses/MIT>
"""
import os
import unittest
from hhslib.security import *
PASSPHRASE = '12345'
SAMPLE_IN_FILE_NAME = "resources/secret.in"
SAMPLE_OUT_FILE_NAME = "resources/secret.out"
OUT_FILE = "resources/outfile.out"
OUT_FILE_GPG = "resources/outfile.out.gpg"
ORIGINAL_FILE_CONTENTS = "HomeSetup Secrets"
ENCODED_FILE_CONTENTS = "SG9tZVNldHVwIFNlY3JldHM="
class TestHhsLib(unittest.TestCase):
# Setup tests
def setUp(self):
with open(SAMPLE_IN_FILE_NAME, 'w') as f_in:
f_in.write(ORIGINAL_FILE_CONTENTS)
with open(SAMPLE_OUT_FILE_NAME, 'w') as f_in:
f_in.write(ENCODED_FILE_CONTENTS)
# Teardown tests
def tearDown(self):
if os.path.exists(OUT_FILE):
os.remove(OUT_FILE)
if os.path.exists(OUT_FILE_GPG):
os.remove(OUT_FILE_GPG)
# TEST CASES ----------
# TC1 - Test encoding a file.
def test_should_encode_file(self):
with open(SAMPLE_IN_FILE_NAME, 'r') as f_in:
contents = str(f_in.read().strip())
self.assertEquals(ORIGINAL_FILE_CONTENTS, contents)
encode(SAMPLE_IN_FILE_NAME, OUT_FILE)
with open(OUT_FILE, 'r') as f_out:
contents = str(f_out.read().strip())
self.assertEquals(ENCODED_FILE_CONTENTS, contents)
# TC2 - Test decoding a file.
def test_should_decode_file(self):
|
# TC3 - Test encrypting a file.
def test_should_encrypt_decrypt_file(self):
with open(SAMPLE_IN_FILE_NAME, 'r') as f_in:
contents = str(f_in.read().strip())
self.assertEquals(ORIGINAL_FILE_CONTENTS, contents)
encrypt(SAMPLE_IN_FILE_NAME, OUT_FILE_GPG, PASSPHRASE)
decrypt(OUT_FILE_GPG, OUT_FILE, PASSPHRASE)
with open(OUT_FILE, 'r') as f_out:
contents = str(f_out.read().strip())
self.assertEquals(ORIGINAL_FILE_CONTENTS, contents)
# Program entry point.
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestHhsLib)
unittest.TextTestRunner(verbosity=2).run(suite)
| with open(SAMPLE_OUT_FILE_NAME, 'r') as f_in:
contents = str(f_in.read().strip())
self.assertEquals(ENCODED_FILE_CONTENTS, contents)
decode(SAMPLE_OUT_FILE_NAME, OUT_FILE)
with open(OUT_FILE, 'r') as f_out:
contents = str(f_out.read().strip())
self.assertEquals(ORIGINAL_FILE_CONTENTS, contents) |
base-accordions.component.ts | import { Component, OnInit, ViewEncapsulation } from '@angular/core';
import { Helpers } from '../../../../../../helpers';
@Component({
selector: "app-base-accordions",
templateUrl: "./base-accordions.component.html",
encapsulation: ViewEncapsulation.None,
})
export class BaseAccordionsComponent implements OnInit {
constructor() { | ngOnInit() {
}
} |
} |
Rodal.tsx | import React from 'react';
import cn from 'classnames';
import Dialog from './Dialog';
import './rodal.css';
// env
const IN_BROWSER = typeof window !== 'undefined';
const UA = IN_BROWSER && window.navigator.userAgent.toLowerCase();
const IS_IE_9 = UA && UA.indexOf('msie 9.0') > 0;
class Rodal extends React.Component<Props, State> {
el: HTMLDivElement | null = null;
static defaultProps: DefaultProps;
state = {
isShow: false,
animationType: 'leave'
};
componentDidMount() {
if (this.props.visible) {
this.enter();
}
}
componentDidUpdate(prevProps: Props) {
if (this.props.visible && !prevProps.visible) {
this.enter();
}
if (!this.props.visible && prevProps.visible) {
this.leave();
}
}
enter() {
this.setState({
isShow: true,
animationType: 'enter'
});
}
leave() {
if (IS_IE_9) {
this.setState({
isShow: false
});
} else {
this.setState({
animationType: 'leave'
});
}
}
onKeyUp = (event: React.KeyboardEvent) => {
if (!this.props.closeOnEsc || event.keyCode !== 27) {
return;
}
this.props.onClose(event);
}; | const { animationType } = this.state;
const { closeOnEsc, onAnimationEnd } = this.props;
if (animationType === 'leave') {
this.setState({ isShow: false });
} else if (closeOnEsc) {
this.el?.focus();
}
if (event.target === this.el && onAnimationEnd) {
onAnimationEnd();
}
};
render() {
const {
closeMaskOnClick,
onClose,
customMaskStyles,
showMask,
duration,
className,
children,
showCloseButton,
popupClass
} = this.props;
const CloseButton = showCloseButton ? <span className="rodal-close"
onClick={onClose}
/> : null;
const { isShow, animationType } = this.state;
const Mask = showMask ? (
<div
className="rodal-mask"
style={customMaskStyles}
onClick={closeMaskOnClick ? onClose : void 0}
/>
) : null;
const style = {
display: isShow ? '' : 'none',
animationDuration: duration + 'ms',
WebkitAnimationDuration: duration + 'ms'
};
return (
<div
style={style}
className={cn('rodal', `rodal-fade-${animationType}`, className, 'rodal-background')}
onAnimationEnd={this.animationEnd}
tabIndex={-1}
ref={
el => {
this.el = el;
}
}
onKeyUp={this.onKeyUp}
>
{Mask}
<Dialog {...this.props}
animationType={animationType}
>
<div className={`child-wrapper ${popupClass}`}>
{children}
{CloseButton}
</div>
</Dialog>
</div>
);
}
}
Rodal.defaultProps = {
width: 400,
height: 240,
visible: false,
showMask: true,
closeOnEsc: false,
closeMaskOnClick: true,
showCloseButton: true,
animation: 'zoom', // slideup for msite;
duration: 300,
className: '',
customStyles: {},
customMaskStyles: {},
popupClass: 'popup-border',
enterAnimation: '',
leaveAnimation: '',
onAnimationEnd: () => { }
} as DefaultProps;
type State = {
isShow: boolean;
animationType: string;
}
type DefaultProps = {
width: number | string;
height: number | string;
visible: boolean;
showMask: boolean;
closeOnEsc: boolean;
closeMaskOnClick: boolean;
showCloseButton: boolean;
    animation: 'fade' | 'zoom' | 'slideUp' | 'slideDown' | 'slideLeft' | 'slideRight' | 'rotate' | 'door';
enterAnimation: string;
leaveAnimation: string;
onAnimationEnd: () => void;
duration: number;
className: string;
customStyles: React.CSSProperties;
customMaskStyles: React.CSSProperties;
popupClass: string;
}
type RequiredProps = {
onClose: (e: React.MouseEvent | React.KeyboardEvent) => void;
}
type Props = DefaultProps & RequiredProps;
export default Rodal; |
animationEnd = (event: React.BaseSyntheticEvent) => { |
getBlock.py | from __future__ import division
import numpy as np
from procedural_city_generation.additional_stuff.Singleton import Singleton
from procedural_city_generation.polygons.Polygon2D import Polygon2D
singleton = Singleton("polygons")
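# Ray-casting point-in-polygon test: returns True when 'point' lies inside the
# polygon described by the list of edges in 'poly'.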
def p_in_poly(poly, point):
x, y = point
n = len(poly)
inside = False
p1x, p1y = poly[0][0]
for i in range(n+1):
p2x, p2y = poly[i % n][0]
if y > min(p1y, p2y):
if y <= max(p1y, p2y):
if x <= max(p1x, p2x):
if p1y != p2y:
xinters = (y-p1y)*(p2x-p1x)/(p2y-p1y)+p1x
if p1x == p2x or x <= xinters:
inside = not inside
p1x, p1y = p2x, p2y
return inside
def getBlock(wedges, vertex_list):
|
if __name__ == "__main__":
import matplotlib.pyplot as plt
import construct_polygons as cp
polys, vertices = cp.main()
for p in getBlock(polys[1], vertices):
p.selfplot()
plt.show()
| '''Calculate block to be divided into lots, as well as street polygons'''
old_vertices = [vertex_list[wedge.b] for wedge in wedges]
old_poly = Polygon2D([v.coords for v in old_vertices])
new_vertices = []
polylist = []
last2 = []
for i in range(len(old_vertices)):
# Calculate position of new vertex
alpha = wedges[i-1].alpha
a, b, c = old_vertices[i-2], old_vertices[i-1], old_vertices[i]
v1 = a.coords - b.coords
v2 = c.coords - b.coords
n1 = np.array((-v1[1], v1[0]))/np.linalg.norm(v1)
n2 = np.array((v2[1], -v2[0]))/np.linalg.norm(v2)
# Change lengths of normal vectors depending on whether each
# edge is a minor road or a main road
if b.minor_road or a.minor_road:
n1 *= singleton.minor_factor
else:
n1 *= singleton.main_factor
if b.minor_road or c.minor_road:
n2 *= singleton.minor_factor
else:
n2 *= singleton.main_factor
# Check if current vertex is dead end
if not 0 - 0.001 < alpha < 0 + 0.001:
# Not a dead end: move edges which share this vertex
# inwards along their normal vectors, find intersection
try:
intersection = np.linalg.solve(
np.array(((v1), (v2))).T, (b.coords+n2)-(b.coords+n1))
except np.linalg.LinAlgError:
raise Exception(str(v1)+", "+str(v2),
"angle: "+str(wedges[i-1].alpha))
new = b.coords + n1 + intersection[0]*v1
# Check if new vertex is in old polygon
if p_in_poly(old_poly.edges, new):
# Append new vertex to lot polygon
new_vertices.append(new)
these2 = [b.coords, new]
if last2:
street_vertices = last2 + these2
polylist.append(
Polygon2D(street_vertices, poly_type="road"))
last2 = these2[::-1]
else:
# New vertex not in polygon, return old polygon as street polygon
return [old_poly]
else:
# Dead end: determine two new vertices by adding the two normals
# to current vector, then check if these are in old polygon
new1, new2 = b.coords + n1, b.coords + n2
if p_in_poly(old_poly.edges, new1) and p_in_poly(old_poly.edges, new2):
new_vertices += [new1, new2]
if last2:
street_vertices = last2 + [b.coords, new1]
polylist.append(
Polygon2D(street_vertices, poly_type="road"))
street_vertices = [b.coords, new2, new1]
polylist.append(
Polygon2D(street_vertices, poly_type="road"))
last2 = [new2, b.coords]
else:
old_poly.poly_type = "road"
return [old_poly]
street_vertices = last2 + [old_vertices[-1].coords, new_vertices[0]]
polylist.append(Polygon2D(street_vertices, poly_type="road"))
# All new vertices are in old polygon: append block polygon
block_poly = Polygon2D(new_vertices)
if block_poly.area < singleton.max_area:
block_poly.poly_type = "lot"
polylist.append(block_poly)
return polylist |
listen-main.go | package main
import (
"encoding/json"
"flag"
"fmt"
"github.com/sevlyar/go-daemon"
"log"
"net/http"
"os"
)
var addr = flag.String("addr", websocketServer, "http service address")
var mode = flag.String("mode", "", "update or not")
func main() {
var args []string
flag.Parse()
args = append(args, "[k8sMLer daemon]")
if *mode != "" {
args = append(args, *mode)
}
cntxt := &daemon.Context{
PidFileName: "k8sMLer.pid",
PidFilePerm: 0644,
LogFileName: "k8sMLer.log",
LogFilePerm: 0640,
WorkDir: "./",
Env: nil,
Args: args,
Umask: 027,
}
d, err := cntxt.Reborn() // like fork
if err != nil {
log.Fatalln("Unable to run: ", err)
}
if d != nil {
return // child is ready, return parent
}
defer cntxt.Release()
var mod string
if len(os.Args) > 1 {
// update
mod = os.Args[1]
}
log.Print("- - - - - - - - - - - - - - -")
log.Print("K8sMLer daemon started")
listen_main(mod)
}
func listen_main(mod string) |
func init() {
/*file, err := os.OpenFile("k8sMLer.err",
os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
if err != nil {
log.Fatalln("Failed to open error log file:", err)
}*/
Trace = log.New(os.Stdout,
"TRACE: ",
log.Ldate|log.Ltime|log.Lshortfile)
Info = log.New(os.Stdout,
"INFO: ",
log.Ldate|log.Ltime|log.Lshortfile)
Warning = log.New(os.Stdout,
"WARNING: ",
log.Ldate|log.Ltime|log.Lshortfile)
/*Error = log.New(io.MultiWriter(file, os.Stderr),
"ERROR: ",
log.Ldate|log.Ltime|log.Lshortfile)*/
Error = log.New(os.Stdout,
"ERROR: ",
log.Ldate|log.Ltime|log.Lshortfile)
}
| {
QUEUELIST = make([]*headNode, 0)
IP_POOL = make(map[string]bool)
if mod == MOD_UPDATE {
Trace.Println("update mode start up, recovery IP-POOL...")
tmpbyte := make([]byte, 4096)
file, error := os.OpenFile(".ippool", os.O_RDONLY, 0766)
if error != nil {
fmt.Println(error)
}
defer file.Close()
total, err := file.Read(tmpbyte)
if err != nil {
Error.Println(err)
}
err = json.Unmarshal(tmpbyte[:total], &IP_POOL) // tmpbyte[:total] for error invalid character '\x00' after top-level value
if err != nil {
Error.Println(err)
}
/*//validation
fmt.Println("'''''''''''''''''''''''''''''''''")
for k, v := range IP_POOL {
fmt.Println(k, v)
}
fmt.Println("'''''''''''''''''''''''''''''''''")*/
}
hub := newHub()
go hub.run()
UPDATEMAP = make(map[string][]string)
http.HandleFunc("/", func(writer http.ResponseWriter, request *http.Request) {
serveWs(hub, writer, request, mod)
})
err := http.ListenAndServe(*addr, nil)
if err != nil {
Error.Println("ListenAndServe: ", err)
}
} |
config.go | package ffuf
import (
"context"
)
type Config struct {
Headers map[string]string `json:"headers"`
Extensions []string `json:"extensions"`
DirSearchCompat bool `json:"dirsearch_compatibility"`
Method string `json:"method"`
Url string `json:"url"`
Data string `json:"postdata"`
Quiet bool `json:"quiet"`
Colors bool `json:"colors"`
InputProviders []InputProviderConfig `json:"inputproviders"`
CommandKeywords []string `json:"-"`
InputNum int `json:"cmd_inputnum"`
InputMode string `json:"inputmode"`
OutputDirectory string `json:"outputdirectory"`
OutputFile string `json:"outputfile"`
OutputFormat string `json:"outputformat"`
IgnoreBody bool `json:"ignorebody"`
NoBanner bool `json:"nobanner"`
IgnoreWordlistComments bool `json:"ignore_wordlist_comments"`
StopOn403 bool `json:"stop_403"`
StopOnErrors bool `json:"stop_errors"`
StopOnAll bool `json:"stop_all"`
FollowRedirects bool `json:"follow_redirects"`
AutoCalibration bool `json:"autocalibration"`
AutoCalibrationStrings []string `json:"autocalibration_strings"`
Timeout int `json:"timeout"`
ProgressFrequency int `json:"-"`
Delay optRange `json:"delay"`
Filters map[string]FilterProvider `json:"filters"`
Matchers map[string]FilterProvider `json:"matchers"`
Threads int `json:"threads"`
Context context.Context `json:"-"`
ProxyURL string `json:"proxyurl"`
ReplayProxyURL string `json:"replayproxyurl"`
CommandLine string `json:"cmdline"`
Verbose bool `json:"verbose"`
MaxTime int `json:"maxtime"`
MaxTimeJob int `json:"maxtime_job"`
Recursion bool `json:"recursion"`
RecursionDepth int `json:"recursion_depth"`
}
type InputProviderConfig struct {
Name string `json:"name"`
Keyword string `json:"keyword"`
Value string `json:"value"`
}
func | (ctx context.Context) Config {
var conf Config
conf.Context = ctx
conf.Headers = make(map[string]string)
conf.Method = "GET"
conf.Url = ""
conf.Data = ""
conf.Quiet = false
conf.IgnoreWordlistComments = false
conf.StopOn403 = false
conf.StopOnErrors = false
conf.StopOnAll = false
conf.FollowRedirects = false
conf.InputProviders = make([]InputProviderConfig, 0)
conf.CommandKeywords = make([]string, 0)
conf.AutoCalibrationStrings = make([]string, 0)
conf.InputNum = 0
conf.InputMode = "clusterbomb"
conf.ProxyURL = ""
conf.Filters = make(map[string]FilterProvider)
conf.Matchers = make(map[string]FilterProvider)
conf.Delay = optRange{0, 0, false, false}
conf.Extensions = make([]string, 0)
conf.Timeout = 10
// Progress update frequency, in milliseconds
conf.ProgressFrequency = 100
conf.DirSearchCompat = false
conf.Verbose = false
conf.MaxTime = 0
conf.MaxTimeJob = 0
conf.Recursion = false
conf.RecursionDepth = 0
return conf
}
| NewConfig |
encoders_test.go | package main
import (
"crypto/ed25519"
"fmt"
"testing"
)
var b32TestCases = []testCase{
testCase{
decoded: []byte{59, 73, 66, 126, 252, 150, 123, 166, 113, 107, 198, 52, 255, 236, 72, 112, 9, 146, 232, 12, 69, 165, 210, 202, 156, 63, 51, 62, 106, 207, 182, 107},
encoded: "7D4M4ZQWJSXTCWBBRRTFZV28E04S5T0C8PJX5JMW7WSKWTPFPSNG",
},
testCase{
decoded: []byte{143, 151, 30, 105, 79, 74, 193, 242, 224, 97, 106, 227, 223, 99, 236, 225, 145, 236, 152, 143, 230, 159, 247, 50, 72, 147, 217, 248, 255, 67, 126, 116},
encoded: "HYBHWTAF9B0Z5R31DBHXYRZCW68YS64FWTFZECJ8JFCZHZT3FST0",
},
testCase{
decoded: []byte{100, 138, 58, 29, 215, 203, 249, 249, 62, 224, 216, 70, 191, 13, 224, 150, 174, 81, 39, 125, 64, 93, 9, 192, 175, 93, 64, 75, 181, 93, 81, 22},
encoded: "CJ53M7EQSFWZJFQ0V13BY3F0JTQ529VX81EGKG5FBN04QDAXA4B0",
},
testCase{
decoded: []byte{145, 30, 158, 33, 248, 234, 78, 70, 108, 212, 167, 42, 151, 249, 37, 177, 36, 250, 110, 73, 89, 241, 190, 70, 7, 142, 119, 158, 15, 232, 228, 115},
encoded: "J4F9W8FRX974CV6MMWN9FY95P4JFMVJ9B7RVWHG7HSVSW3Z8WHSG",
},
testCase{
decoded: []byte{37, 190, 191, 20, 201, 161, 145, 108, 193, 112, 198, 34, 70, 92, 202, 167, 162, 124, 60, 25, 10, 67, 41, 140, 96, 103, 124, 71, 72, 191, 144, 0},
encoded: "4PZBY569M68PSGBGRRH4CQ6AMYH7RF0S191JK330CXY4EJ5ZJ000",
},
testCase{
decoded: []byte{233, 132, 69, 72, 63, 230, 64, 151, 188, 152, 73, 210, 186, 131, 153, 16, 14, 45, 110, 197, 208, 121, 102, 71, 232, 141, 240, 85, 238, 138, 91, 47},
encoded: "X624AJ1ZWS09FF4R979BN0WS2072TVP5T1WPCHZ8HQR5BVMABCQG",
},
testCase{
decoded: []byte{70, 145, 156, 235, 127, 126, 254, 123, 13, 86, 173, 10, 182, 10, 39, 151, 200, 255, 56, 48, 38, 61, 155, 72, 1, 117, 232, 111, 145, 93, 184, 104},
encoded: "8T8SSTVZFVZ7P3APNM5BC2H7JZ4FYE1G4RYSPJ01EQM6Z4AXQ1M0",
},
testCase{
decoded: []byte{40, 63, 195, 179, 116, 218, 206, 16, 126, 171, 14, 202, 210, 155, 187, 6, 117, 172, 181, 137, 46, 251, 109, 24, 107, 252, 33, 95, 206, 56, 31, 26},
encoded: "50ZW7CVMVB710ZNB1V5D56XV0STTSDC95VXPT63BZGGNZKHR3WD0",
},
testCase{
decoded: []byte{16, 249, 237, 62, 116, 10, 80, 20, 123, 50, 75, 103, 228, 127, 214, 26, 199, 49, 83, 34, 66, 24, 242, 155, 240, 60, 18, 25, 205, 187, 156, 76},
encoded: "23WYTFKM19818YSJ9DKY8ZYP3B3K2MS288CF56ZG7G91KKDVKH60",
},
testCase{
decoded: []byte{233, 110, 203, 25, 190, 221, 178, 24, 29, 138, 26, 65, 46, 246, 187, 122, 92, 164, 70, 199, 71, 11, 113, 163, 218, 251, 157, 151, 127, 152, 213, 192},
encoded: "X5QCP6DYVPS1G7CA390JXXNVF9EA8HP78W5Q38YTZEESEZWRTQ00",
},
}
func TestB32Encode(t *testing.T) {
for _, test := range b32TestCases {
actual := B32Encode(test.decoded)
expected := test.encoded
if actual != expected {
fmt.Printf("FAIL:\n Exp: %s\n Act: %s\n", expected, actual)
t.Fail()
}
}
}
func TestEncodePeerMhash(t *testing.T) | {
pub := ed25519.PublicKey{
0x1c,
0xff,
0xcf,
0x5d,
0xd4,
0xa2,
0x1d,
0x26,
0xef,
0xab,
0x55,
0xdd,
0x86,
0x46,
0xcd,
0x1d,
0x93,
0x46,
0x27,
0x18,
0xfd,
0x85,
0xb4,
0xe8,
0xb7,
0x21,
0x71,
0x5c,
0xfe,
0x36,
0xdb,
0xa8,
}
results := encodePeerMhash(pub)
expected := "USER.3KZWYQEMM8EJDVXBAQERCHPD3P9MC9RRZP2V9T5Q45RNSZHPVEM0"
if results != expected {
t.Fail()
}
} |
|
algo.rs | //! FIXME: write short doc here
use std::{
fmt,
ops::{self, RangeInclusive},
};
use itertools::Itertools;
use ra_text_edit::TextEditBuilder;
use rustc_hash::FxHashMap;
use crate::{
AstNode, Direction, NodeOrToken, SyntaxElement, SyntaxKind, SyntaxNode, SyntaxNodePtr,
SyntaxToken, TextRange, TextSize,
};
/// Returns ancestors of the node at the offset, sorted by length. This should
/// do the right thing at an edge, e.g. when searching for expressions at `{
/// <|>foo }` we will get the name reference instead of the whole block, which
/// we would get if we just did `find_token_at_offset(...).flat_map(|t|
/// t.parent().ancestors())`.
pub fn ancestors_at_offset(
node: &SyntaxNode,
offset: TextSize,
) -> impl Iterator<Item = SyntaxNode> {
node.token_at_offset(offset)
.map(|token| token.parent().ancestors())
.kmerge_by(|node1, node2| node1.text_range().len() < node2.text_range().len())
}
/// Finds a node of specific Ast type at offset. Note that this is slightly
/// imprecise: if the cursor is strictly between two nodes of the desired type,
/// as in
///
/// ```no-run
/// struct Foo {}|struct Bar;
/// ```
///
/// then the shorter node will be silently preferred.
pub fn find_node_at_offset<N: AstNode>(syntax: &SyntaxNode, offset: TextSize) -> Option<N> {
ancestors_at_offset(syntax, offset).find_map(N::cast)
}
/// Skip to next non `trivia` token
pub fn skip_trivia_token(mut token: SyntaxToken, direction: Direction) -> Option<SyntaxToken> {
while token.kind().is_trivia() {
token = match direction {
Direction::Next => token.next_token()?,
Direction::Prev => token.prev_token()?,
}
}
Some(token)
}
/// Finds the first sibling in the given direction which is not `trivia`
pub fn non_trivia_sibling(element: SyntaxElement, direction: Direction) -> Option<SyntaxElement> {
return match element {
NodeOrToken::Node(node) => node.siblings_with_tokens(direction).skip(1).find(not_trivia),
NodeOrToken::Token(token) => token.siblings_with_tokens(direction).skip(1).find(not_trivia),
};
fn not_trivia(element: &SyntaxElement) -> bool {
match element {
NodeOrToken::Node(_) => true,
NodeOrToken::Token(token) => !token.kind().is_trivia(),
}
}
}
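/// Returns the smallest element (node or token) in `root` whose range covers
/// the given text range.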
pub fn find_covering_element(root: &SyntaxNode, range: TextRange) -> SyntaxElement {
root.covering_element(range)
}
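/// Finds the lowest common ancestor of two nodes by trimming their ancestor
/// chains to equal depth and walking them in lockstep.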
pub fn least_common_ancestor(u: &SyntaxNode, v: &SyntaxNode) -> Option<SyntaxNode> {
if u == v {
return Some(u.clone());
}
let u_depth = u.ancestors().count();
let v_depth = v.ancestors().count();
let keep = u_depth.min(v_depth);
let u_candidates = u.ancestors().skip(u_depth - keep);
let v_canidates = v.ancestors().skip(v_depth - keep);
let (res, _) = u_candidates.zip(v_canidates).find(|(x, y)| x == y)?;
Some(res)
}
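/// Returns the nearest sibling of `me`, in the given direction, that casts to
/// the same AST type.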
pub fn neighbor<T: AstNode>(me: &T, direction: Direction) -> Option<T> {
me.syntax().siblings(direction).skip(1).find_map(T::cast)
}
pub fn has_errors(node: &SyntaxNode) -> bool {
node.children().any(|it| it.kind() == SyntaxKind::ERROR)
}
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub enum InsertPosition<T> {
First,
Last,
Before(T),
After(T),
}
pub struct TreeDiff {
replacements: FxHashMap<SyntaxElement, SyntaxElement>,
}
impl TreeDiff {
pub fn into_text_edit(&self, builder: &mut TextEditBuilder) {
for (from, to) in self.replacements.iter() {
builder.replace(from.text_range(), to.to_string())
}
}
pub fn is_empty(&self) -> bool {
self.replacements.is_empty()
}
}
/// Finds minimal the diff, which, applied to `from`, will result in `to`.
///
/// Specifically, returns a map whose keys are descendants of `from` and values
/// are descendants of `to`, such that `replace_descendants(from, map) == to`.
///
/// A trivial solution is a singleton map `{ from: to }`, but this function
/// tries to find a more fine-grained diff.
pub fn diff(from: &SyntaxNode, to: &SyntaxNode) -> TreeDiff {
let mut buf = FxHashMap::default();
    // FIXME: this is both horribly inefficient and gives a larger than
    // necessary diff. I bet there's a cool algorithm to diff trees properly.
go(&mut buf, from.clone().into(), to.clone().into());
return TreeDiff { replacements: buf };
fn go(
buf: &mut FxHashMap<SyntaxElement, SyntaxElement>,
lhs: SyntaxElement,
rhs: SyntaxElement,
) {
if lhs.kind() == rhs.kind()
&& lhs.text_range().len() == rhs.text_range().len()
&& match (&lhs, &rhs) {
(NodeOrToken::Node(lhs), NodeOrToken::Node(rhs)) => {
lhs.green() == rhs.green() || lhs.text() == rhs.text()
}
(NodeOrToken::Token(lhs), NodeOrToken::Token(rhs)) => lhs.text() == rhs.text(),
_ => false,
}
{
return;
}
if let (Some(lhs), Some(rhs)) = (lhs.as_node(), rhs.as_node()) {
if lhs.children_with_tokens().count() == rhs.children_with_tokens().count() {
for (lhs, rhs) in lhs.children_with_tokens().zip(rhs.children_with_tokens()) {
go(buf, lhs, rhs)
}
return;
}
}
buf.insert(lhs, rhs);
}
}
/// Adds specified children (tokens or nodes) to the current node at the
/// specific position.
///
/// This is a type-unsafe low-level editing API, if you need to use it,
/// prefer to create a type-safe abstraction on top of it instead.
pub fn insert_children(
parent: &SyntaxNode,
position: InsertPosition<SyntaxElement>,
to_insert: impl IntoIterator<Item = SyntaxElement>,
) -> SyntaxNode {
let mut to_insert = to_insert.into_iter();
_insert_children(parent, position, &mut to_insert)
}
fn _insert_children(
parent: &SyntaxNode,
position: InsertPosition<SyntaxElement>,
to_insert: &mut dyn Iterator<Item = SyntaxElement>,
) -> SyntaxNode {
let mut delta = TextSize::default();
let to_insert = to_insert.map(|element| {
delta += element.text_range().len();
to_green_element(element)
});
let mut old_children = parent.green().children().map(|it| match it {
NodeOrToken::Token(it) => NodeOrToken::Token(it.clone()),
NodeOrToken::Node(it) => NodeOrToken::Node(it.clone()),
});
let new_children = match &position {
InsertPosition::First => to_insert.chain(old_children).collect::<Vec<_>>(),
InsertPosition::Last => old_children.chain(to_insert).collect::<Vec<_>>(),
InsertPosition::Before(anchor) | InsertPosition::After(anchor) => {
let take_anchor = if let InsertPosition::After(_) = position { 1 } else { 0 };
let split_at = position_of_child(parent, anchor.clone()) + take_anchor;
let before = old_children.by_ref().take(split_at).collect::<Vec<_>>();
before.into_iter().chain(to_insert).chain(old_children).collect::<Vec<_>>()
}
};
with_children(parent, new_children)
}
/// Replaces all nodes in `to_delete` with nodes from `to_insert`
///
/// This is a type-unsafe low-level editing API, if you need to use it,
/// prefer to create a type-safe abstraction on top of it instead.
pub fn replace_children(
parent: &SyntaxNode,
to_delete: RangeInclusive<SyntaxElement>,
to_insert: impl IntoIterator<Item = SyntaxElement>,
) -> SyntaxNode {
let mut to_insert = to_insert.into_iter();
_replace_children(parent, to_delete, &mut to_insert)
}
fn _replace_children(
parent: &SyntaxNode,
to_delete: RangeInclusive<SyntaxElement>,
to_insert: &mut dyn Iterator<Item = SyntaxElement>,
) -> SyntaxNode {
let start = position_of_child(parent, to_delete.start().clone());
let end = position_of_child(parent, to_delete.end().clone());
let mut old_children = parent.green().children().map(|it| match it {
NodeOrToken::Token(it) => NodeOrToken::Token(it.clone()),
NodeOrToken::Node(it) => NodeOrToken::Node(it.clone()),
});
let before = old_children.by_ref().take(start).collect::<Vec<_>>();
let new_children = before
.into_iter()
.chain(to_insert.map(to_green_element))
.chain(old_children.skip(end + 1 - start))
.collect::<Vec<_>>();
with_children(parent, new_children)
}
#[derive(Default)]
pub struct SyntaxRewriter<'a> {
f: Option<Box<dyn Fn(&SyntaxElement) -> Option<SyntaxElement> + 'a>>,
//FIXME: add debug_assertions that all elements are in fact from the same file.
replacements: FxHashMap<SyntaxElement, Replacement>,
}
impl fmt::Debug for SyntaxRewriter<'_> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("SyntaxRewriter").field("replacements", &self.replacements).finish()
}
}
impl<'a> SyntaxRewriter<'a> {
pub fn from_fn(f: impl Fn(&SyntaxElement) -> Option<SyntaxElement> + 'a) -> SyntaxRewriter<'a> {
SyntaxRewriter { f: Some(Box::new(f)), replacements: FxHashMap::default() }
}
pub fn delete<T: Clone + Into<SyntaxElement>>(&mut self, what: &T) {
let what = what.clone().into();
let replacement = Replacement::Delete;
self.replacements.insert(what, replacement);
}
pub fn replace<T: Clone + Into<SyntaxElement>>(&mut self, what: &T, with: &T) {
let what = what.clone().into();
let replacement = Replacement::Single(with.clone().into());
self.replacements.insert(what, replacement);
}
pub fn replace_with_many<T: Clone + Into<SyntaxElement>>(
&mut self,
what: &T,
with: Vec<SyntaxElement>,
) {
let what = what.clone().into();
let replacement = Replacement::Many(with);
self.replacements.insert(what, replacement);
}
pub fn replace_ast<T: AstNode>(&mut self, what: &T, with: &T) {
self.replace(what.syntax(), with.syntax())
}
pub fn rewrite(&self, node: &SyntaxNode) -> SyntaxNode {
if self.f.is_none() && self.replacements.is_empty() {
return node.clone();
}
self.rewrite_children(node) | N::cast(self.rewrite(node.syntax())).unwrap()
}
/// Returns a node that encompasses all replacements to be done by this rewriter.
///
/// Passing the returned node to `rewrite` will apply all replacements queued up in `self`.
///
/// Returns `None` when there are no replacements.
pub fn rewrite_root(&self) -> Option<SyntaxNode> {
assert!(self.f.is_none());
self.replacements
.keys()
.map(|element| match element {
SyntaxElement::Node(it) => it.clone(),
SyntaxElement::Token(it) => it.parent(),
})
// If we only have one replacement, we must return its parent node, since `rewrite` does
// not replace the node passed to it.
.map(|it| it.parent().unwrap_or(it))
.fold1(|a, b| least_common_ancestor(&a, &b).unwrap())
}
fn replacement(&self, element: &SyntaxElement) -> Option<Replacement> {
if let Some(f) = &self.f {
assert!(self.replacements.is_empty());
return f(element).map(Replacement::Single);
}
self.replacements.get(element).cloned()
}
fn rewrite_children(&self, node: &SyntaxNode) -> SyntaxNode {
// FIXME: this could be made much faster.
let mut new_children = Vec::new();
for child in node.children_with_tokens() {
self.rewrite_self(&mut new_children, &child);
}
with_children(node, new_children)
}
fn rewrite_self(
&self,
acc: &mut Vec<NodeOrToken<rowan::GreenNode, rowan::GreenToken>>,
element: &SyntaxElement,
) {
if let Some(replacement) = self.replacement(&element) {
match replacement {
Replacement::Single(NodeOrToken::Node(it)) => {
acc.push(NodeOrToken::Node(it.green().clone()))
}
Replacement::Single(NodeOrToken::Token(it)) => {
acc.push(NodeOrToken::Token(it.green().clone()))
}
Replacement::Many(replacements) => {
acc.extend(replacements.iter().map(|it| match it {
NodeOrToken::Node(it) => NodeOrToken::Node(it.green().clone()),
NodeOrToken::Token(it) => NodeOrToken::Token(it.green().clone()),
}))
}
Replacement::Delete => (),
};
return;
}
let res = match element {
NodeOrToken::Token(it) => NodeOrToken::Token(it.green().clone()),
NodeOrToken::Node(it) => NodeOrToken::Node(self.rewrite_children(it).green().clone()),
};
acc.push(res)
}
}
impl ops::AddAssign for SyntaxRewriter<'_> {
fn add_assign(&mut self, rhs: SyntaxRewriter) {
assert!(rhs.f.is_none());
self.replacements.extend(rhs.replacements)
}
}
#[derive(Clone, Debug)]
enum Replacement {
Delete,
Single(SyntaxElement),
Many(Vec<SyntaxElement>),
}
fn with_children(
parent: &SyntaxNode,
new_children: Vec<NodeOrToken<rowan::GreenNode, rowan::GreenToken>>,
) -> SyntaxNode {
let len = new_children.iter().map(|it| it.text_len()).sum::<TextSize>();
let new_node = rowan::GreenNode::new(rowan::SyntaxKind(parent.kind() as u16), new_children);
let new_root_node = parent.replace_with(new_node);
let new_root_node = SyntaxNode::new_root(new_root_node);
// FIXME: use a more elegant way to re-fetch the node (#1185), make
// `range` private afterwards
let mut ptr = SyntaxNodePtr::new(parent);
ptr.range = TextRange::at(ptr.range.start(), len);
ptr.to_node(&new_root_node)
}
fn position_of_child(parent: &SyntaxNode, child: SyntaxElement) -> usize {
parent
.children_with_tokens()
.position(|it| it == child)
.expect("element is not a child of current element")
}
fn to_green_element(element: SyntaxElement) -> NodeOrToken<rowan::GreenNode, rowan::GreenToken> {
match element {
NodeOrToken::Node(it) => it.green().clone().into(),
NodeOrToken::Token(it) => it.green().clone().into(),
}
} | }
pub fn rewrite_ast<N: AstNode>(self, node: &N) -> N { |
command.rs | use std::ffi::OsStr;
use std::io;
use std::process::{self, Command};
use crate::errors::*;
use crate::utils::utils::ExitCode;
pub fn run_command_for_dir<S: AsRef<OsStr>>(
mut cmd: Command,
arg0: &str,
args: &[S],
) -> Result<ExitCode> {
cmd.args(args);
// FIXME rust-lang/rust#32254. It's not clear to me
// when and why this is needed.
// TODO: currentprocess support for mocked file descriptor inheritance here: until
// then tests that depend on rustup's stdin being inherited won't work in-process.
cmd.stdin(process::Stdio::inherit());
return exec(&mut cmd).chain_err(|| crate::ErrorKind::RunningCommand {
name: OsStr::new(arg0).to_owned(),
});
#[cfg(unix)]
fn exec(cmd: &mut Command) -> io::Result<ExitCode> {
use std::os::unix::prelude::*;
Err(cmd.exec())
}
#[cfg(windows)]
fn | (cmd: &mut Command) -> io::Result<ExitCode> {
use winapi::shared::minwindef::{BOOL, DWORD, FALSE, TRUE};
use winapi::um::consoleapi::SetConsoleCtrlHandler;
unsafe extern "system" fn ctrlc_handler(_: DWORD) -> BOOL {
// Do nothing. Let the child process handle it.
TRUE
}
unsafe {
if SetConsoleCtrlHandler(Some(ctrlc_handler), TRUE) == FALSE {
return Err(io::Error::new(
io::ErrorKind::Other,
"Unable to set console handler",
));
}
}
let status = cmd.status()?;
Ok(ExitCode(status.code().unwrap()))
}
}
| exec |
Gomoku_minimax.py | #Import modules and libraries
from random import randint
from string import ascii_uppercase, ascii_lowercase
from itertools import permutations
from copy import deepcopy
from tail_recursion import tail_recursive, recurse
#Define board mapping function
def mapBoard(col, row, value):
board = [[value for x in range(col)] for y in range(row)]
return board
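#Illustrative example (hypothetical values): mapBoard(3, 2, ' ') returns [[' ', ' ', ' '], [' ', ' ', ' ']], i.e. 2 rows of 3 blank cells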
#Define metaboard mapping function
def mapMetaBoard(col, row):
metaboard = [[[[0, 0, 0, 0], [0, 0, 0, 0]] for x in range(col)] for y in range(row)]
return metaboard
#Define view board function
def viewBoard(board):
alphabet = ascii_uppercase
col = len(board[0])
row = len(board)
border = ""
topBorder = "#||"
for i in range(col):
border += "_" * 2
topBorder += alphabet[i]
topBorder += " "
border += "___"
print(topBorder)
print(border)
for i in range(row):
print(alphabet[i] + "||" + " ".join(board[i]) + "|")
#Define mark function
def mark(board, signature):
alphabet = ascii_uppercase
alphabet1 = ascii_lowercase
dimensionY = len(board)
dimensionX = len(board[0])
valid = False
while (not valid):
print("\n\nWhere do you want to mark?\n\n")
x = input(f"Column (A - {alphabet[dimensionX - 1]})? ")
y = input(f"Row (A - {alphabet[dimensionY - 1]})? ")
try:
x = alphabet.index(x)
except ValueError:
x = alphabet1.index(x)
try:
y = alphabet.index(y)
except:
y = alphabet1.index(y)
if (board[y][x] == ' '):
valid = True
else:
print('That position has already been marked. Please try again.\n')
board[y][x] = signature
print('\n')
viewBoard(board)
#Define function to find all occurrences of 'X'
#Value is [opponentSignature]
#Return [[col1, row1], [col2, row2], ...]
def locate(value, board):
dimensionY = len(board)
dimensionX = len(board[0])
returnList = []
for row in range(dimensionY):
for col in range(dimensionX):
if (board[row][col] in value): returnList.append([col, row])
return returnList
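#Illustrative example (hypothetical board): locate(['X'], board) could return [[2, 0], [1, 3]], meaning 'X' sits at column 2 row 0 and at column 1 row 3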
#Define computer's turn -- recursive
@tail_recursive
def play(boardHistory, depleted, checked, iteration, winCond, forecasted, possibilities, board, selfSignature, opponentSignature, difficulty, first = True):
#AI
#Each of metaboard's position is a list [danger, opportunity]
#Define function to update metaboard
#TODO: refine to improve efficiency at detecting risks and opportunities of non-continuous streak & multi-directional streaks
#REQUIREMENTS 1: resonant effect on a tile immediately next to a continuous winCond - 1 streak == risk/opportunity factor of interrupted resonance on a tile conjoining 2 aligning sub-streaks whose sum >= winCond - 1
#REQUIREMENTS 2: implement weighted resonance system on a tile conjoining multiple directional streaks > resonance system for linear streaks
def meta(board, opponentSignature, selfSignature, winCond, difficulty):
#Define function to sweep perimeter of a position's coordinates and add attributes to them
#coord = [col, row]
def sweep(metaboard, coord, keyword, opponentSignature, selfSignature, winCond):
if (keyword == 'danger'):
type = 0
otherType = 1
signature = opponentSignature
else:
type = 1
otherType = 0
signature = selfSignature
coordVars = list(permutations([-1, 0, 1], 2))
coordVars.extend(((-1, -1), (1, 1)))
for coordVar in coordVars:
try:
if (coordVar in [(-1, -1), (1, 1)]):
pos = 2
elif (coordVar in [(0, -1), (0, 1)]):
pos = 0
elif (coordVar in [(-1, 0), (1, 0)]):
pos = 1
else:
pos = 3
row = coord[1] + coordVar[0]
if (row < 0 or row > len(metaboard)): raise IndexError
col = coord[0] + coordVar[1]
if (col < 0 or col > len(metaboard[0])): raise IndexError
#Ripple effect
if (not isinstance(metaboard[row][col], str)):
for i in range(winCond - 1):
if (not isinstance(metaboard[row][col], str)):
metaboard[row][col][type][pos] += (1 - i/(winCond - 1))
metaboard[row][col][otherType][pos] -= (1 - i/(winCond - 1))
row += coordVar[0]
if (row < 0 or row > len(metaboard)): raise IndexError
col += coordVar[1]
if (col < 0 or col > len(metaboard[0])): raise IndexError
elif (metaboard[row][col] == signature):
row += coordVar[0]
if (row < 0 or row > len(metaboard)): raise IndexError
col += coordVar[1]
if (col < 0 or col > len(metaboard[0])): raise IndexError
else:
raise IndexError
#alphabet = ascii_uppercase
#print(f'Metaboard at column {alphabet[col]} and row {alphabet[row]} has a {keyword} level of {metaboard[row][col][type]}.')
#Resonance effect
if (metaboard[row][col] == signature):
alignment = 0
while (metaboard[row][col] == signature):
row += coordVar[0]
if (row < 0 or row > len(metaboard)): raise IndexError
col += coordVar[1]
if (col < 0 or col > len(metaboard[0])): raise IndexError
alignment += 1
if (isinstance(metaboard[row][col], list)):
metaboard[row][col][type][pos] += alignment
except IndexError: pass
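#Illustration of the ripple effect above (assuming winCond = 5): the tile next to the swept mark gains 1.0 on the matching attribute (danger or opportunity), the following tiles gain 0.75, 0.5 and 0.25, and the opposite attribute drops by the same amounts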
#Define function to screen entire metaboard for invalidation
def screen(metaboard, selfSignature, opponentSignature, winCond):
#Define function to rotate board 90 degrees counter-clockwise while keeping the original board intact
def rotate(board):
#Define function to inverse board vertically
def invertY(board):
invertYBoard = []
dimensionY = len(board)
for row in range(dimensionY):
invertYBoard.append(board[dimensionY - row - 1])
return invertYBoard
rotateBoard = []
dimensionY = len(board)
dimensionX = len(board[0])
for col in range(dimensionX):
column = [board[row][col] for row in range(dimensionY)]
rotateBoard.append(column)
return invertY(rotateBoard)
#Define function to screen the top left corner of the board
def screenTopLeftCorner(metaboard, winCond, pos, name):
for row in range(winCond - 1):
for col in range(winCond - 1 - row):
if (isinstance(metaboard[row][col], list)):
#print(f'nullify {row}:{col}\'s danger and potential in the {name} diagonal')
metaboard[row][col][0][pos] = 0
metaboard[row][col][1][pos] = 0
#Define function to screen metaboard to invalidate 'type' from signature (e.g, invalidate dangers between two blocked self) horizontally
def screenHorizontal(metaboard, signature, type, winCond, pos):
dimensionX = len(metaboard[0])
if type == 'danger': type = 0
else: type = 1
#Format all selfSignature's coords found in each row
#sus = [susRow1, susRow3, ...]
#susRow1 = [[col1, row], [col3, row], ...]
sus = []
for row in metaboard:
susEachRow = []
for col in row:
if (col == signature): susEachRow.append([row.index(col), metaboard.index(row)])
sus.append(susEachRow)
sus = [susEachRow for susEachRow in sus if len(susEachRow) != 0]
#Filter out all invalid segments between two blocked self horizontally
for susEachRow in sus:
for i in range(len(susEachRow) - 1):
if (2 <= susEachRow[i + 1][0] - susEachRow[i][0] <= winCond):
for k in range(0, susEachRow[i + 1][0] - susEachRow[i][0]):
if (isinstance(metaboard[susEachRow[i][1]][susEachRow[i][0] + k], list)):
#print(f'Due to being blocked on both ends by {signature} at coordinates {susEachRow[i][0]}:{susEachRow[i][1]} and {susEachRow[i + 1][0]}:{susEachRow[i + 1][1]}, the position with the coordinates {susEachRow[i][1]}:{susEachRow[i][0] + k} has been nullified of its {type}\'s {pos}.')
metaboard[susEachRow[i][1]][susEachRow[i][0] + k][type][pos] = 0
#Filter out all invalid segments between self and border
for susEachRow in sus:
start = susEachRow[0]
end = susEachRow[-1]
if (1 <= start[0] < winCond):
for k in range(0, start[0]):
if (isinstance(metaboard[start[1]][k], list)):
#print(f'Due to being blocked on both ends by {signature} at coordinates {start[0]}:{start[1]} and the border, the position with the coordinates {start[1]}:{k} has been nullified of its {type}\'s {pos}.')
metaboard[start[1]][k][type][pos] = 0
if (1 <= dimensionX - end[0] - 1 < winCond):
for k in range(0, dimensionX - end[0] - 1):
if (isinstance(metaboard[end[1]][end[0] + k], list)):
#print(f'Due to being blocked on both ends by {signature} at coordinates {end[0]}:{end[1]} and the border, the position with the coordinates {end[1]}:{end[0] + k} has been nullified of its {type}\'s {pos}.')
metaboard[end[1]][end[0] + k][type][pos] = 0
return metaboard
#Define function to screen metaboard to invalidate 'type' from signature (e.g, invalidate dangers between two blocked self) diagonally
def screenDiagonal(metaboard, signature, type, winCond, pos):
dimensionY = len(metaboard)
dimensionX = len(metaboard[0])
if type == 'danger': type = 0
else: type = 1
#Format all selfSignature's coords found in each diagonal
#susDiagDown, Up, sus = [susDiag1, susDiag3, ...]
#susDiag1 = [[col1, row1], [col3, row3], ...]
sus = []
susDiagDown = []
lenSusDiagDown = []
susDiagUp = []
lenSusDiagUp = []
susDuplicate = []
for i in range(dimensionY):
susEachDiagDown = []
originalDiagLen = 0
for j in range(dimensionY):
try:
if (metaboard[i + j][j] == signature): susEachDiagDown.append([i + j, j])
originalDiagLen += 1
except IndexError:
pass
susDiagDown.append(susEachDiagDown)
if (len(susEachDiagDown) != 0):
lenSusDiagDown.append(originalDiagLen)
else: lenSusDiagDown.append(0)
for i in range(dimensionX):
susEachDiagUp = []
originalDiagLen = 0
for j in range(dimensionX):
try:
if (metaboard[j][i + j] == signature): susEachDiagUp.append([j, i + j])
originalDiagLen += 1
except IndexError: pass
susDiagUp.append(susEachDiagUp)
if (len(susEachDiagUp) != 0):
lenSusDiagUp.append(originalDiagLen)
else: lenSusDiagUp.append(0)
sus.extend(susDiagDown)
sus.extend(susDiagUp)
for i in range(min(dimensionX, dimensionY)):
if (metaboard[i][i] == signature): susDuplicate.append([i, i])
sus.remove(susDuplicate)
susDiagUp = [susEachDiag for susEachDiag in susDiagUp if len(susEachDiag) != 0]
lenSusDiagUp = [eachLen for eachLen in lenSusDiagUp if eachLen != 0]
susDiagDown = [susEachDiag for susEachDiag in susDiagDown if len(susEachDiag) != 0]
lenSusDiagDown = [eachLen for eachLen in lenSusDiagDown if eachLen != 0]
#Filter out all invalid segments between two blocked self diagonally
for susEachDiag in sus:
for i in range(len(susEachDiag) - 1):
if (2 <= susEachDiag[i + 1][0] - susEachDiag[i][0] <= winCond):
for k in range(0, susEachDiag[i + 1][0] - susEachDiag[i][0]):
if (isinstance(metaboard[susEachDiag[i][0] + k][susEachDiag[i][1] + k], list)):
#print(f'Due to being blocked on both ends by {signature} at coordinates {susEachDiag[i][0]}:{susEachDiag[i][1]} and {susEachDiag[i + 1][0]}:{susEachDiag[i + 1][1]}, the position with the coordinates {susEachDiag[i][0] + k}:{susEachDiag[i][1] + k} has been nullified of its {type}\'s {pos}.')
metaboard[susEachDiag[i][0] + k][susEachDiag[i][1] + k][type][pos] = 0
#Filter out all invalid segments between self and border for susDiagUp
for susEachDiag in susDiagUp:
start = susEachDiag[0]
end = susEachDiag[-1]
if (1 <= min(start[0], start[1]) < winCond):
for k in range(0, min(start[0], start[1]) + 1):
if (isinstance(metaboard[start[0] - k][start[1] - k], list)):
#print(f'Due to being blocked on both ends by {signature} at coordinates {start[0]}:{start[1]} and the corner, the position with the coordinates {start[0] + k}:{start[1] + k} has been nullified of its {type}\'s {pos}.')
metaboard[start[0] - k][start[1] - k][type][pos] = 0
if (1 <= lenSusDiagUp[susDiagUp.index(susEachDiag)] - min(end[0], end[1]) <= winCond):
for k in range(0, lenSusDiagUp[susDiagUp.index(susEachDiag)] - min(end[0], end[1])):
if (isinstance(metaboard[end[0] + k][end[1] + k], list)):
#print(f'Due to being blocked on both ends by {signature} at coordinates {end[0]}:{end[1]} and the corner, the position with the coordinates {end[0] + k}:{end[1] + k} has been nullified of its {type}\'s {pos}.')
metaboard[end[0] + k][end[1] + k][type][pos] = 0
#Filter out all invalid segments between self and border for susDiagDown
for susEachDiag in susDiagDown:
start = susEachDiag[0]
end = susEachDiag[-1]
if (1 <= min(start[0], start[1]) < winCond):
for k in range(0, min(start[0], start[1]) + 1):
if (isinstance(metaboard[start[0] - k][start[1] - k], list)):
#print(f'Due to being blocked on both ends by {signature} at coordinates {start[0]}:{start[1]} and the corner, the position with the coordinates {start[0] + k}:{start[1] + k} has been nullified of its {type}\'s {pos}.')
metaboard[start[0] - k][start[1] - k][type][pos] = 0
if (1 <= lenSusDiagDown[susDiagDown.index(susEachDiag)] - min(end[0], end[1]) <= winCond):
for k in range(0, lenSusDiagDown[susDiagDown.index(susEachDiag)] - min(end[0], end[1])):
if (isinstance(metaboard[end[0] + k][end[1] + k], list)):
#print(f'Due to being blocked on both ends by {signature} at coordinates {end[0]}:{end[1]} and the corner, the position with the coordinates {end[0] + k}:{end[1] + k} has been nullified of its {type}\'s {pos}.')
metaboard[end[0] + k][end[1] + k][type][pos] = 0
return metaboard
#pos: index of relevant value (0: horizontal, 1: vertical, 2: NW - SE, 3: NE - SW)
#Screen top left corner
screenTopLeftCorner(metaboard, winCond, 3, 'top left')
metaboard = rotate(metaboard)
#Screen top right corner
screenTopLeftCorner(metaboard, winCond, 2, 'top right')
metaboard = rotate(metaboard)
#Screen bottom right corner
screenTopLeftCorner(metaboard, winCond, 3, 'bottom right')
metaboard = rotate(metaboard)
#Screen bottom left corner
screenTopLeftCorner(metaboard, winCond, 2, 'bottom left')
metaboard = rotate(metaboard)
#Screen horizontally
screenHorizontal(metaboard, selfSignature, 'danger' , winCond, 0)
screenHorizontal(metaboard, opponentSignature, 'opportunity' , winCond, 0)
metaboard = rotate(metaboard)
#Screen vertically
screenHorizontal(metaboard, selfSignature, 'danger' , winCond, 1)
screenHorizontal(metaboard, opponentSignature, 'opportunity' , winCond, 1)
for i in range(3): metaboard = rotate(metaboard)
#Screen NW-SE diagonally
screenDiagonal(metaboard, selfSignature, 'danger' , winCond, 2)
screenDiagonal(metaboard, opponentSignature, 'opportunity' , winCond, 2)
metaboard = rotate(metaboard)
#Screen NE-SW diagonally
screenDiagonal(metaboard, selfSignature, 'danger' , winCond, 3)
screenDiagonal(metaboard, opponentSignature, 'opportunity' , winCond, 3)
for i in range(3): metaboard = rotate(metaboard)
metaboard = mapMetaBoard(len(board[0]), len(board))
dangerCoords = locate([opponentSignature], board)
opportunityCoords = locate([selfSignature], board)
for coord in dangerCoords:
metaboard[coord[1]][coord[0]] = opponentSignature
for coord in opportunityCoords:
metaboard[coord[1]][coord[0]] = selfSignature
for coord in dangerCoords:
sweep(metaboard, coord, 'danger', opponentSignature, selfSignature, winCond)
for coord in opportunityCoords:
sweep(metaboard, coord, 'opportunity', opponentSignature, selfSignature, winCond)
#Screening applies for difficulty 2 and up
if (difficulty >= 2):
screen(metaboard, selfSignature, opponentSignature, winCond)
return metaboard
#Define function to choose between aggressive and defensive
def stance(metaboard, difficulty):
dangerList = []
opportunityList = []
for row in metaboard:
for col in row:
if (isinstance(col, list)):
dangerList.append(max(col[0]))
opportunityList.append(max(col[1]))
pressingDanger = max(dangerList)
pressingOpportunity = max(opportunityList)
#print(f'Highest danger is {pressingDanger}, whilst highest opportunity is {pressingOpportunity}.')
#'Tactical' playstyle applies only for difficulty 3
if (difficulty >= 3):
if (pressingOpportunity > pressingDanger):
return 'aggressive', pressingOpportunity
elif (pressingOpportunity == pressingDanger):
return 'tactical', pressingOpportunity
else:
return 'defensive', pressingDanger
else:
if (pressingOpportunity >= pressingDanger):
return 'aggressive', pressingOpportunity
else:
return 'defensive', pressingDanger
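#Illustrative example (hypothetical values): if the highest opportunity is 3 and the highest danger is 2, stance returns ('aggressive', 3); on difficulty 3 an exact tie returns ('tactical', value) instead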
#Define function to make a play
@tail_recursive
def decide(forecasted, checked, style, value, metaboard, difficulty):
if style == 'aggressive': type = 1
elif style == 'defensive': type = 0
else: type = 2
if (style in ['aggressive', 'defensive']):
for row in metaboard:
for col in row:
if (isinstance(col, list)):
if max(col[type]) == value:
#print(col[type].index(value))
x, y = row.index(col), metaboard.index(row)
else:
returnList = []
maxTracker = []
for row in range(len(metaboard)):
for col in range(len(metaboard[0])):
if (isinstance(metaboard[row][col], list)):
if (max(metaboard[row][col][0]) == value) or (max(metaboard[row][col][1]) == value):
#print(col[type].index(value))
returnList.append([col, row])
maxTracker.append(sum(metaboard[row][col][0]) + sum(metaboard[row][col][1]))
x, y = returnList[maxTracker.index(max(maxTracker))][0], returnList[maxTracker.index(max(maxTracker))][1]
if [*forecasted, [x, y]] not in checked:
return x, y
else:
#For a checked position, set metaboard value to negative
metaboardTemp = deepcopy(metaboard)
metaboardTemp[y][x] = [[-1, -1, -1, -1], [-1, -1, -1, -1]]
style, newValue = stance(metaboardTemp, difficulty)
#When all potential positions have been checked, all potential metaboard values will have been set to negative => depleted
if newValue != value: raise ValueError
return recurse(forecasted, checked, style, newValue, metaboardTemp, difficulty)
#Define function to swap self signature and opponent signature
def swap(selfSignature, opponentSignature):
temp = selfSignature
selfSignature = opponentSignature
opponentSignature = temp
return selfSignature, opponentSignature
#Define function to determine if terminal node has been reached
def reachedTerminal(forecasted):
if len(forecasted) >= 1:
last = forecasted[-1][0]
return isinstance(last, bool) or isinstance(last, float)
return False
#Define function to evaluate value of self node
def evalSelf(selfPlaying: bool, possibilities, iteration):
def countExact(values, countItem):
counted = 0
for value in values:
if value is countItem: counted += 1
return counted
#Define function to collapse all forecasted paths with same iteration count
def collapse(selfPlaying: bool, possibilities, iteration):
def contains(values, comparisonItem):
for value in values:
if value is comparisonItem: return True
return False
#Extract all forecasted paths with same iteration count
#print("All possibilities at this stage are: ", possibilities)
extracted = deepcopy([possibility for possibility in possibilities if possibility[-1][1] == iteration])
#if selfPlaying: print("Node layer ", iteration, " and maximizer is playing.")
#else: print("Node layer ", iteration, " and minimizer is playing.")
#print("Before collapse, all values at node layer ", iteration, " is ", extracted)
tempPossibilities = deepcopy([possibility for possibility in possibilities if possibility not in extracted])
#Heuristics: if only 1 or less forecasted at current node, skip collapse
if len(extracted) == 1:
#print("Taking shortcut to skip collapse because only 1 forecasted detected at layer ", iteration, ": ", extracted[0])
tempPossibilities.append(extracted[0])
return tempPossibilities
elif len(extracted) == 0:
#print("Taking shortcut to skip collapse because no forecasted detected at layer ", iteration)
return tempPossibilities
values = [extraction[-1][0] for extraction in extracted]
#print("Performing collapse on ", values)
tieLimiter = False
for value in values:
if isinstance(value, float): tieLimiter = True
#Prioritize boolean: if True exists, all positive possibilities can be pruned
if contains(values, True) and selfPlaying:
values = [value for value in values if not (isinstance(value, float) and value > 0)]
if contains(values, False) and not selfPlaying:
values = [value for value in values if not (isinstance(value, float) and value < 0)]
#When both True and False exists, eliminate any in-between
if contains(values, True) and contains(values, False):
values = [value for value in values if not isinstance(value, float)]
#print("Preliminary sifting is done. Now performing collapse on ", values)
if selfPlaying:
#Due to Python's max([False, 0.0]) -> False, must remove all False if 0.0 exists in maximizer's turn
if tieLimiter and contains(values, False):
values = [value for value in values if value is not False]
returnValue = max(values)
else:
#Due to Python's min([0.0, False]) -> 0.0, must remove all float if False exists in minimizer's turn
if contains(values, False):
returnValue = False
else:
returnValue = min(values)
#print("Collapse done, ", returnValue)
#Deeper eval performed when multiple returnValue in values; choose longest steps for min; shortest steps for max
#Heuristics: when multiple combinations of moves result in same state, keep only 1
if countExact(values, returnValue) > 1:
#print("Multiple forecasted evaluating to the same value detected. Comparing steps for each.")
extractedShortlisted = [forecasted for forecasted in extracted if forecasted[-1][0] is returnValue]
lenList = [len(forecasted) for forecasted in extractedShortlisted]
if selfPlaying:
fullReturnValue = extractedShortlisted[lenList.index(min(lenList))]
else:
fullReturnValue = extractedShortlisted[lenList.index(max(lenList))]
#print("From ", extractedShortlisted, " choose ", fullReturnValue)
else:
#Reconstruct full format of possibility holding returnValue and add back to possibilities
fullReturnValue = [possibility for possibility in extracted if possibility[-1][0] is returnValue][0]
#print("After collapse, all values at node layer ", iteration, " is ", fullReturnValue)
tempPossibilities.append(fullReturnValue)
return tempPossibilities
#Define function to decrement all forecasted paths (should be 1) with iteration count matching current (bubble-up)
def passUp(possibilities, iteration):
for possibility in possibilities:
if possibility[-1][1] == iteration: possibility[-1][1] -= 1
#Identify if a duplicated iteration count exists in possibilities, then collapse all those forecasted depending on self nature
iterationList = [possibility[-1][1] for possibility in possibilities]
#print(iterationList)
for iterationItem in iterationList:
if countExact(iterationList, iterationItem) > 1:
possibilities = collapse(selfPlaying, possibilities, iteration)
#print(iteration)
if (iteration > 0):
passUp(possibilities, iteration)
return possibilities
#Even iteration = machine plays; odd = human
#maxDepthSearch = layers of nodes forecasted ahead by AI -- CAREFUL! time complexity is O(b ** m), with m being maxDepthSearch and b being the branching factor = (boardDimensionX * boardDimensionY - claimed tiles)
#For 3x3 board, set to 10 for full coverage
if len(board) == len(board[0]) and len(board) == 3:
maxDepthSearch = 10
#If game is in developing phase (i.e, number of placed marks <= 1/2 win condition)
elif max(len(locate(selfSignature, board)), len(locate(opponentSignature, board))) <= winCond/2:
maxDepthSearch = 2
else:
maxDepthSearch = 3
#possibilities = [forecasted1, forecasted2, ...]
#forecasted = [[x1, y1], [x2, y2], [x3, y3]..., [True, iteration]] containing moves of both players until end & boolean of win state(True when self is winner, False otherwise)
#forecasted = [[x1, y1], [x2, y2], [x3, y3]..., [score: float, iteration]] containing moves of both players until maxDepthSearch reached, score is evaluated to assign to board state (0 when tie, +highestTacticalValue when it's self's turn, - otherwise)
#Evaluate value of self node depending on min/max nature, run when all child nodes to maxDepthSearch are explored/ when terminal node is detected
#evalSelf only sifts through forecasteds and collapses those having the same iteration value (vying to value same node)
#When bubble up 1 node, take all forecasteds in possibilities with matching current iteration (if everything is right this should already be collapsed to only 1) and decrement that (to imply this value is passed upwards to parent node and is now parent node's originating value)
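#Illustrative example (hypothetical moves): forecasted = [[1, 0], [0, 0], [True, 2]] records self playing column 1 row 0, the opponent playing column 0 row 0, and a forecasted self win at node layer 2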
if reachedTerminal(forecasted):
selfPlaying = (iteration % 2 == 0)
forecastedCopy = deepcopy(forecasted)
possibilities.append(forecastedCopy)
possibilities = evalSelf(selfPlaying, possibilities, iteration)
iteration -= 1
#Reset back 1 node higher
forecasted.pop(-1)
forecasted.pop(-1)
return recurse(boardHistory, depleted, checked, iteration, winCond, forecasted, possibilities, board, selfSignature, opponentSignature, difficulty, False)
#Terminal node: winCond is met/maxDepthSearch reached/no possible moves left
if win(board, winCond, selfSignature, opponentSignature) or win(board, winCond, opponentSignature, selfSignature) or len(locate(' ', board)) == 0 or iteration == maxDepthSearch:
if forecasted not in checked:
checked.append(deepcopy(forecasted))
#If self/other is winner, document move
if win(board, winCond, selfSignature, opponentSignature):
#If it's computer's turn, and computer wins
if (iteration % 2 == 0):
forecasted.append([True, iteration])
#print("Forecasted a possible win if moves are as followed: ", forecasted)
#viewBoard(board)
else:
forecasted.append([False, iteration])
#print("Forecasted a possible loss if moves are as followed: ", forecasted)
#viewBoard(board)
elif win(board, winCond, opponentSignature, selfSignature):
#If it's computer's turn, and computer's opponent wins
if (iteration % 2 == 0):
forecasted.append([False, iteration])
#print("Forecasted a possible loss if moves are as followed: ", forecasted)
#viewBoard(board)
else:
forecasted.append([True, iteration])
#print("Forecasted a possible win if moves are as followed: ", forecasted)
#viewBoard(board)
elif iteration == maxDepthSearch:
metaboard = meta(board, opponentSignature, selfSignature, winCond, difficulty)
try:
style, value = stance(metaboard, difficulty)
#If self's turn
if (iteration % 2 == 0):
forecasted.append([float(value), iteration])
#print("Max search depth reached: ", forecasted)
#viewBoard(board)
else:
forecasted.append([float(-value), iteration])
#print("Max search depth reached: ", forecasted)
#viewBoard(board)
#When maxDepthSearch is reached, but game is also tied
except ValueError:
forecasted.append([0.0, iteration])
#print("Forecasted a possible tie at max depth search if moves are as followed: ", forecasted)
#viewBoard(board)
#When tie is reached through tiles depletion, score is set to 0.0
else:
forecasted.append([0.0, iteration])
#print("Forecasted a possible tie if moves are as followed: ", forecasted)
#viewBoard(board)
#Reset back 1 node higher
boardHistory.pop(-1)
board = deepcopy(boardHistory[-1])
#print("Breakpoint 2: Reset board back to ")
#viewBoard(board)
selfSignature, opponentSignature = swap(selfSignature, opponentSignature)
return recurse(boardHistory, depleted, checked, iteration, winCond, forecasted, possibilities, board, selfSignature, opponentSignature, difficulty, False)
#At each node layer, make a decision and "forecast" board and metaboard, then switch position with opponent and do the same
#Normal case: when self node is not terminal, and all children are not depleted yet/maxDepthSearch is not reached yet
#dimension = len(board)
metaboard = meta(board, opponentSignature, selfSignature, winCond, difficulty)
#Heuristics: if there is only one available move left, take that move
if (len(locate(' ', board)) == 1):
x = locate(' ', board)[0][0]
y = locate(' ', board)[0][1]
#For actual move; only apply when not projecting self as opponent
if (len(checked) == 0 and iteration == 0):
alphabet = ascii_uppercase
print(f'Computer has decided to play at column {alphabet[x]} and row {alphabet[y]}.\n\n')
board = boardHistory[0]
board[y][x] = selfSignature
viewBoard(board)
return board
#For a forecasted move
elif [*forecasted, [x, y]] not in checked:
forecasted.append([x, y])
checked.append(deepcopy(forecasted))
board[y][x] = selfSignature
boardHistory.append(deepcopy(board))
iteration += 1
selfSignature, opponentSignature = swap(selfSignature, opponentSignature)
return recurse(boardHistory, depleted, checked, iteration, winCond, forecasted, possibilities, board, selfSignature, opponentSignature, difficulty, False)
style, value = stance(metaboard, difficulty)
try:
#For first move only
if len(locate(selfSignature, board)) == 0 and len(locate(opponentSignature, board)) == 0:
#For symmetrical board or customized board dimension smaller than twice win condition
if len(board) == len(board[0]) or (len(board) < winCond * 2) or (len(board[0]) < winCond * 2):
move = [int(len(board[0])/2), int(len(board)/2)]
#For customized board dimension larger than twice win condition
else:
move = [randint(winCond, len(board[0]) - 1 - winCond), randint(winCond, len(board) - 1 - winCond)]
x = move[0]
y = move[1]
alphabet = ascii_uppercase
print(f'Computer has decided to play at column {alphabet[x]} and row {alphabet[y]}.\n\n')
board = boardHistory[0]
board[y][x] = selfSignature
viewBoard(board)
return board
else:
x, y = decide(forecasted, checked, style, value, metaboard, difficulty)
except ValueError:
depleted = True
#All child nodes had been depleted (i.e, checked has been populated with all possible forecasted combinations)
if depleted:
depleted = False
selfPlaying = (iteration % 2 == 0)
possibilities = evalSelf(selfPlaying, possibilities, iteration)
iteration -= 1
#If base case had been evaluated; root has been given value; iteration is negative => make a move
#All child branches had been depleted
if iteration < 0:
#print(possibilities)
move = possibilities[0][0]
x = move[0]
y = move[1]
alphabet = ascii_uppercase
print(f'Computer has decided to play at column {alphabet[x]} and row {alphabet[y]}.\n\n')
board = boardHistory[0]
board[y][x] = selfSignature
viewBoard(board)
return board
forecasted.pop(-1)
boardHistory.pop(-1)
board = deepcopy(boardHistory[-1])
#print("Breakpoint 1: Reset board back to ")
#viewBoard(board)
selfSignature, opponentSignature = swap(selfSignature, opponentSignature)
return recurse(boardHistory, depleted, checked, iteration, winCond, forecasted, possibilities, board, selfSignature, opponentSignature, difficulty, False)
forecasted.append([x, y])
checked.append(deepcopy(forecasted))
board[y][x] = selfSignature
#print(selfSignature, " took the move ", [x, y])
#viewBoard(board)
boardHistory.append(deepcopy(board))
#print(f'Assessing risk and opportunity, taking {style} move this turn at col {x}, row {y}.')
# valid = False
# while (not valid):
# x = randint(0, dimension - 1)
# y = randint(0, dimension - 1)
# if board[y][x] == ' ': valid = True
iteration += 1
#Swap player each turn
selfSignature, opponentSignature = swap(selfSignature, opponentSignature)
return recurse(boardHistory, depleted, checked, iteration, winCond, forecasted, possibilities, board, selfSignature, opponentSignature, difficulty, False)
#Define winning
def win(board, winCond, signature, opponentSignature):
#Define function to determine box containing played area
def box(board):
#Define function to find first occurence of 'X' or 'O', row-wise; if none is found, return 0
#Value is [signature, opponentSignature]
def locate(value, board):
dimensionY = len(board)
dimensionX = len(board[0])
for row in range(dimensionY):
for col in range(dimensionX):
if (board[row][col] in value):
return row
return 0
#Define function to inverse board vertically
def invertY(board):
invertYBoard = []
dimensionY = len(board)
for row in range(dimensionY):
invertYBoard.append(board[dimensionY - row - 1])
return invertYBoard
#Define function to rotate board 90 degrees
def rotate(board):
rotateBoard = []
dimensionY = len(board)
dimensionX = len(board[0])
for col in range(dimensionX):
column = [board[row][col] for row in range(dimensionY)]
rotateBoard.append(column)
return rotateBoard
dimensionY = len(board)
dimensionX = len(board[0])
boundaryN = locate([signature, opponentSignature], board)
boundaryS = dimensionY - locate([signature, opponentSignature], invertY(board)) - 1
boundaryW = locate([signature, opponentSignature], rotate(board))
boundaryE = dimensionX - locate([signature, opponentSignature], invertY(rotate(board))) - 1
box = []
for row in range(boundaryN, boundaryS + 1):
boxRow = [board[row][col] for col in range(boundaryW, boundaryE + 1)]
box.append(boxRow)
return box
#Create as many winCond x winCond grids as needed to cover the entire played area
def grid(box, winCond):
dimensionY = len(box)
dimensionX = len(box[0])
gridY = dimensionY - winCond + 1
if (gridY < 1): gridY = 1
gridX = dimensionX - winCond + 1
if (gridX < 1): gridX = 1
#List of grids
grids = []
for offsetX in range(gridX):
for offsetY in range(gridY):
grid = []
for row in range(offsetY, offsetY + winCond):
rowY = []
for col in range(offsetX, offsetX + winCond):
try:
rowY.append(box[row][col])
except IndexError: pass
grid.append(rowY)
grids.append(grid)
return grids
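#For example, a 4x4 played area with winCond = 3 gives gridX = gridY = 2, so grid() returns 4 overlapping 3x3 grids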
for board in grid(box(board), winCond):
#Within each grid:
dimensionY = len(board)
dimensionX = len(board[0])
#Count 'O's in a row
for row in range(dimensionY):
if (board[row].count(signature) >= winCond):
return True
#Count 'O's in a column
columns = []
for col in range(dimensionX):
try:
columns.append([row[col] for row in board])
except IndexError: pass
for col in columns:
if (col.count(signature) >= winCond):
return True
#Count 'O's in a diagonal line
dimension = min(dimensionX, dimensionY)
diagonalsNW = []
diagonalsNE = []
for i in range(dimension):
diagonalNW = []
diagonalNE = []
for j in range(dimension):
try:
diagonalNW.append(board[j][j])
except IndexError: pass
try:
diagonalNE.append(board[j][dimension - j - 1])
except IndexError: pass
diagonalsNW.append(diagonalNW)
diagonalsNE.append(diagonalNE)
for diagonalNW in diagonalsNW:
if (diagonalNW.count(signature) >= winCond):
return True
for diagonalNE in diagonalsNE:
if (diagonalNE.count(signature) >= winCond):
return True
#Game loop
print('Welcome to a game of Tic-tac-toe!\nThe rule is simple: block your opponent before they can get a long enough streak in a continuous row, column or diagonal to win.\n')
mode = True
while (mode):
gamemode = input('Before we start, there are two gamemodes: custom and preset. Which one would you prefer?\n(c) for custom, (p) for preset. ')
if (gamemode not in ['c', 'p']):
print('Unrecognized input command. Please read the instructions carefully and try again.\n')
else:
mode = False
print('\n\n')
#Configuration settings for custom gamemode
configure = True
while (configure):
#Set custom dimension
invalid = True
while (invalid and gamemode == 'c'):
try:
dimensionX, dimensionY = input('Input dimension for game initialization:\n(width x length): ').split('x')
dimensionX = int(dimensionX)
dimensionY = int(dimensionY)
invalid = False
except:
print('Invalid input detected. Please try again.\n')
#Preset dimension
if (gamemode == 'p'):
print('Default grid set to 26x26.')
dimensionX = 26
dimensionY = 26
#Set win condition
valid = False
while (not valid and gamemode == 'c'):
try:
winCond = input('Input streak size to count as win: ')
winCond = int(winCond)
if (not isinstance(winCond, int) or winCond < 3 or winCond > min(dimensionX, dimensionY)): raise TypeError
valid = True
except:
print('Invalid input detected. Please try again.\n')
#Preset win condition
if (gamemode == 'p'):
print('Default win streak set to 5.')
winCond = 5
#Set difficulty
chose = False
while (not chose and gamemode == 'c'):
try:
difficulty = int(input('Choose difficulty (easiest: 1 - hardest: 3): '))
if (3 < difficulty or difficulty < 1): raise ValueError
chose = True
except:
print('Invalid input detected. Please try again.\n')
#Preset difficulty
if (gamemode == 'p'):
print('Default difficulty set to 3.')
difficulty = 3
#Set player's marker
proper = False
while (not proper and gamemode == 'c'):
marker = input('Choose your prefered marker:\n(o) for \'O\', (x) for \'X\': ')
if (marker not in ['x', 'o']):
print('Invalid input detected. Please try again.\n')
else:
proper = True
if (marker == 'o'):
opponentSignature = 'O'
selfSignature = 'X'
else:
|
#Preset marker
if (gamemode == 'p'):
print('Default player marker set to \'X\'.')
opponentSignature = 'X'
selfSignature = 'O'
#Choose who goes first
ok = False
while (not ok and gamemode == 'c'):
playerGoesFirst = input('Do you want to go first?\n(y) for yes, (n) for no: ')
if (playerGoesFirst not in ['y', 'n']):
print('Invalid input detected. Please try again.\n')
else:
ok = True
playerGoesFirst = (playerGoesFirst == 'y')
#Preset first play
if (gamemode == 'p'):
print('Default: computer goes first.')
playerGoesFirst = False
#Replay loop
replay = True
while (replay):
print('\n\n')
board = mapBoard(int(dimensionX), int(dimensionY), ' ')
viewBoard(board)
while (True):
try:
locate([' '], board)[0]
except IndexError:
print('\nIt\'s a tie!')
break
#Player plays
if (playerGoesFirst):
mark(board, opponentSignature)
if (win(board, winCond, opponentSignature, selfSignature)):
print('Congratulations, you won!')
break
playerGoesFirst = True
try:
locate([' '], board)[0]
except IndexError:
print('\nIt\'s a tie!')
break
print('\n\nComputer is calculating...')
#Computer plays
board = play([deepcopy(board)], False, [], 0, winCond, [], [], board, selfSignature, opponentSignature, difficulty)
if (win(board, winCond, selfSignature, opponentSignature)):
print('Sorry, you lost!')
break
#Replay choice
makingChoice = True
while makingChoice:
choice = input('\n\nDo you want to replay?\n(y) to replay with current configurations, (n) to quit, (p) to play with recommended configurations, or (c) to replay with different configurations.\n')
if (choice == 'y'):
replay = True
configure = False
print('\n\n')
makingChoice = False
elif (choice == 'n'):
replay = False
configure = False
makingChoice = False
elif (choice == 'p'):
replay = False
configure = True
gamemode = 'p'
print('\n\n')
makingChoice = False
elif (choice == 'c'):
replay = False
configure = True
gamemode = 'c'
print('\n\n')
makingChoice = False
else:
print('Invalid input detected. Please try again.\n')
input('\nPress ENTER to quit.') | opponentSignature = 'X'
selfSignature = 'O' |
group___s_t_m8_a_f___s_t_m8_s_struct___c_a_n__t_8_page_8_p_a_g_e__0_8_m_d_a_r1.js | var group___s_t_m8_a_f___s_t_m8_s_struct___c_a_n__t_8_page_8_p_a_g_e__0_8_m_d_a_r1 =
[
[ "DATA", "group___s_t_m8_a_f___s_t_m8_s.html#ae44f9e348e41cb272efa87387728571b", null ] | ]; |
|
randomRun.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
This is a random run node.
Subscribes to no topics.
Publishes the 'cmd_vel' topic.
Mainly used as a simple sample program.
by Takuya Yamaguhi.
'''
import rospy
import random
from geometry_msgs.msg import Twist
class RandomBot():
def __init__(self, bot_name="NoName"):
# bot name
self.name = bot_name
# velocity publisher
self.vel_pub = rospy.Publisher('cmd_vel', Twist,queue_size=1)
def calcTwist(self):
value = random.randint(1,1000)
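# Roughly 50% of draws drive forward (x = 0.2), 25% turn left (th = 1), 25% turn right (th = -1); only value == 1000 leaves the robot stopped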
if value < 250:
x = 0.2
th = 0
elif value < 500:
x = 0.2
th = 0
elif value < 750:
x = 0
th = 1
elif value < 1000:
x = 0
th = -1
else:
x = 0
th = 0
twist = Twist()
twist.linear.x = x; twist.linear.y = 0; twist.linear.z = 0
twist.angular.x = 0; twist.angular.y = 0; twist.angular.z = th
return twist
def strategy(self):
|
if __name__ == '__main__':
rospy.init_node('random_run')
bot = RandomBot('Random')
bot.strategy()
| r = rospy.Rate(1) # change speed 1fps
target_speed = 0
target_turn = 0
control_speed = 0
control_turn = 0
while not rospy.is_shutdown():
twist = self.calcTwist()
print(twist)
self.vel_pub.publish(twist)
r.sleep() |
index.js | import React from "react"
import Layout from "../components/layout"
import Hero from "../components/hero"
import Header from '../components/header'
const IndexPage = ({ data }) => {
return (
<Layout>
<Header />
<Hero /> | )
}
export default IndexPage | </Layout> |
pattern_match_check.rs | //! The module where the pattern match type checking is implemented
use ast::*;
use env::{Environment, TypeInfo};
use error::{TypeCheckError, MismatchedTypesError};
use processing::TypeCheck;
/// The trait that must be implemented by the parts of the AST for pattern match type checking
pub trait PatternMatchCheck: TypeCheck {
fn check_match(&mut self,
rhs: &mut Self,
env: &mut Environment<TypeInfo>)
-> Result<(), TypeCheckError>;
}
impl PatternMatchCheck for Expr {
fn check_match(&mut self,
rhs: &mut Self,
env: &mut Environment<TypeInfo>)
-> Result<(), TypeCheckError> {
use ast::Expr::*;
if let Variable { ref name, span } = *self {
let mut assign = Expr::Assign {
name: name.clone(),
name_span: span,
value: Box::new(rhs.clone()),
// TODO
value_span: Span(0, 0),
};
assign.type_check(env)?;
Ok(())
} else {
let my_type = self.type_check(env)?;
let rhs_type = rhs.type_check(env)?;
if my_type != rhs_type {
// TODO
return Err(MismatchedTypesError::new(my_type.into(), rhs_type, Span(0, 0)).into());
}
Ok(())
}
}
}
#[cfg(test)]
mod tests {
use env::Environment;
use error::*;
use parser;
use processing::TypeCheck;
use type_sys::Generic;
use type_sys::Type::*;
macro_rules! assert_err {
( $expr:expr, $err:pat ) => {
let res = parser::parse_Expression($expr)
.unwrap()
.type_check(&mut Environment::new());
assert!(match res {
Err($err) => true,
_ => false,
});
};
( $expr:expr, $err:pat if $guard:expr ) => {
let res = parser::parse_Expression($expr)
.unwrap()
.type_check(&mut Environment::new());
assert!(match res {
Err($err) if $guard => true,
_ => false,
});
};
}
macro_rules! assert_type {
( $expr:expr, $ok:expr ) => {
assert_eq!(parser::parse_Expression($expr)
.unwrap()
.type_check(&mut Environment::new()),
Ok($ok));
}
}
#[test]
fn value() {
assert_err!("match 1 := 2+3.4",
TypeCheckError::NoSuchSignature(NoSuchSignatureError { ref func_name, .. })
if func_name == "+");
assert_type!("match 1 := 1", Bool);
assert_err!("match 1 := 1.",
TypeCheckError::MismatchedTypes(MismatchedTypesError {
expected: Generic::Builtin(Integer),
got: Float,
..
}));
assert_err!(r#"match 1 := "hello""#,
TypeCheckError::MismatchedTypes(MismatchedTypesError {
expected: Generic::Builtin(Integer),
got: Str,
..
}));
}
#[test]
fn array() {
assert_err!("match [1] := [2+3.4]",
TypeCheckError::NoSuchSignature(NoSuchSignatureError { ref func_name, .. })
if func_name == "+");
assert_type!("match Integer[] := Integer[]", Bool);
assert_err!("match Integer[] := Float[]",
TypeCheckError::MismatchedTypes(MismatchedTypesError {
ref expected,
ref got,
..
})
if *expected == Generic::Builtin(Array(Box::new(Integer))) &&
*got == Array(Box::new(Float)));
}
#[test]
fn | () {
assert_err!("match {1} := {2+3.4}",
TypeCheckError::NoSuchSignature(NoSuchSignatureError { ref func_name, .. })
if func_name == "+");
assert_type!("match {} := {}", Bool);
assert_err!("match {1} := {2.}",
TypeCheckError::MismatchedTypes(MismatchedTypesError {
ref expected,
ref got,
..
})
if *expected == Generic::Builtin(Tuple(vec![Integer])) &&
*got == Tuple(vec![Float]));
}
}
| tuple |
camera.js | /*
*
* The device’s camera, the UI for the progress bar, and the function to upload
* the photo to the server
*
*/
var gradients = require('gradients');
var UI = require('ui');
//
// Global variables
//
var cameraAvailable = true;
//
// Set up the cameraWindow
//
// The cameraWindow is mainly used to show a progress bar when uploading.
//
// The device’s camera interface is not actually part of the cameraWindow,
// it is implemented as a modal on top of it.
//
// When we click on the cameraTab, the cameraWindow is visible for a flash,
// before the camera interface kicks in. Because the cameraWindow’s intended
// gradient background is too distracting in this case, we first set it to
// a black background. The progress bar is also hidden at this point.
//
// Only when an upload is triggered does the cameraWindow get its designated
// gradient, and the progress bar is shown.
var cameraWindow = Ti.UI.createWindow({
orientationModes: [Ti.UI.PORTRAIT],
backgroundColor: 'black',
navBarHidden: true,
});
var cameraProgressBar = Ti.UI.createProgressBar({
width: '200dp',
height: '50dp',
min: 0,
max: 1,
value: 0, | top: '10dp',
message: 'Uploading image',
color: 'rgb(103,103,113)',
});
cameraWindow.add(cameraProgressBar);
var close = function() {
cameraWindow.setBackgroundGradient({});
cameraProgressBar.hide();
cameraProgressBar.value = 0;
};
//
// The actual upload function, as called by the camera
//
// It happens in two steps: first the metadata is uploaded,
// then the file itself. This was the most straightforward approach,
// but if one can find the right way to encode the data as multipart/form-data
// it should be possible in one go as well
var uploadPhoto = function(media) {
Ti.API.info("uploadPhoto called");
cameraWindow.setBackgroundGradient(gradients.currentGradient());
cameraProgressBar.show();
var now = new Date().toISOString();
var mime = media.mimeType;
var extension = utils.mime2extensionDict[mime]; // recognise jpg, png, tiff (maybe add gif, bmp?)
if (!extension) {
UI.alertError("Do not know how to handle photos of the type " + mime);
return false;
}
var filename = now + '_raduga_by_' + Ti.App.Properties.getString('username') + extension;
// this is all the metadata we want to send:
var photoData = {
filename: filename,
content_type: mime,
size: media.size,
custom_fields: {
"name_en": Ti.App.Properties.getString('city_name_en'),
"name_ru": Ti.App.Properties.getString('city_name_ru'),
"coordinates": [ [parseFloat(Ti.App.Properties.getString('city_lon')), parseFloat(Ti.App.Properties.getString('city_lat'))] ]
},
user: {
username: Ti.App.Properties.getString('username'),
id: Ti.App.Properties.getString('userid')
},
created_at: now,
updated_at: now,
processed: false
};
// For now, we authenticate through HTTP basic authentication with the username
var authstr = 'Basic ' + Ti.Utils.base64encode(Ti.App.Properties.getString('userid') + ':');
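// i.e. the request header becomes "Authorization: Basic <base64 of 'userid:'>", with an empty password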
var xhr = Ti.Network.createHTTPClient({
onload: function() {
var response = JSON.parse(this.responseText);
Ti.API.info(JSON.stringify(response));
if (response._status === "ERR") {
UI.alertError('Failed uploading photo metadata, API trouble: ' + this.responseText);
return false;
}
Ti.API.info("Succesfully uploaded photo metadata, with _id " + response._id + " to the server");
// We have successfully uploaded the metadata!
// Now we need to upload the image itself.
// We create a second request:
var secondXhr = Titanium.Network.createHTTPClient({
onload: function(e) {
// example response:
// {"_updated":"Thu, 29 May 2014 15:57:29 GMT","_status":"OK","_id":"538758e922497d0249bb9662","_links":{"self":{"href":"127.0.0.1:5000/photos/538758e922497d0249bb9662","title":"Photo"}},"_etag":"bfb6ba7eb0ff446e682b6be0f9cc6b28d7e09ae1"}
var response = JSON.parse(this.responseText);
if (response._status === "ERR") {
UI.alertError('Failed uploading photo file, API trouble: ' + this.responseText);
return false;
}
Ti.API.info("Succesfully uploaded photo: " + JSON.stringify(response));
// We are done here!
// switch to the tab that shows the photos
Ti.App.fireEvent('photosUpdate');
Ti.App.fireEvent('switchTab', {'tab': 'photos'});
close();
},
onerror: function(e) {
Ti.API.info(this.responseText);
UI.alertError('Failed uploading photo file: ' + e.error + '\n\n' + this.responseText);
close();
},
onsendstream: function(e) {
cameraProgressBar.value = e.progress; // continuously sends values from 0 until 1
}
});
secondXhr.open('POST', 'http://' + response._links.self.href);
secondXhr.setRequestHeader('X-HTTP-Method-Override', 'PATCH'); // in iOS we can send a PATCH request directly,
// but in (Titanium’s implementation of) Android we can’t
// Concurrency checking disabled for now, because of https://github.com/nicolaiarocci/eve/issues/369 (is going to be available in 0.5)
// secondXhr.setRequestHeader('If-Match', response._etag);
secondXhr.setRequestHeader('Authorization', authstr);
secondXhr.send({
id: response._id,
image: media,
});
},
onerror: function(e) {
UI.alertError('Failed uploading metadata camera: ' + e.error);
close();
}
});
// Here we upload the metadata
// xhr.open('POST','http://192.168.0.10:5000/photos/');
xhr.open('POST','http://vps40616.public.cloudvps.com/photos/');
xhr.setRequestHeader("Content-Type","application/json; charset=utf-8");
xhr.setRequestHeader('Authorization', authstr);
xhr.send(JSON.stringify(photoData));
};
//
// Camera Behaviour: this is what happens when we press the camera button
//
// from the example http://docs.appcelerator.com/titanium/3.0/#!/guide/Camera_and_Photo_Gallery_APIs :
var showCam = function() {
// this is to prevent the bug noted a bit further down
if (!cameraAvailable) {
Ti.API.info('tried to trigger showCam while still locked');
return;
}
Ti.API.info("showCam called");
if (!Ti.App.Properties.getString('sessionID')) {
UI.alertError(L("signin_before_upload"));
Ti.App.fireEvent('switchTab', {'tab': 'settings'});
return false;
}
if (Ti.Network.getNetworkTypeName() === "NONE") {
UI.alertError(L("camera_no_internet"));
Ti.App.fireEvent('switchTab', {'tab': 'photos'});
return false;
}
Ti.Media.showCamera({
success:function(event) {
if(event.mediaType === Ti.Media.MEDIA_TYPE_PHOTO) {
// there is a bug with the tab getting focus whenever a picture
// is taken, which triggers the camera, causing a loop
// this is a really crude way around it: lock the camera,
// and make it available again after a few seconds.
cameraAvailable = false;
setTimeout(function() { cameraAvailable = true; }, 4000);
uploadPhoto(event.media);
} else {
UI.alertError("Camera got the wrong type back: " + event.mediaType);
}
},
cancel:function() {
// called when user cancels taking a picture
Ti.App.fireEvent('switchTab', {'tab': 'photos'});
close();
},
error:function(error) {
close();
// called when there's an error
var a = Ti.UI.createAlertDialog({title:L('camera')});
if (error.code === Ti.Media.NO_CAMERA) {
a.setMessage('Please run this test on device');
// if one wants to test uploading photos from the simulator, enable this code:
// var photo = Ti.Filesystem.getFile('ui/upload_test_photo.jpg');
// uploadPhoto(photo.read.blob);
} else {
a.setMessage(L('error') + ': ' + error.code);
}
a.show();
},
saveToPhotoGallery: true,
autoHide: true,
mediaTypes:[Ti.Media.MEDIA_TYPE_PHOTO]
});
};
//
// Public exports
//
exports.Camera = function() {
this.window = cameraWindow;
this.showCam = showCam;
}; | |
__init__.py | """
This module implements the Rubik's Cube formulae.
You can deal with Rubik's Cube formulae easily with Step and Formula.
Usage:
>>> a = Formula("R U R' U'")
>>> a
R U R' U'
>>> a.reverse()
>>> a
U R U' R'
>>> a.mirror()
>>> a
U' L' U L
>>> a *= 3
>>> a
U' L' U L U' L' U L U' L' U L
"""
from .move import GenericCubicMove, Move
from .formula import BaseFormula
class GenericCubicFormula(BaseFormula):
|
class Formula(GenericCubicFormula):
_move = Move
__all__ = ["GenericCubicMove", "Move", "GenericCubicFormula", "Formula"]
| _move = GenericCubicMove |
index.js | THREE.Object3D.DefaultUp.set(0, 0, 1);
// const tr = THREE;
// const zerorpc = require("zerorpc");
// const fs = require('fs');
// // import { } from 'app:../node_modules/ccapture.js/build/CCapture.all.min.js';
// // import {CCapture} from 'app:../node_modules/ccapture.js/build/CCapture.all.min.js'
// // const HoloPlay = require("holoplay"); | // import * as tr from './vendor/three.module.js'
import {OrbitControls} from './vendor/examples/jsm/controls/OrbitControls.js'
import {Robot, Shape, FPS, SimTime, Slider, Button, Label, Select, Checkbox, Radio} from './lib.js'
// import { start } from 'repl';
let fps = new FPS(document.getElementById('fps'));
let sim_time = new SimTime(document.getElementById('sim-time'));
let camera, scene, renderer, controls;
// Array of all the robots in the scene
let agents = [];
let shapes = [];
let custom_elements = [];
let connected = false;
// Open the connection to python
let port = parseInt(window.location.pathname.slice(1));
let ws = new WebSocket("ws://localhost:" + port + "/")
let recorder = null;
let recording = false;
let framerate = 20;
let autoclose = true;
// let one = new Label({id: "2", desc: "I am a new Label"})
// let two = new Select({id: "3", desc: "Select Box", options: ["one", '2', 'fourt'], value: 1})
// let three = new Button({id: "1", desc: "Hello"})
// let four = new Checkbox({id: "4", desc: "Check Box", options: ["one", '2', 'fourt'], checked: [0, 's', 0]})
// let five = new Radio({id: "5", desc: "Radio Buttons", options: ["one long", '2', 'fourt'], checked: 1})
ws.onopen = function(event) {
connected = true;
ws.send('Connected');
startSim(event.data);
}
ws.onclose = function(event) {
if (recording) {
stopRecording();
}
if (autoclose) {
setTimeout(
function() {
window.close();
}, 5000);
}
}
function startSim(port) {
init()
animate();
window.addEventListener('resize', on_resize, false);
}
function init() {
//
camera = new THREE.PerspectiveCamera(70, window.innerWidth / window.innerHeight, 0.01, 10);
// camera = new HoloPlay.Camera();
scene = new THREE.Scene();
// THREE.Object3D.DefaultUp.set(0, 0, 1);
renderer = new THREE.WebGLRenderer( {antialias: true });
// hrenderer = new HoloPlay.Renderer();
// hrenderer.webglRenderer = renderer;
// hrenderer.disableFullscreenUi = true;
// console.log(hrenderer);
renderer.setSize(window.innerWidth, window.innerHeight);
renderer.shadowMap.enabled = true;
let div = document.getElementById( 'canvas' );
document.body.appendChild(div);
div.appendChild(renderer.domElement)
controls = new OrbitControls( camera, renderer.domElement );
// Set up camera position
camera.position.set(0.2, 1.2, 0.7);
controls.target = new THREE.Vector3(0, 0, 0.2);
controls.update();
// scene.background = new THREE.Color(0x72645b);
scene.background = new THREE.Color(0x787878);
scene.fog = new THREE.Fog(0x787878, 2, 15 );
var plane = new THREE.Mesh(
new THREE.PlaneBufferGeometry( 40, 40 ),
new THREE.MeshPhongMaterial( { color: 0x4B4B4B, specular: 0x101010 } )
);
plane.receiveShadow = true;
scene.add( plane );
// Lights
scene.add( new THREE.HemisphereLight( 0x443333, 0x111122 ) );
addShadowedLight( 1, 1, 1, 0xffffff, 1.35 );
addShadowedLight( 0.5, 1, - 1, 0xffaa00, 1 );
var axesHelper = new THREE.AxesHelper( 5 );
scene.add( axesHelper );
}
function on_resize() {
camera.aspect = window.innerWidth / window.innerHeight;
camera.updateProjectionMatrix();
renderer.setSize(window.innerWidth, window.innerHeight);
}
function addShadowedLight( x, y, z, color, intensity ) {
var directionalLight = new THREE.DirectionalLight( color, intensity );
directionalLight.position.set( x, y, z );
scene.add( directionalLight );
directionalLight.castShadow = true;
var d = 1;
directionalLight.shadow.camera.left = - d;
directionalLight.shadow.camera.right = d;
directionalLight.shadow.camera.top = d;
directionalLight.shadow.camera.bottom = - d;
directionalLight.shadow.camera.near = 1;
directionalLight.shadow.camera.far = 4;
directionalLight.shadow.bias = - 0.002;
}
function animate() {
requestAnimationFrame(animate);
renderer.render(scene, camera);
if (recording) {
recorder.capture(renderer.domElement);
}
fps.frame();
}
function startRecording(frate, name, format) {
if (!recording) {
if (format === 'gif') {
autoclose = false;
}
recorder = new CCapture({
verbose: false,
display: true,
framerate: frate,
quality: 100,
format: format,
name: name,
workersPath: 'js/vendor/build/'
});
recording = true;
recorder.start();
};
}
function stopRecording() {
recorder.stop();
recorder.save();
recording = false;
}
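// Messages from the Python side are assumed (based on the handler below) to be
// JSON-encoded [function_name, data] pairs, e.g. '["shape_poses", [0, [...]]]'
// or '["sim_time", "1.25"]'; the exact payloads here are illustrative, not verified.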
ws.onmessage = function (event) {
let eventdata = JSON.parse(event.data)
let func = eventdata[0]
let data = eventdata[1]
if (func === 'robot') {
let id = agents.length;
let robot = new Robot(scene, data);
agents.push(robot);
ws.send(id);
} else if (func === 'remove_robot') {
let agent = agents[data]
agent.remove(scene)
renderer.renderLists.dispose();
agents[data] = null;
ws.send(0);
} else if (func === 'shape') {
let id = shapes.length;
let shape = new Shape(scene, data);
shapes.push(shape);
ws.send(id);
} else if (func === 'remove_shape') {
let shape = shapes[data]
shape.remove(scene)
renderer.renderLists.dispose();
shapes[data] = null;
ws.send(0);
} else if (func === 'robot_poses') {
let id = data[0];
let poses = data[1];
agents[id].set_poses(poses);
ws.send(id);
} else if (func === 'shape_poses') {
let id = data[0];
let poses = data[1];
shapes[id].set_poses(poses);
ws.send(id);
} else if (func === 'is_loaded') {
let loaded = agents[data].isLoaded();
ws.send(loaded);
} else if (func === 'sim_time') {
sim_time.display(parseFloat(data));
ws.send(0);
} else if (func === 'start_recording') {
startRecording(parseFloat(data[0]), data[1], data[2]);
ws.send(0);
} else if (func === 'stop_recording') {
stopRecording();
setTimeout(
function() {
ws.send(0);
}, 5000);
} else if (func === 'add_element') {
let element = data.element;
if (element === 'slider') {
custom_elements.push(new Slider(data));
} else if (element === 'button') {
custom_elements.push(new Button(data));
} else if (element === 'label') {
custom_elements.push(new Label(data));
} else if (element === 'select') {
custom_elements.push(new Select(data));
} else if (element === 'checkbox') {
custom_elements.push(new Checkbox(data));
} else if (element === 'radio') {
custom_elements.push(new Radio(data));
}
ws.send(0);
} else if (func === 'check_elements') {
let ret = {};
for (let i = 0; i < custom_elements.length; i++) {
if (custom_elements[i].changed === true) {
ret[custom_elements[i].id] = custom_elements[i].data;
custom_elements[i].changed = false;
}
}
ws.send(JSON.stringify(ret));
} else if (func === 'update_element') {
let id = data.id;
for (let i = 0; i < custom_elements.length; i++) {
if (custom_elements[i].id === id) {
custom_elements[i].update(data);
break;
}
}
ws.send(0);
}
}; | |
yum-pkg.py | from synapse.syncmd import exec_cmd
from synapse.synapse_exceptions import ResourceException
from synapse.logger import logger
log = logger('yum-pkg')
def install(name):
ret = exec_cmd("/usr/bin/yum -q -y install {0}".format(name))
if ret['returncode'] != 0:
raise ResourceException(ret['stderr'])
def get_installed_packages():
ret = exec_cmd("/bin/rpm -qa")
return ret['stdout'].split('\n')
def remove(name):
ret = exec_cmd("/usr/bin/yum -q -y remove {0}".format(name))
if ret['returncode'] != 0:
raise ResourceException(ret['stderr'])
def update(name):
# We need to check first if the package is installed. yum update of a
# non-existing package has a returncode of 0. We need to raise an exception
# if the package is not installed !
inst = is_installed(name)
ret = exec_cmd("/usr/bin/yum -q -y update {0}".format(name))
if ret['returncode'] != 0 or not inst:
raise ResourceException(ret['stderr'])
def is_installed(name):
ret = exec_cmd("/bin/rpm -q {0}".format(name)) | return ret['returncode'] == 0 |
|
redact_query.go | package sqlparser
import querypb "github.com/k0kubun/sqldef/sqlparser/dependency/querypb"
// RedactSQLQuery returns a sql string with the params stripped out for display
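// For example, a query such as "select * from t where id = 1" would come back
// with its literal replaced by a bind variable carrying the "redacted" prefix
// (illustrative sketch only; the exact rendering depends on Normalize/String).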
func RedactSQLQuery(sql string) (string, error) {
bv := map[string]*querypb.BindVariable{}
sqlStripped, comments := SplitMarginComments(sql)
stmt, err := Parse(sqlStripped)
if err != nil {
return "", err |
return comments.Leading + String(stmt) + comments.Trailing, nil
} | }
prefix := "redacted"
Normalize(stmt, bv, prefix) |
build.rs | fn | () {
println!("cargo:rerun-if-changed=build.rs");
println!("cargo:rerun-if-env-changed=RUSTC_INSTALL_BINDIR");
}
| main |
projective_curve.py | """
Projective plane curves over a general ring
AUTHORS:
- William Stein (2005-11-13)
- David Joyner (2005-11-13)
- David Kohel (2006-01)
- Moritz Minzlaff (2010-11)
"""
#*****************************************************************************
# Copyright (C) 2005 William Stein <[email protected]>
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# The full text of the GPL is available at:
#
# http://www.gnu.org/licenses/
#*****************************************************************************
from sage.interfaces.all import singular
from sage.misc.all import add, sage_eval
from sage.rings.all import degree_lowest_rational_function, PolynomialRing
from sage.schemes.projective.projective_space import is_ProjectiveSpace
from curve import Curve_generic_projective
class ProjectiveSpaceCurve_generic(Curve_generic_projective):
def _repr_type(self):
return "Projective Space"
def __init__(self, A, X):
if not is_ProjectiveSpace(A):
raise TypeError("A (=%s) must be a projective space"%A)
Curve_generic_projective.__init__(self, A, X)
d = self.dimension()
if d != 1:
raise ValueError("defining equations (=%s) define a scheme of dimension %s != 1"%(X,d))
class ProjectiveCurve_generic(Curve_generic_projective):
def __init__(self, A, f):
if not (is_ProjectiveSpace(A) and A.dimension_relative() == 2):
raise TypeError("Argument A (= %s) must be a projective plane."%A)
Curve_generic_projective.__init__(self, A, [f])
def _repr_type(self):
return "Projective"
def | (self):
r"""
Return the arithmetic genus of this curve.
This is the arithmetic genus `g_a(C)` as defined in
Hartshorne. If the curve has degree `d` then this is simply
`(d-1)(d-2)/2`. It need *not* equal the geometric genus
(the genus of the normalization of the curve).
EXAMPLE::
sage: x,y,z = PolynomialRing(GF(5), 3, 'xyz').gens()
sage: C = Curve(y^2*z^7 - x^9 - x*z^8); C
Projective Curve over Finite Field of size 5 defined by -x^9 + y^2*z^7 - x*z^8
sage: C.arithmetic_genus()
28
sage: C.genus()
4
"""
d = self.defining_polynomial().total_degree()
return int((d-1)*(d-2)/2)
def divisor_of_function(self, r):
"""
Return the divisor of a function on a curve.
INPUT: r is a rational function on X
OUTPUT:
- ``list`` - The divisor of r represented as a list of
coefficients and points. (TODO: This will change to a more
structural output in the future.)
EXAMPLES::
sage: FF = FiniteField(5)
sage: P2 = ProjectiveSpace(2, FF, names = ['x','y','z'])
sage: R = P2.coordinate_ring()
sage: x, y, z = R.gens()
sage: f = y^2*z^7 - x^9 - x*z^8
sage: C = Curve(f)
sage: K = FractionField(R)
sage: r = 1/x
sage: C.divisor_of_function(r) # todo: not implemented !!!!
[[-1, (0, 0, 1)]]
sage: r = 1/x^3
sage: C.divisor_of_function(r) # todo: not implemented !!!!
[[-3, (0, 0, 1)]]
"""
F = self.base_ring()
f = self.defining_polynomial()
x, y, z = f.parent().gens()
pnts = self.rational_points()
divf = []
for P in pnts:
if P[2] != F(0):
# What is the '5' in this line and the 'r()' in the next???
lcs = self.local_coordinates(P,5)
ldg = degree_lowest_rational_function(r(lcs[0],lcs[1]),z)
if ldg[0] != 0:
divf.append([ldg[0],P])
return divf
def local_coordinates(self, pt, n):
r"""
Return local coordinates to precision n at the given point.
Behaviour is flaky - some choices of `n` are worse than
others.
INPUT:
- ``pt`` - an F-rational point on X which is not a
point of ramification for the projection (x,y) - x.
- ``n`` - the number of terms desired
OUTPUT: x = x0 + t y = y0 + power series in t
EXAMPLES::
sage: FF = FiniteField(5)
sage: P2 = ProjectiveSpace(2, FF, names = ['x','y','z'])
sage: x, y, z = P2.coordinate_ring().gens()
sage: C = Curve(y^2*z^7-x^9-x*z^8)
sage: pt = C([2,3,1])
sage: C.local_coordinates(pt,9) # todo: not implemented !!!!
[2 + t, 3 + 3*t^2 + t^3 + 3*t^4 + 3*t^6 + 3*t^7 + t^8 + 2*t^9 + 3*t^11 + 3*t^12]
"""
f = self.defining_polynomial()
R = f.parent()
F = self.base_ring()
p = F.characteristic()
x0 = F(pt[0])
y0 = F(pt[1])
astr = ["a"+str(i) for i in range(1,2*n)]
x,y = R.gens()
R0 = PolynomialRing(F,2*n+2,names = [str(x),str(y),"t"]+astr)
vars0 = R0.gens()
t = vars0[2]
yt = y0*t**0 + add([vars0[i]*t**(i-2) for i in range(3,2*n+2)])
xt = x0+t
ft = f(xt,yt)
S = singular
S.eval('ring s = '+str(p)+','+str(R0.gens())+',lp;')
S.eval('poly f = '+str(ft))
cmd = 'matrix c = coeffs ('+str(ft)+',t)'
S.eval(cmd)
N = int(S.eval('size(c)'))
b = ["c["+str(i)+",1]," for i in range(2,N/2-4)]
b = ''.join(b)
b = b[:len(b)-1] #to cut off the trailing comma
cmd = 'ideal I = '+b
S.eval(cmd)
c = S.eval('slimgb(I)')
d = c.split("=")
d = d[1:]
d[len(d)-1] += "\n"
e = [x[:x.index("\n")] for x in d]
vals = []
for x in e:
for y in vars0:
if str(y) in x:
if len(x.replace(str(y),"")) != 0:
i = x.find("-")
if i>0:
vals.append([eval(x[1:i]),x[:i],F(eval(x[i+1:]))])
i = x.find("+")
if i>0:
vals.append([eval(x[1:i]),x[:i],-F(eval(x[i+1:]))])
else:
vals.append([eval(str(y)[1:]),str(y),F(0)])
vals.sort()
k = len(vals)
v = [x0+t,y0+add([vals[i][2]*t**(i+1) for i in range(k)])]
return v
def plot(self, *args, **kwds):
"""
Plot the real points of an affine patch of this projective
plane curve.
INPUT:
- ``self`` - an affine plane curve
- ``patch`` - (optional) the affine patch to be plotted; if not
specified, the patch corresponding to the last projective
coordinate being nonzero
- ``*args`` - optional tuples (variable, minimum, maximum) for
plotting dimensions
- ``**kwds`` - optional keyword arguments passed on to
``implicit_plot``
EXAMPLES:
A cuspidal curve::
sage: R.<x, y, z> = QQ[]
sage: C = Curve(x^3 - y^2*z)
sage: C.plot()
Graphics object consisting of 1 graphics primitive
The other affine patches of the same curve::
sage: C.plot(patch=0)
Graphics object consisting of 1 graphics primitive
sage: C.plot(patch=1)
Graphics object consisting of 1 graphics primitive
An elliptic curve::
sage: E = EllipticCurve('101a')
sage: C = Curve(E)
sage: C.plot()
Graphics object consisting of 1 graphics primitive
sage: C.plot(patch=0)
Graphics object consisting of 1 graphics primitive
sage: C.plot(patch=1)
Graphics object consisting of 1 graphics primitive
A hyperelliptic curve::
sage: P.<x> = QQ[]
sage: f = 4*x^5 - 30*x^3 + 45*x - 22
sage: C = HyperellipticCurve(f)
sage: C.plot()
Graphics object consisting of 1 graphics primitive
sage: C.plot(patch=0)
Graphics object consisting of 1 graphics primitive
sage: C.plot(patch=1)
Graphics object consisting of 1 graphics primitive
"""
# if user hasn't specified a favourite affine patch, take the
# one avoiding "infinity", i.e. the one corresponding to the
# last projective coordinate being nonzero
patch = kwds.pop('patch', self.ngens() - 1)
from constructor import Curve
C = Curve(self.affine_patch(patch))
return C.plot(*args, **kwds)
def is_singular(C):
r"""
Returns whether the curve is singular or not.
EXAMPLES:
Over `\QQ`::
sage: F = QQ
sage: P2.<X,Y,Z> = ProjectiveSpace(F,2)
sage: C = Curve(X^3-Y^2*Z)
sage: C.is_singular()
True
Over a finite field::
sage: F = GF(19)
sage: P2.<X,Y,Z> = ProjectiveSpace(F,2)
sage: C = Curve(X^3+Y^3+Z^3)
sage: C.is_singular()
False
sage: D = Curve(X^4-X*Z^3)
sage: D.is_singular()
True
sage: E = Curve(X^5+19*Y^5+Z^5)
sage: E.is_singular()
True
sage: E = Curve(X^5+9*Y^5+Z^5)
sage: E.is_singular()
False
Over `\CC`::
sage: F = CC
sage: P2.<X,Y,Z> = ProjectiveSpace(F,2)
sage: C = Curve(X)
sage: C.is_singular()
False
sage: D = Curve(Y^2*Z-X^3)
sage: D.is_singular()
True
sage: E = Curve(Y^2*Z-X^3+Z^3)
sage: E.is_singular()
False
Showing that ticket #12187 is fixed::
sage: F.<X,Y,Z> = GF(2)[]
sage: G = Curve(X^2+Y*Z)
sage: G.is_singular()
False
"""
poly = C.defining_polynomial()
return poly.parent().ideal(poly.gradient()+[poly]).dimension()> 0
class ProjectiveCurve_finite_field(ProjectiveCurve_generic):
def rational_points_iterator(self):
r"""
Return a generator object for the rational points on this curve.
INPUT:
- ``self`` -- a projective curve
OUTPUT:
A generator of all the rational points on the curve defined over its base field.
EXAMPLE::
sage: F = GF(37)
sage: P2.<X,Y,Z> = ProjectiveSpace(F,2)
sage: C = Curve(X^7+Y*X*Z^5*55+Y^7*12)
sage: len(list(C.rational_points_iterator()))
37
::
sage: F = GF(2)
sage: P2.<X,Y,Z> = ProjectiveSpace(F,2)
sage: C = Curve(X*Y*Z)
sage: a = C.rational_points_iterator()
sage: next(a)
(1 : 0 : 0)
sage: next(a)
(0 : 1 : 0)
sage: next(a)
(1 : 1 : 0)
sage: next(a)
(0 : 0 : 1)
sage: next(a)
(1 : 0 : 1)
sage: next(a)
(0 : 1 : 1)
sage: next(a)
Traceback (most recent call last):
...
StopIteration
::
sage: F = GF(3^2,'a')
sage: P2.<X,Y,Z> = ProjectiveSpace(F,2)
sage: C = Curve(X^3+5*Y^2*Z-33*X*Y*X)
sage: b = C.rational_points_iterator()
sage: next(b)
(0 : 1 : 0)
sage: next(b)
(0 : 0 : 1)
sage: next(b)
(2*a + 2 : a : 1)
sage: next(b)
(2 : a + 1 : 1)
sage: next(b)
(a + 1 : 2*a + 1 : 1)
sage: next(b)
(1 : 2 : 1)
sage: next(b)
(2*a + 2 : 2*a : 1)
sage: next(b)
(2 : 2*a + 2 : 1)
sage: next(b)
(a + 1 : a + 2 : 1)
sage: next(b)
(1 : 1 : 1)
sage: next(b)
Traceback (most recent call last):
...
StopIteration
"""
g = self.defining_polynomial()
K = g.parent().base_ring()
from sage.rings.polynomial.all import PolynomialRing
R = PolynomialRing(K,'X')
X = R.gen()
one = K.one()
zero = K.zero()
# the point with Z = 0 = Y
try:
t = self.point([one,zero,zero])
yield(t)
except TypeError:
pass
# points with Z = 0, Y = 1
g10 = R(g(X,one,zero))
if g10.is_zero():
for x in K:
yield(self.point([x,one,zero]))
else:
for x in g10.roots(multiplicities=False):
yield(self.point([x,one,zero]))
# points with Z = 1
for y in K:
gy1 = R(g(X,y,one))
if gy1.is_zero():
for x in K:
yield(self.point([x,y,one]))
else:
for x in gy1.roots(multiplicities=False):
yield(self.point([x,y,one]))
def rational_points(self, algorithm="enum", sort=True):
r"""
Return the rational points on this curve computed via enumeration.
INPUT:
- ``algorithm`` (string, default: 'enum') -- the algorithm to
use. Currently this is ignored.
- ``sort`` (boolean, default ``True``) -- whether the output
points should be sorted. If False, the order of the output
is non-deterministic.
OUTPUT:
A list of all the rational points on the curve defined over
its base field, possibly sorted.
.. note::
This is a slow Python-level implementation.
EXAMPLES::
sage: F = GF(7)
sage: P2.<X,Y,Z> = ProjectiveSpace(F,2)
sage: C = Curve(X^3+Y^3-Z^3)
sage: C.rational_points()
[(0 : 1 : 1), (0 : 2 : 1), (0 : 4 : 1), (1 : 0 : 1), (2 : 0 : 1), (3 : 1 : 0), (4 : 0 : 1), (5 : 1 : 0), (6 : 1 : 0)]
::
sage: F = GF(1237)
sage: P2.<X,Y,Z> = ProjectiveSpace(F,2)
sage: C = Curve(X^7+7*Y^6*Z+Z^4*X^2*Y*89)
sage: len(C.rational_points())
1237
::
sage: F = GF(2^6,'a')
sage: P2.<X,Y,Z> = ProjectiveSpace(F,2)
sage: C = Curve(X^5+11*X*Y*Z^3 + X^2*Y^3 - 13*Y^2*Z^3)
sage: len(C.rational_points())
104
::
sage: R.<x,y,z> = GF(2)[]
sage: f = x^3*y + y^3*z + x*z^3
sage: C = Curve(f); pts = C.rational_points()
sage: pts
[(0 : 0 : 1), (0 : 1 : 0), (1 : 0 : 0)]
"""
points = list(self.rational_points_iterator())
if sort:
points.sort()
return points
class ProjectiveCurve_prime_finite_field(ProjectiveCurve_finite_field):
def _points_via_singular(self, sort=True):
r"""
Return all rational points on this curve, computed using Singular's
Brill-Noether implementation.
INPUT:
- ``sort`` - bool (default: True), if True return the
point list sorted. If False, returns the points in the order
computed by Singular.
EXAMPLE::
sage: x, y, z = PolynomialRing(GF(5), 3, 'xyz').gens()
sage: f = y^2*z^7 - x^9 - x*z^8
sage: C = Curve(f); C
Projective Curve over Finite Field of size 5 defined by
-x^9 + y^2*z^7 - x*z^8
sage: C._points_via_singular()
[(0 : 0 : 1), (0 : 1 : 0), (2 : 2 : 1), (2 : 3 : 1),
(3 : 1 : 1), (3 : 4 : 1)]
sage: C._points_via_singular(sort=False) #random
[(0 : 1 : 0), (3 : 1 : 1), (3 : 4 : 1), (2 : 2 : 1),
(0 : 0 : 1), (2 : 3 : 1)]
.. note::
The Brill-Noether package (i.e., the 'bn' algorithm) does not
always work. When it fails, a RuntimeError exception is
raised.
"""
f = self.defining_polynomial()._singular_()
singular = f.parent()
singular.lib('brnoeth')
try:
X1 = f.Adj_div()
except (TypeError, RuntimeError) as s:
raise RuntimeError(str(s) + "\n\n ** Unable to use the\
Brill-Noether Singular package to\
compute all points (see above).")
X2 = singular.NSplaces(1, X1)
R = X2[5][1][1]
singular.set_ring(R)
# We use sage_flattened_str_list since iterating through
# the entire list through the sage/singular interface directly
# would involve hundreds of calls to singular, and timing issues with
# the expect interface could crop up. Also, this is vastly
# faster (and more robust).
v = singular('POINTS').sage_flattened_str_list()
pnts = [self(int(v[3*i]), int(v[3*i+1]), int(v[3*i+2]))
for i in range(len(v)//3)]
# singular always dehomogenizes with respect to the last variable
# so if this variable divides the curve equation, we need to add
# points at infinity
F = self.defining_polynomial()
z = F.parent().gens()[-1]
if z.divides(F):
pnts += [self(1,a,0) for a in self.base_ring()]
pnts += [self(0,1,0)]
# remove multiple points
pnts = list(set(pnts))
if sort:
pnts.sort()
return pnts
def riemann_roch_basis(self, D):
r"""
Return a basis for the Riemann-Roch space corresponding to
`D`.
This uses Singular's Brill-Noether implementation.
INPUT:
- ``D`` - a divisor
OUTPUT:
A list of function field elements that form a basis of the Riemann-Roch space
EXAMPLE::
sage: R.<x,y,z> = GF(2)[]
sage: f = x^3*y + y^3*z + x*z^3
sage: C = Curve(f); pts = C.rational_points()
sage: D = C.divisor([ (4, pts[0]), (4, pts[2]) ])
sage: C.riemann_roch_basis(D)
[x/y, 1, z/y, z^2/y^2, z/x, z^2/(x*y)]
::
sage: R.<x,y,z> = GF(5)[]
sage: f = x^7 + y^7 + z^7
sage: C = Curve(f); pts = C.rational_points()
sage: D = C.divisor([ (3, pts[0]), (-1,pts[1]), (10, pts[5]) ])
sage: C.riemann_roch_basis(D)
[(-2*x + y)/(x + y), (-x + z)/(x + y)]
.. NOTE::
Currently this only works over prime field and divisors supported on rational points.
"""
f = self.defining_polynomial()._singular_()
singular = f.parent()
singular.lib('brnoeth')
try:
X1 = f.Adj_div()
except (TypeError, RuntimeError) as s:
raise RuntimeError(str(s) + "\n\n ** Unable to use the Brill-Noether Singular package to compute all points (see above).")
X2 = singular.NSplaces(1, X1)
# retrieve list of all computed closed points (possibly of degree >1)
v = X2[3].sage_flattened_str_list() # We use sage_flattened_str_list since iterating through
# the entire list through the sage/singular interface directly
# would involve hundreds of calls to singular, and timing issues with
# the expect interface could crop up. Also, this is vastly
# faster (and more robust).
v = [ v[i].partition(',') for i in range(len(v)) ]
pnts = [ ( int(v[i][0]), int(v[i][2])-1 ) for i in range(len(v))]
# retrieve coordinates of rational points
R = X2[5][1][1]
singular.set_ring(R)
v = singular('POINTS').sage_flattened_str_list()
coords = [self(int(v[3*i]), int(v[3*i+1]), int(v[3*i+2])) for i in range(len(v)//3)]
# build correct representation of D for singular
Dsupport = D.support()
Dcoeffs = []
for x in pnts:
if x[0] == 1:
Dcoeffs.append(D.coefficient(coords[x[1]]))
else:
Dcoeffs.append(0)
Dstr = str(tuple(Dcoeffs))
G = singular(','.join([str(x) for x in Dcoeffs]), type='intvec')
# call singular's brill noether routine and return
T = X2[1][2]
T.set_ring()
LG = G.BrillNoether(X2)
LG = [X.split(',\n') for X in LG.sage_structured_str_list()]
x,y,z = self.ambient_space().coordinate_ring().gens()
vars = {'x':x, 'y':y, 'z':z}
V = [(sage_eval(a, vars)/sage_eval(b, vars)) for a, b in LG]
return V
def rational_points(self, algorithm="enum", sort=True):
r"""
INPUT:
- ``algorithm`` - string:
- ``'enum'`` - straightforward enumeration
- ``'bn'`` - via Singular's brnoeth package.
EXAMPLE::
sage: x, y, z = PolynomialRing(GF(5), 3, 'xyz').gens()
sage: f = y^2*z^7 - x^9 - x*z^8
sage: C = Curve(f); C
Projective Curve over Finite Field of size 5 defined by
-x^9 + y^2*z^7 - x*z^8
sage: C.rational_points()
[(0 : 0 : 1), (0 : 1 : 0), (2 : 2 : 1), (2 : 3 : 1),
(3 : 1 : 1), (3 : 4 : 1)]
sage: C = Curve(x - y + z)
sage: C.rational_points()
[(0 : 1 : 1), (1 : 1 : 0), (1 : 2 : 1), (2 : 3 : 1),
(3 : 4 : 1), (4 : 0 : 1)]
sage: C = Curve(x*z+z^2)
sage: C.rational_points('all')
[(0 : 1 : 0), (1 : 0 : 0), (1 : 1 : 0), (2 : 1 : 0),
(3 : 1 : 0), (4 : 0 : 1), (4 : 1 : 0), (4 : 1 : 1),
(4 : 2 : 1), (4 : 3 : 1), (4 : 4 : 1)]
.. note::
The Brill-Noether package (i.e., the 'bn' algorithm) does not
always work. When it fails, a RuntimeError exception is
raised.
"""
if algorithm == "enum":
return ProjectiveCurve_finite_field.rational_points(self,
algorithm="enum",
sort=sort)
elif algorithm == "bn":
return self._points_via_singular(sort=sort)
elif algorithm == "all":
S_enum = self.rational_points(algorithm = "enum")
S_bn = self.rational_points(algorithm = "bn")
if S_enum != S_bn:
raise RuntimeError("Bug in rational_points -- different\
algorithms give different answers for\
curve %s!"%self)
return S_enum
else:
raise ValueError("No algorithm '%s' known"%algorithm)
def Hasse_bounds(q, genus=1):
r"""
Return the Hasse-Weil bounds for the cardinality of a nonsingular
curve defined over `\GF{q}` of given ``genus``.
INPUT:
- ``q`` (int) -- a prime power
- ``genus`` (int, default 1) -- a non-negative integer,
OUTPUT:
(tuple) The Hasse bounds (lb,ub) for the cardinality of a curve of
genus ``genus`` defined over `\GF{q}`.
EXAMPLES::
sage: Hasse_bounds(2)
(1, 5)
sage: Hasse_bounds(next_prime(10^30))
(999999999999998000000000000058, 1000000000000002000000000000058)
"""
if genus==1:
rq = (4*q).isqrt()
else:
rq = (4*(genus**2)*q).isqrt()
return (q+1-rq,q+1+rq)
| arithmetic_genus |
common.rs | use std::u64;
use ethereum_types::{Address, BigEndianHash, H256, U256};
/// The size of word in EVM is 32 bytes.
#[inline]
pub fn to_word_size(size: u64) -> u64 |
/// Get the total gas of memory cost.
/// C_mem(a) ≡ G_memory * a + (a ^ 2 / 512); where unit of a is word.
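/// For example, with G_memory = 3, a 1024-byte (32-word) region costs
/// 32 * 3 + (32 * 32) / 512 = 96 + 2 = 98 gas (matching the test below).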
#[inline]
pub fn mem_gas_cost(size: u64, memory_gas: u64) -> u64 {
let size = to_word_size(size);
size * memory_gas + ((size * size) >> 9)
}
/// Get the signed 256 from Unsigned 256.
#[inline]
pub fn get_sign(value: U256) -> (U256, bool) {
// The highest bit is 1, indicating that it is negative
let sign = (value >> 255) == U256::one();
(set_sign(value, sign), sign)
}
/// Set the sign for a U256.
#[inline]
pub fn set_sign(value: U256, sign: bool) -> U256 {
if sign {
(!U256::zero() ^ value).overflowing_add(U256::one()).0
} else {
value
}
}
#[inline]
pub fn u256_to_address(value: &U256) -> Address {
Address::from(H256::from_uint(value))
}
#[inline]
pub fn address_to_u256(value: Address) -> U256 {
U256::from(value.as_bytes())
}
#[inline]
pub fn bool_to_u256(val: bool) -> U256 {
if val {
U256::one()
} else {
U256::zero()
}
}
#[inline]
pub fn u256_min(x: U256, y: U256) -> U256 {
if x > y {
y
} else {
x
}
}
#[inline]
pub fn rpad(slice: Vec<u8>, n: usize) -> Vec<u8> {
let slice_len = slice.len();
if n <= slice.len() {
slice
} else {
let mut padded: Vec<u8> = Vec::with_capacity(n);
let mut part1 = slice;
padded.append(&mut part1);
let mut part2 = vec![0; n as usize - slice_len];
padded.append(&mut part2);
padded
}
}
/// Copy data from source by start and size.
#[inline]
pub fn copy_data(source: &[u8], start: U256, size: U256) -> Vec<u8> {
if size.is_zero() {
return Vec::new();
}
let source_len = U256::from(source.len());
let s = u256_min(start, source_len);
let e = u256_min(s + size, source_len);
let data = &source[s.as_usize()..e.as_usize()];
rpad(Vec::from(data), size.as_usize())
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_memory_gas_cost() {
assert_eq!(mem_gas_cost(32, 3), 3);
assert_eq!(mem_gas_cost(128, 3), 12);
assert_eq!(mem_gas_cost(129, 3), 15);
assert_eq!(mem_gas_cost(1024, 3), 98);
}
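// Illustrative additions (not from the original source): exercise the
// two's-complement helpers and zero-padded copies documented above.
#[test]
fn test_sign_and_copy_data_examples() {
// -1 is encoded as all ones; get_sign recovers magnitude 1 plus the sign bit.
let minus_one = !U256::zero();
assert_eq!(get_sign(minus_one), (U256::one(), true));
// set_sign re-applies the sign, giving back the all-ones encoding.
assert_eq!(set_sign(U256::one(), true), minus_one);
// copy_data reads past the end of the source and right-pads with zeros.
assert_eq!(copy_data(&[1, 2, 3], U256::from(1), U256::from(4)), vec![2, 3, 0, 0]);
}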
}
| {
if size > u64::MAX - 31 {
return (u64::MAX >> 5) + 1;
}
(size + 31) >> 5
} |
cmd_extensions.py | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import contextlib
import os
import sys
import pytest
import spack.cmd
import spack.config
import spack.extensions
import spack.main
class Extension:
"""Helper class to simplify the creation of simple command extension
directory structures with a conventional format for testing.
"""
def __init__(self, name, root):
"""Create a command extension.
Args:
name (str): The name of the command extension.
root (path object): The temporary root for the command extension
(e.g. from tmpdir.mkdir()).
"""
self.name = name
self.pname = spack.cmd.python_name(name)
self.root = root
self.main = self.root.ensure(self.pname, dir=True)
self.cmd = self.main.ensure('cmd', dir=True)
def add_command(self, command_name, contents):
"""Add a command to this command extension.
Args:
command_name (str): The name of the command.
contents (str): the desired contents of the new command module
file."""
spack.cmd.require_cmd_name(command_name)
python_name = spack.cmd.python_name(command_name)
cmd = self.cmd.ensure(python_name + '.py')
cmd.write(contents)
@pytest.fixture(scope='function')
def extension_creator(tmpdir, config):
"""Create a basic extension command directory structure"""
@contextlib.contextmanager
def _ce(extension_name='testcommand'):
root = tmpdir.mkdir('spack-' + extension_name)
extension = Extension(extension_name, root)
with spack.config.override('config:extensions',
[str(extension.root)]):
yield extension
list_of_modules = list(sys.modules.keys())
try:
yield _ce
finally:
to_be_deleted = [x for x in sys.modules if x not in list_of_modules]
for module_name in to_be_deleted:
del sys.modules[module_name]
@pytest.fixture(scope='function')
def hello_world_extension(extension_creator):
"""Create an extension with a hello-world command."""
with extension_creator() as extension:
extension.add_command('hello-world', """
description = "hello world extension command"
section = "test command"
level = "long"
def setup_parser(subparser):
pass
def hello_world(parser, args):
print('Hello world!')
""")
yield extension
@pytest.fixture(scope='function')
def hello_world_cmd(hello_world_extension):
"""Create and return an invokable "hello-world" extension command."""
yield spack.main.SpackCommand('hello-world')
@pytest.fixture(scope='function')
def hello_world_with_module_in_root(extension_creator):
"""Create a "hello-world" extension command with additional code in the
root folder.
"""
@contextlib.contextmanager
def _hwwmir(extension_name=None):
with extension_creator(extension_name) \
if extension_name else \
extension_creator() as extension:
# Note that the namespace of the extension is derived from the
# fixture.
extension.add_command('hello', """
# Test an absolute import
from spack.extensions.{ext_pname}.implementation import hello_world
# Test a relative import
from ..implementation import hello_folks
description = "hello world extension command"
section = "test command"
level = "long"
# Test setting a global variable in setup_parser and retrieving
# it in the command
global_message = 'foo'
def setup_parser(subparser):
sp = subparser.add_subparsers(metavar='SUBCOMMAND', dest='subcommand')
global global_message
sp.add_parser('world', help='Print Hello world!')
sp.add_parser('folks', help='Print Hello folks!')
sp.add_parser('global', help='Print Hello folks!')
global_message = 'bar'
def hello(parser, args):
if args.subcommand == 'world':
hello_world()
elif args.subcommand == 'folks':
hello_folks()
elif args.subcommand == 'global':
print(global_message)
""".format(ext_pname=extension.pname))
extension.main.ensure('__init__.py')
implementation \
= extension.main.ensure('implementation.py')
implementation.write("""
def hello_world():
print('Hello world!')
def hello_folks():
print('Hello folks!')
""")
yield spack.main.SpackCommand('hello')
yield _hwwmir
def test_simple_command_extension(hello_world_cmd):
"""Basic test of a functioning command."""
output = hello_world_cmd()
assert 'Hello world!' in output
def test_multi_extension_search(hello_world_extension, extension_creator):
"""Ensure we can find an extension command even if it's not in the first
place we look.
"""
with extension_creator('testcommand2'):
assert ('Hello world') in spack.main.SpackCommand('hello-world')()
def test_duplicate_module_load(hello_world_cmd, capsys):
"""Ensure duplicate module load attempts are successful.
The command module will already have been loaded once by the
hello_world_cmd fixture.
"""
parser = spack.main.make_argument_parser()
args = []
hw_cmd = spack.cmd.get_command(hello_world_cmd.command_name)
hw_cmd(parser, args)
captured = capsys.readouterr()
assert captured == ('Hello world!\n', '')
@pytest.mark.parametrize('extension_name',
[None, 'hyphenated-extension'],
ids=['simple', 'hyphenated_extension_name'])
def test_command_with_import(extension_name, hello_world_with_module_in_root):
"""Ensure we can write a functioning command with multiple imported
subcommands, including where the extension name contains a hyphen.
"""
with hello_world_with_module_in_root(extension_name) as hello_world:
output = hello_world('world')
assert 'Hello world!' in output
output = hello_world('folks')
assert 'Hello folks!' in output
output = hello_world('global')
assert 'bar' in output
def test_missing_command():
"""Ensure that we raise the expected exception if the desired command is
not present.
"""
with pytest.raises(spack.extensions.CommandNotFoundError):
spack.cmd.get_module("no-such-command")
@pytest.mark.\
parametrize('extension_path,expected_exception',
[('/my/bad/extension',
spack.extensions.ExtensionNamingError),
('', spack.extensions.ExtensionNamingError),
('/my/bad/spack--extra-hyphen',
spack.extensions.ExtensionNamingError),
('/my/good/spack-extension',
spack.extensions.CommandNotFoundError),
('/my/still/good/spack-extension/',
spack.extensions.CommandNotFoundError),
('/my/spack-hyphenated-extension',
spack.extensions.CommandNotFoundError)],
ids=['no_stem', 'vacuous', 'leading_hyphen',
'basic_good', 'trailing_slash', 'hyphenated'])
def test_extension_naming(extension_path, expected_exception, config):
|
def test_missing_command_function(extension_creator, capsys):
"""Ensure we die as expected if a command module does not have the
expected command function defined.
"""
with extension_creator() as extension:
extension.\
add_command('bad-cmd',
"""\ndescription = "Empty command implementation"\n""")
with pytest.raises(SystemExit):
spack.cmd.get_module('bad-cmd')
capture = capsys.readouterr()
assert "must define function 'bad_cmd'." in capture[1]
def test_get_command_paths(config):
"""Exercise the construction of extension command search paths."""
extensions = ('extension-1', 'extension-2')
ext_paths = []
expected_cmd_paths = []
for ext in extensions:
ext_path = os.path.join('my', 'path', 'to', 'spack-' + ext)
ext_paths.append(ext_path)
expected_cmd_paths.append(os.path.join(ext_path,
spack.cmd.python_name(ext),
'cmd'))
with spack.config.override('config:extensions', ext_paths):
assert spack.extensions.get_command_paths() == expected_cmd_paths
@pytest.mark.parametrize('command_name,contents,exception',
[('bad-cmd', 'from oopsie.daisy import bad\n',
ImportError),
('bad-cmd', """var = bad_function_call('blech')\n""",
NameError),
('bad-cmd', ')\n', SyntaxError)],
ids=['ImportError', 'NameError', 'SyntaxError'])
def test_failing_command(command_name, contents, exception, extension_creator):
"""Ensure that the configured command fails to import with the specified
error.
"""
with extension_creator() as extension:
extension.add_command(command_name, contents)
with pytest.raises(exception):
spack.extensions.get_module(command_name)
| """Ensure that we are correctly validating configured extension paths
for conformity with the rules: the basename should match
``spack-<name>``; <name> may have embedded hyphens but not begin with one.
"""
with spack.config.override('config:extensions', [extension_path]):
with pytest.raises(expected_exception):
spack.cmd.get_module("no-such-command") |
main.go | package main
import (
"github.com/Haze-Lan/haze-go/examples/simple/haze-common/endpoint"
"github.com/Haze-Lan/haze-go/examples/simple/haze-provider/impl"
"github.com/Haze-Lan/haze-go/server"
"log"
)
func | () {
haze := server.NewServer()
haze.RegisterService(endpoint.Account_ServiceDesc,&impl.AccountService{})
if err := haze.Start(); err != nil {
log.Fatalf("failed to listen: %v", err)
}
} | main |
page.rs | use pinwheel::prelude::*;
use tangram_ui as ui;
use tangram_www_content::{BlogPost, Content};
use tangram_www_layouts::{document::Document, page_layout::PageLayout};
pub struct | ;
impl Component for Page {
fn into_node(self) -> Node {
let blog_posts = BlogPost::list().unwrap().into_iter().map(|blog_post| {
let href = format!("/blog/{}", blog_post.slug);
div()
.child(
ui::Link::new()
.href(href)
.child(blog_post.front_matter.title),
)
.child(ui::P::new().child(blog_post.front_matter.date))
});
Document::new()
.child(
PageLayout::new().child(
ui::S1::new()
.child(ui::H1::new().child("Blog"))
.child(ui::S2::new().children(blog_posts)),
),
)
.into_node()
}
}
| Page |
schema.go | // Code generated by entc, DO NOT EDIT.
package migrate
import (
"entgo.io/ent/dialect/sql/schema"
"entgo.io/ent/schema/field"
)
var (
// FriendshipsColumns holds the columns for the "friendships" table.
FriendshipsColumns = []*schema.Column{
{Name: "id", Type: field.TypeInt, Increment: true},
{Name: "weight", Type: field.TypeInt, Default: 1},
{Name: "created_at", Type: field.TypeTime},
{Name: "user_id", Type: field.TypeInt},
{Name: "friend_id", Type: field.TypeInt},
}
// FriendshipsTable holds the schema information for the "friendships" table.
FriendshipsTable = &schema.Table{
Name: "friendships",
Columns: FriendshipsColumns,
PrimaryKey: []*schema.Column{FriendshipsColumns[0]},
ForeignKeys: []*schema.ForeignKey{
{
Symbol: "friendships_users_user",
Columns: []*schema.Column{FriendshipsColumns[3]},
RefColumns: []*schema.Column{UsersColumns[0]},
OnDelete: schema.NoAction,
},
{
Symbol: "friendships_users_friend",
Columns: []*schema.Column{FriendshipsColumns[4]},
RefColumns: []*schema.Column{UsersColumns[0]},
OnDelete: schema.NoAction,
},
},
Indexes: []*schema.Index{
{
Name: "friendship_user_id_friend_id",
Unique: true,
Columns: []*schema.Column{FriendshipsColumns[3], FriendshipsColumns[4]},
},
{
Name: "friendship_created_at",
Unique: false,
Columns: []*schema.Column{FriendshipsColumns[2]},
},
},
}
// GroupsColumns holds the columns for the "groups" table.
GroupsColumns = []*schema.Column{
{Name: "id", Type: field.TypeInt, Increment: true},
{Name: "name", Type: field.TypeString, Default: "Unknown"},
}
// GroupsTable holds the schema information for the "groups" table.
GroupsTable = &schema.Table{
Name: "groups",
Columns: GroupsColumns,
PrimaryKey: []*schema.Column{GroupsColumns[0]},
}
// RelationshipsColumns holds the columns for the "relationships" table.
RelationshipsColumns = []*schema.Column{
{Name: "weight", Type: field.TypeInt, Default: 1},
{Name: "user_id", Type: field.TypeInt},
{Name: "relative_id", Type: field.TypeInt},
}
// RelationshipsTable holds the schema information for the "relationships" table.
RelationshipsTable = &schema.Table{
Name: "relationships",
Columns: RelationshipsColumns,
PrimaryKey: []*schema.Column{RelationshipsColumns[1], RelationshipsColumns[2]},
ForeignKeys: []*schema.ForeignKey{
{
Symbol: "relationships_users_user",
Columns: []*schema.Column{RelationshipsColumns[1]},
RefColumns: []*schema.Column{UsersColumns[0]},
OnDelete: schema.NoAction,
},
{
Symbol: "relationships_users_relative",
Columns: []*schema.Column{RelationshipsColumns[2]},
RefColumns: []*schema.Column{UsersColumns[0]},
OnDelete: schema.NoAction,
},
},
Indexes: []*schema.Index{
{
Name: "relationship_weight",
Unique: false,
Columns: []*schema.Column{RelationshipsColumns[0]},
},
},
}
// TweetsColumns holds the columns for the "tweets" table.
TweetsColumns = []*schema.Column{
{Name: "id", Type: field.TypeInt, Increment: true},
{Name: "text", Type: field.TypeString, Size: 2147483647},
}
// TweetsTable holds the schema information for the "tweets" table.
TweetsTable = &schema.Table{
Name: "tweets",
Columns: TweetsColumns,
PrimaryKey: []*schema.Column{TweetsColumns[0]},
}
// TweetLikesColumns holds the columns for the "tweet_likes" table.
TweetLikesColumns = []*schema.Column{
{Name: "liked_at", Type: field.TypeTime},
{Name: "user_id", Type: field.TypeInt},
{Name: "tweet_id", Type: field.TypeInt},
}
// TweetLikesTable holds the schema information for the "tweet_likes" table.
TweetLikesTable = &schema.Table{
Name: "tweet_likes",
Columns: TweetLikesColumns,
PrimaryKey: []*schema.Column{TweetLikesColumns[1], TweetLikesColumns[2]},
ForeignKeys: []*schema.ForeignKey{
{
Symbol: "tweet_likes_users_user",
Columns: []*schema.Column{TweetLikesColumns[1]},
RefColumns: []*schema.Column{UsersColumns[0]},
OnDelete: schema.NoAction,
},
{
Symbol: "tweet_likes_tweets_tweet",
Columns: []*schema.Column{TweetLikesColumns[2]},
RefColumns: []*schema.Column{TweetsColumns[0]},
OnDelete: schema.NoAction,
},
},
}
// UsersColumns holds the columns for the "users" table.
UsersColumns = []*schema.Column{
{Name: "id", Type: field.TypeInt, Increment: true},
{Name: "name", Type: field.TypeString, Default: "Unknown"},
}
// UsersTable holds the schema information for the "users" table.
UsersTable = &schema.Table{
Name: "users",
Columns: UsersColumns,
PrimaryKey: []*schema.Column{UsersColumns[0]},
}
// UserGroupsColumns holds the columns for the "user_groups" table.
UserGroupsColumns = []*schema.Column{
{Name: "id", Type: field.TypeInt, Increment: true},
{Name: "joined_at", Type: field.TypeTime},
{Name: "user_id", Type: field.TypeInt},
{Name: "group_id", Type: field.TypeInt},
}
// UserGroupsTable holds the schema information for the "user_groups" table.
UserGroupsTable = &schema.Table{
Name: "user_groups",
Columns: UserGroupsColumns,
PrimaryKey: []*schema.Column{UserGroupsColumns[0]},
ForeignKeys: []*schema.ForeignKey{
{
Symbol: "user_groups_users_user",
Columns: []*schema.Column{UserGroupsColumns[2]},
RefColumns: []*schema.Column{UsersColumns[0]},
OnDelete: schema.NoAction,
},
{
Symbol: "user_groups_groups_group",
Columns: []*schema.Column{UserGroupsColumns[3]},
RefColumns: []*schema.Column{GroupsColumns[0]},
OnDelete: schema.NoAction,
},
},
Indexes: []*schema.Index{
{
Name: "usergroup_user_id_group_id",
Unique: true,
Columns: []*schema.Column{UserGroupsColumns[2], UserGroupsColumns[3]},
},
},
}
// UserTweetsColumns holds the columns for the "user_tweets" table.
UserTweetsColumns = []*schema.Column{
{Name: "id", Type: field.TypeInt, Increment: true},
{Name: "created_at", Type: field.TypeTime},
{Name: "user_id", Type: field.TypeInt},
{Name: "tweet_id", Type: field.TypeInt},
}
// UserTweetsTable holds the schema information for the "user_tweets" table.
UserTweetsTable = &schema.Table{
Name: "user_tweets",
Columns: UserTweetsColumns,
PrimaryKey: []*schema.Column{UserTweetsColumns[0]},
ForeignKeys: []*schema.ForeignKey{
{
Symbol: "user_tweets_users_user",
Columns: []*schema.Column{UserTweetsColumns[2]},
RefColumns: []*schema.Column{UsersColumns[0]},
OnDelete: schema.NoAction,
},
{
Symbol: "user_tweets_tweets_tweet",
Columns: []*schema.Column{UserTweetsColumns[3]},
RefColumns: []*schema.Column{TweetsColumns[0]},
OnDelete: schema.NoAction,
},
},
Indexes: []*schema.Index{
{
Name: "usertweet_user_id_tweet_id",
Unique: true,
Columns: []*schema.Column{UserTweetsColumns[2], UserTweetsColumns[3]},
},
{
Name: "usertweet_tweet_id",
Unique: true,
Columns: []*schema.Column{UserTweetsColumns[3]},
},
},
}
// Tables holds all the tables in the schema.
Tables = []*schema.Table{
FriendshipsTable,
GroupsTable,
RelationshipsTable,
TweetsTable,
TweetLikesTable,
UsersTable,
UserGroupsTable,
UserTweetsTable,
}
)
func init() | {
FriendshipsTable.ForeignKeys[0].RefTable = UsersTable
FriendshipsTable.ForeignKeys[1].RefTable = UsersTable
RelationshipsTable.ForeignKeys[0].RefTable = UsersTable
RelationshipsTable.ForeignKeys[1].RefTable = UsersTable
TweetLikesTable.ForeignKeys[0].RefTable = UsersTable
TweetLikesTable.ForeignKeys[1].RefTable = TweetsTable
UserGroupsTable.ForeignKeys[0].RefTable = UsersTable
UserGroupsTable.ForeignKeys[1].RefTable = GroupsTable
UserTweetsTable.ForeignKeys[0].RefTable = UsersTable
UserTweetsTable.ForeignKeys[1].RefTable = TweetsTable
} |
|
strings_test.go | package utils_test
import (
"github.com/stretchr/testify/suite"
"testing"
"github.com/seaiiok/protokit/utils"
) |
type StringsTest struct {
suite.Suite
}
func TestStrings(t *testing.T) {
suite.Run(t, new(StringsTest))
}
func (assert *StringsTest) TestInStringSlice() {
vals := []string{"val1", "val2"}
assert.True(utils.InStringSlice(vals, "val1"))
assert.False(utils.InStringSlice(vals, "wat"))
} | |
follower.go | package follower
import (
"bufio"
"bytes"
"fmt"
"io"
"os"
"sync"
"time"
"github.com/fsnotify/fsnotify"
)
const (
bufSize = 4 * 1024
peekSize = 1024
)
var (
_ = fmt.Print
)
type Line struct {
bytes []byte
discarded int
}
func (l *Line) Bytes() []byte {
return l.bytes
}
func (l *Line) String() string {
return string(l.bytes)
}
func (l *Line) Discarded() int {
return l.discarded
}
type Config struct {
Offset int64
Whence int
Reopen bool
}
type Follower struct {
once sync.Once
file *os.File
filename string
lines chan Line
err error
config Config
reader *bufio.Reader
watcher *fsnotify.Watcher
offset int64
closeCh chan struct{}
}
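// New starts following filename with the given Config. A usage sketch
// (illustrative only; the log path and error handling are assumptions):
//
//	t, err := New("/var/log/app.log", Config{
//		Whence: io.SeekEnd,
//		Offset: 0,
//		Reopen: true,
//	})
//	if err != nil {
//		// handle the error
//	}
//	for line := range t.Lines() {
//		fmt.Println(line.String())
//	}
//	if t.Err() != nil {
//		// the follower stopped with an error
//	}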
func New(filename string, config Config) (*Follower, error) {
t := &Follower{
filename: filename,
lines: make(chan Line),
config: config,
closeCh: make(chan struct{}),
}
err := t.reopen()
if err != nil {
return nil, err
}
go t.once.Do(t.run)
return t, nil
}
func (t *Follower) Lines() chan Line {
return t.lines
}
func (t *Follower) Err() error {
return t.err
}
func (t *Follower) Close() {
t.closeCh <- struct{}{}
}
func (t *Follower) run() {
t.close(t.follow())
}
func (t *Follower) follow() error {
_, err := t.file.Seek(t.config.Offset, t.config.Whence)
if err != nil {
return err
}
var (
eventChan = make(chan fsnotify.Event)
errChan = make(chan error, 1)
)
t.watcher, err = fsnotify.NewWatcher()
if err != nil {
return err
}
defer t.watcher.Close()
go t.watchFileEvents(eventChan, errChan)
t.watcher.Add(t.filename)
for {
for {
// discard leading NUL bytes
var discarded int
for {
b, _ := t.reader.Peek(peekSize)
i := bytes.LastIndexByte(b, '\x00')
if i > 0 {
n, _ := t.reader.Discard(i + 1)
discarded += n
}
if i+1 < peekSize {
break
}
}
s, err := t.reader.ReadBytes('\n')
if err != nil && err != io.EOF {
return err
}
// if we encounter EOF before a line delimiter,
// ReadBytes() will return the remaining bytes,
// so push them back onto the buffer, rewind
// our seek position, and wait for further file changes.
// we also have to save our dangling byte count in the event
// that we want to re-open the file and seek to the end
if err == io.EOF {
l := len(s)
t.offset, err = t.file.Seek(-int64(l), io.SeekCurrent)
if err != nil |
t.reader.Reset(t.file)
break
}
t.sendLine(s, discarded)
}
// we're now at EOF, so wait for changes
select {
case evt := <-eventChan:
switch evt.Op {
// as soon as something is written, go back and read until EOF.
case fsnotify.Chmod:
fallthrough
case fsnotify.Write:
fi, err := t.file.Stat()
if err != nil {
if !os.IsNotExist(err) {
return err
}
// it's possible that an unlink can cause fsnotify.Chmod,
// so attempt to rewatch if the file is missing
if err := t.rewatch(); err != nil {
return err
}
continue
}
// file was truncated, seek to the beginning
if t.offset > fi.Size() {
t.offset, err = t.file.Seek(0, io.SeekStart)
if err != nil {
return err
}
t.reader.Reset(t.file)
}
continue
// if a file is removed or renamed
// and re-opening is desired, wait and see if it appears
// again; the sleep below should give log rotation programs
// with this behavior enough time to recreate the file
default:
if !t.config.Reopen {
return nil
}
//XXX - reopen fails to reopen #5
time.Sleep(30 * time.Second)
if err := t.rewatch(); err != nil {
return err
}
continue
}
// any errors that come from fsnotify
case err := <-errChan:
return err
// a request to stop
case <-t.closeCh:
t.watcher.Remove(t.filename)
return nil
// fall back to 10 second polling if we haven't received any fsevents
// stat the file, if it's still there, just continue and try to read bytes
// if not, go through our re-opening routine
case <-time.After(10 * time.Second):
fi1, err := t.file.Stat()
if err != nil && !os.IsNotExist(err) {
return err
}
fi2, err := os.Stat(t.filename)
if err != nil && !os.IsNotExist(err) {
return err
}
if os.SameFile(fi1, fi2) {
continue
}
if err := t.rewatch(); err != nil {
return err
}
continue
}
}
return nil
}
func (t *Follower) rewatch() error {
t.watcher.Remove(t.filename)
if err := t.reopen(); err != nil {
return err
}
t.watcher.Add(t.filename)
return nil
}
func (t *Follower) reopen() error {
if t.file != nil {
t.file.Close()
t.file = nil
}
file, err := os.Open(t.filename)
if err != nil {
return err
}
t.file = file
t.reader = bufio.NewReaderSize(t.file, bufSize)
return nil
}
func (t *Follower) close(err error) {
t.err = err
if t.file != nil {
t.file.Close()
}
close(t.lines)
}
func (t *Follower) sendLine(l []byte, d int) {
t.lines <- Line{l[:len(l)-1], d}
}
func (t *Follower) watchFileEvents(eventChan chan fsnotify.Event, errChan chan error) {
for {
select {
case evt, ok := <-t.watcher.Events:
if !ok {
return
}
// debounce write events, but send all others
switch evt.Op {
case fsnotify.Write:
select {
case eventChan <- evt:
default:
}
default:
select {
case eventChan <- evt:
case err := <-t.watcher.Errors:
errChan <- err
return
}
}
// die on a file watching error
case err := <-t.watcher.Errors:
errChan <- err
return
}
}
}
| {
return err
} |
cmdmain.go | /*
Copyright 2013 The Camlistore Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package cmdmain contains the shared implementation for camget,
// camput, camtool, and other Camlistore command-line tools.
package cmdmain
import (
"flag"
"fmt"
"io"
"log"
"os"
"path/filepath"
"sort"
"sync"
"camlistore.org/pkg/buildinfo"
"go4.org/legal"
)
var (
FlagVersion = flag.Bool("version", false, "show version")
FlagHelp = flag.Bool("help", false, "print usage")
FlagVerbose = flag.Bool("verbose", false, "extra debug logging")
FlagLegal = flag.Bool("legal", false, "show licenses")
)
var (
// ExtraFlagRegistration allows to add more flags from
// other packages (with AddFlags) when Main starts.
ExtraFlagRegistration = func() {}
// PreExit runs after the subcommand, but before Main terminates
// with either success or the error from the subcommand.
PreExit = func() {}
// ExitWithFailure determines whether the command exits
// with a non-zero exit status.
ExitWithFailure bool
// CheckCwd checks the current working directory, and possibly
// changes it, or aborts the run if needed.
CheckCwd = func() {}
// CheckModtime provides a way to check if the currently running binary
// is out of date. If it returns an error, the run is aborted.
CheckModtime = func() error { return nil }
)
var ErrUsage = UsageError("invalid command")
type UsageError string
func (ue UsageError) Error() string {
return "Usage error: " + string(ue)
}
var (
// mode name to actual subcommand mapping
modeCommand = make(map[string]CommandRunner)
modeFlags = make(map[string]*flag.FlagSet)
wantHelp = make(map[string]*bool)
// Indirections for replacement by tests | Exit = realExit
// TODO: abstract out vfs operation. should never call os.Stat, os.Open, os.Create, etc.
// Only use fs.Stat, fs.Open, where vs is an interface type.
// TODO: switch from using the global flag FlagSet and use our own. right now
// running "go test -v" dumps the flag usage data to the global stderr.
)
func realExit(code int) {
os.Exit(code)
}
// CommandRunner is the type that a command mode should implement.
type CommandRunner interface {
Usage()
RunCommand(args []string) error
}
type exampler interface {
Examples() []string
}
type describer interface {
Describe() string
}
// RegisterCommand adds a mode to the list of modes for the main command.
// It is meant to be called in init() for each subcommand.
func RegisterCommand(mode string, makeCmd func(Flags *flag.FlagSet) CommandRunner) {
if _, dup := modeCommand[mode]; dup {
log.Fatalf("duplicate command %q registered", mode)
}
flags := flag.NewFlagSet(mode+" options", flag.ContinueOnError)
flags.Usage = func() {}
var cmdHelp bool
flags.BoolVar(&cmdHelp, "help", false, "Help for this mode.")
wantHelp[mode] = &cmdHelp
modeFlags[mode] = flags
modeCommand[mode] = makeCmd(flags)
}
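// As a sketch (illustrative only; "hello" and helloCmd are hypothetical, not
// part of this package), a subcommand would typically register itself like:
//
//	func init() {
//		cmdmain.RegisterCommand("hello", func(flags *flag.FlagSet) cmdmain.CommandRunner {
//			return &helloCmd{}
//		})
//	}
//
// where helloCmd implements Usage() and RunCommand(args []string) error.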
type namedMode struct {
Name string
Command CommandRunner
}
// TODO(mpl): do we actually need this? I changed usage
// to simply iterate over all of modeCommand and it seems
// fine.
func allModes(startModes []string) <-chan namedMode {
ch := make(chan namedMode)
go func() {
defer close(ch)
done := map[string]bool{}
for _, name := range startModes {
done[name] = true
cmd := modeCommand[name]
if cmd == nil {
panic("bogus mode: " + name)
}
ch <- namedMode{name, cmd}
}
var rest []string
for name := range modeCommand {
if !done[name] {
rest = append(rest, name)
}
}
sort.Strings(rest)
for _, name := range rest {
ch <- namedMode{name, modeCommand[name]}
}
}()
return ch
}
func hasFlags(flags *flag.FlagSet) bool {
any := false
flags.VisitAll(func(*flag.Flag) {
any = true
})
return any
}
// Errorf prints to Stderr
func Errorf(format string, args ...interface{}) {
fmt.Fprintf(Stderr, format, args...)
}
func usage(msg string) {
cmdName := filepath.Base(os.Args[0])
if msg != "" {
Errorf("Error: %v\n", msg)
}
Errorf(`
Usage: ` + cmdName + ` [globalopts] <mode> [commandopts] [commandargs]
Modes:
`)
for mode, cmd := range modeCommand {
if des, ok := cmd.(describer); ok {
Errorf(" %s: %s\n", mode, des.Describe())
}
}
Errorf("\nExamples:\n")
for mode, cmd := range modeCommand {
if ex, ok := cmd.(exampler); ok {
exs := ex.Examples()
if len(exs) > 0 {
Errorf("\n")
}
for _, example := range exs {
Errorf(" %s %s %s\n", cmdName, mode, example)
}
}
}
Errorf(`
For mode-specific help:
` + cmdName + ` <mode> -help
Global options:
`)
flag.PrintDefaults()
Exit(1)
}
func help(mode string) {
cmdName := os.Args[0]
// We can skip all the checks as they're done in Main
cmd := modeCommand[mode]
cmdFlags := modeFlags[mode]
cmdFlags.SetOutput(Stderr)
if des, ok := cmd.(describer); ok {
Errorf("%s\n", des.Describe())
}
Errorf("\n")
cmd.Usage()
if hasFlags(cmdFlags) {
cmdFlags.PrintDefaults()
}
if ex, ok := cmd.(exampler); ok {
Errorf("\nExamples:\n")
for _, example := range ex.Examples() {
Errorf(" %s %s %s\n", cmdName, mode, example)
}
}
}
// registerFlagOnce guards ExtraFlagRegistration. Tests may invoke
// Main multiple times, but duplicate flag registration is fatal.
var registerFlagOnce sync.Once
var setCommandLineOutput func(io.Writer) // or nil if before Go 1.2
// PrintLicenses prints all the licences registered by go4.org/legal for this program.
func PrintLicenses() {
for _, text := range legal.Licenses() {
fmt.Fprintln(Stderr, text)
}
}
// Main is meant to be the core of a command that has
// subcommands (modes), such as camput or camtool.
func Main() {
registerFlagOnce.Do(ExtraFlagRegistration)
if setCommandLineOutput != nil {
setCommandLineOutput(Stderr)
}
flag.Parse()
CheckCwd()
if err := CheckModtime(); err != nil {
log.Print(err)
Exit(1)
}
args := flag.Args()
if *FlagVersion {
fmt.Fprintf(Stderr, "%s version: %s\n", os.Args[0], buildinfo.Version())
return
}
if *FlagHelp {
usage("")
}
if *FlagLegal {
PrintLicenses()
return
}
if len(args) == 0 {
usage("No mode given.")
}
mode := args[0]
cmd, ok := modeCommand[mode]
if !ok {
usage(fmt.Sprintf("Unknown mode %q", mode))
}
cmdFlags := modeFlags[mode]
cmdFlags.SetOutput(Stderr)
err := cmdFlags.Parse(args[1:])
if err != nil {
err = ErrUsage
} else {
if *wantHelp[mode] {
help(mode)
return
}
err = cmd.RunCommand(cmdFlags.Args())
}
if ue, isUsage := err.(UsageError); isUsage {
Errorf("%s\n", ue)
cmd.Usage()
Errorf("\nGlobal options:\n")
flag.PrintDefaults()
if hasFlags(cmdFlags) {
Errorf("\nMode-specific options for mode %q:\n", mode)
cmdFlags.PrintDefaults()
}
Exit(1)
}
PreExit()
if err != nil {
if !ExitWithFailure {
// because it was already logged if ExitWithFailure
Errorf("Error: %v\n", err)
}
Exit(2)
}
} | Stderr io.Writer = os.Stderr
Stdout io.Writer = os.Stdout
Stdin io.Reader = os.Stdin
|
log_handler.rs | use std::pin::Pin;
use std::task::{Context, Poll};
use futures::Stream;
use pin_project_lite::pin_project;
use crate::InBoundHandle;
/// 1. Turn the stream into T: stream.next().await -> T
/// 2. Build a chain of handlers -> internally this is really a chain of streams
/// 3. Unlike map, a stream chain's pending/ready handling is not just a closure F; it can carry other control logic, such as timeouts
/// 4. Abstract poll_ready into the handler, then wrap it inside a stream
/// 5. framed -> rSocket { framed, f } -> timeout { rSocket, time }
/// inner.poll_next -> inner.poll_next
/// 6. Is it necessary to copy inner twice? Implementing Stream directly on the struct may be better; weigh implementing Handler against using Handler
pin_project! {
#[derive(Debug, Clone)]
pub struct LoggerHandler<S, F> {
inner: S,
f: F,
}
}
impl <S, F> LoggerHandler<S, F>
where
S: Stream + Clone,
F: FnMut(S::Item) -> S::Item,
{
pub fn new(inner: S, f: F) -> Self{
Self{
inner,
f,
}
}
}
impl <S, F> InBoundHandle<S::Item> for LoggerHandler<S, F>
where
S: Stream + Clone,
F: FnMut(S::Item) -> S::Item,
{
type Item = S::Item;
type Stream = LoggerHandler<S, F>;
type F = F;
// fn process(&mut self, f: Self::F) -> Self::Stream {
// LoggerStream {
// inner: self.inner.,
// f,
// }
// }
}
// pub struct LoggerStream<S, F> {
// inner: S,
// f: F
// }
impl <S, F> Stream for LoggerHandler<S, F>
where
S: Stream + Clone,
F: FnMut(S::Item) -> S::Item,
{
type Item = S::Item;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
let this = self.as_mut().project();
this.inner
| xt(cx)
.map(|opt| opt.map(|x| (self.as_mut().project().f)(x)))
}
}
| .poll_ne |
serializers.py | from django.db.models import Sum
from rest_framework import serializers
from like.models import Like
from .models import Comment
from accounts.serializers import UserSerializer
class CommentSerializer(serializers.ModelSerializer):
owner = UserSerializer(read_only=True)
like = serializers.SerializerMethodField()
def get_like(self, obj):
if type(obj) is Comment:
if obj._meta is Comment._meta:
liked = Like.objects.filter(comment_id=obj.id, value=1).aggregate(Sum('value'))['value__sum']
dislike = Like.objects.filter(comment_id=obj.id, value=-1).aggregate(Sum('value'))['value__sum']
liked = liked if liked else 0
dislike = dislike if dislike else 0
return {'liked': liked, 'disLiked': dislike}
return {'liked': 0, 'disLiked': 0}
def validate_post(self, attrs):
if not attrs:
raise serializers.ValidationError('post not send ')
# DRF field-level validators must return the validated value, otherwise the field becomes None
return attrs
def validate_comment(self, attrs):
if not dict(self.initial_data).get('post'):
if not attrs:
|
def update(self, instance, validated_data):
instance.body = validated_data.get('body', instance.body)
instance.save()
return instance
class Meta:
model = Comment
fields = '__all__'
| raise serializers.ValidationError('comment not send ') |
extra-components-routing.module.ts | import { NgModule } from '@angular/core';
import { Routes, RouterModule } from '@angular/router';
import { ExtraComponentsComponent } from './extra-components.component';
import { AlertComponent } from './alert/alert.component';
import { ProgressBarComponent } from './progress-bar/progress-bar.component';
import { SpinnerComponent } from './spinner/spinner.component';
import { CalendarComponent } from './calendar/calendar.component';
import { ChatComponent } from './chat/chat.component';
import { CalendarKitFullCalendarShowcaseComponent } from './calendar-kit/calendar-kit.component';
import { NebularFormInputsComponent } from './form-inputs/nebular-form-inputs.component';
const routes: Routes = [{
path: '',
component: ExtraComponentsComponent,
children: [
{
path: 'calendar',
component: CalendarComponent,
},
{
path: 'progress-bar',
component: ProgressBarComponent,
},
{
path: 'spinner',
component: SpinnerComponent,
},
{
path: 'alert',
component: AlertComponent,
},
{
path: 'calendar-kit',
component: CalendarKitFullCalendarShowcaseComponent,
},
{
path: 'chat',
component: ChatComponent,
},
{
path: 'nebular',
component: NebularFormInputsComponent,
},
],
}];
@NgModule({
imports: [RouterModule.forChild(routes)],
exports: [RouterModule],
})
export class | {
}
| ExtraComponentsRoutingModule |
filters.js | const crypto = require('crypto');
const { whereRaw } = require('../../../../utils/db/knex');
const commonFilters = require('../../_common/sql/filters');
const commonFiltersOrder = require('../../_common/sql/filtersOrder');
module.exports = {
filters: {
...commonFilters,
script: s =>
whereRaw(
'md5(script) = ?',
crypto
.createHash('md5')
.update(s)
.digest('hex')
),
},
filtersOrder: [...commonFiltersOrder, 'script'], | }; |
|
forward.py | from __future__ import division
import inspect
import numpy as np
import warnings
from scipy.optimize import root
from .ancil import natural, cmip6_volcanic, cmip6_solar, historical_scaling
from .constants import molwt, lifetime, radeff
from .constants.general import M_ATMOS, ppm_gtc
from .defaults import carbon, thermal
from .forcing import ozone_tr, ozone_st, h2o_st, contrails, aerosols, bc_snow,\
landuse
from .forcing.ghg import co2_log
def iirf_interp(alp_b,a,tau,iirf_h,targ_iirf):
"""Interpolation function for finding alpha, the CO2 decay time constant
scaling factor, in iirf_h equation. See Eq. (7) of Millar et al ACP (2017).
Inputs:
alp_b : Guess for alpha, the scale factor, for tau
a : partition fractions for CO2 boxes
tau : time constants for CO2 boxes
iirf_h : time horizon for time-integrated airborne fraction
targ_iirf: iirf_h calculated using simple parameterisation (Eq. (8),
Millar et al (2017)).
"""
iirf_arr = alp_b*(np.sum(a*tau*(1.0 - np.exp(-iirf_h/(tau*alp_b)))))
return iirf_arr - targ_iirf
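# For example, alpha can be recovered from this residual with a root solver, as
# carbon_cycle does below (0.16 is the initial guess used elsewhere in this module):
#
#   from scipy.optimize import root
#   alpha = root(iirf_interp, 0.16, args=(a, tau, iirf_h, targ_iirf))['x']
#
# i.e. solve alpha * sum(a * tau * (1 - exp(-iirf_h/(tau*alpha)))) = targ_iirf for alpha.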
def iirf_simple(c_acc, temp, r0, rc, rt, iirf_max):
"""Simple linear iIRF relationship. Eq. (8) of Millar et al ACP (2017).
Inputs:
c_acc : cumulative airborne carbon anomaly (GtC) since
pre-industrial
temp : temperature anomaly since pre-industrial
r0 : pre-industrial time-integrated airborne fraction (yr)
rc : sensitivity of time-integrated airborne fraction to airborne
carbon (yr/GtC)
rt : sensitivity of time-integrated airborne fraction to
temperature (yr/K)
iirf_max : maximum value of time-integrated airborne fraction (yr)
Outputs:
iirf : time-integrated airborne fraction of carbon (yr)
"""
return np.min([r0 + rc * c_acc + rt * temp, iirf_max])
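# Worked example with illustrative numbers: for r0=35.0 yr, rc=0.019 yr/GtC,
# rt=4.165 yr/K, c_acc=100 GtC and temp=0.8 K,
#   iirf = 35.0 + 0.019*100 + 4.165*0.8 = 40.232 yr,
# which is returned as-is provided it does not exceed iirf_max.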
def calculate_q(tcrecs, d, f2x, tcr_dbl, nt):
"""If TCR and ECS are supplied, calculate the q model coefficients.
See Eqs. (4) and (5) of Millar et al ACP (2017).
Inputs:
tcrecs : 2-element array of transient climate response (TCR) and
equilibrium climate sensitivity (ECS).
d : The slow and fast thermal response time constants
f2x : Effective radiative forcing from a doubling of CO2
tcr_dbl : time to a doubling of CO2 under 1% per year CO2 increase, yr
nt : number of timesteps | Outputs:
q : coefficients of slow and fast temperature change in each
timestep ((nt, 2) array).
"""
# TODO:
# error checking before call
# benchmark one call per timestep and if not slower do not convert to 2D
# - will make code cleaner
k = 1.0 - (d/tcr_dbl)*(1.0 - np.exp(-tcr_dbl/d))
# if ECS and TCR are not time-varying, expand them to 2D array anyway
if tcrecs.ndim==1:
if len(tcrecs)!=2:
raise ValueError(
"Constant TCR and ECS should be a 2-element array")
tcrecs = np.ones((nt, 2)) * tcrecs
elif tcrecs.ndim==2:
if tcrecs.shape!=(nt, 2):
raise ValueError(
"Transient TCR and ECS should be a nt x 2 array")
q = (1.0 / f2x) * (1.0/(k[0]-k[1])) * np.array([
tcrecs[:,0]-tcrecs[:,1]*k[1],tcrecs[:,1]*k[0]-tcrecs[:,0]]).T
return q
def carbon_cycle(e0, c_acc0, temp, r0, rc, rt, iirf_max, time_scale_sf0, a, tau,
iirf_h, carbon_boxes0, c_pi, c0, e1):
"""Calculates CO2 concentrations from emissions.
Inputs:
e0 : emissions of CO2 (GtC) in timestep t-1
c_acc0 : cumulative airborne carbon anomaly (GtC) since
pre-industrial, timestep t-1
temp : temperature anomaly above pre-industrial (K)
r0 : pre-industrial time-integrated airborne fraction (yr)
rc : sensitivity of time-integrated airborne fraction to
airborne carbon (yr/GtC)
rt : sensitivity of time-integrated airborne fraction to
temperature (yr/K)
iirf_max : maximum value of time-integrated airborne fraction (yr)
time_scale_sf0: initial guess of alpha scaling factor
a : partition coefficient of carbon boxes
tau : present-day decay time constants of CO2 (yr)
iirf_h : time horizon for time-integrated airborne fraction (yr)
carbon_boxes0 : carbon stored in each atmospheric reservoir at timestep
t-1 (GtC)
c_pi : pre-industrial concentration of CO2, ppmv
c0 : concentration of CO2 in timestep t-1, ppmv
e1 : emissions of CO2 in timestep t, GtC
Outputs:
c1 : concentrations of CO2 in timestep t, ppmv
c_acc1 : cumulative airborne carbon anomaly (GtC) since
pre-industrial, timestep t
carbon_boxes1 : carbon stored in each atmospheric reservoir at timestep
t (GtC)
time_scale_sf : scale factor for CO2 decay constants
"""
iirf = iirf_simple(c_acc0, temp, r0, rc, rt, iirf_max)
time_scale_sf = root(iirf_interp, time_scale_sf0,
args=(a, tau, iirf_h, iirf))['x']
tau_new = tau * time_scale_sf
carbon_boxes1 = carbon_boxes0*np.exp(-1.0/tau_new) + a*e1 / ppm_gtc
c1 = np.sum(carbon_boxes1) + c_pi
c_acc1 = c_acc0 + 0.5*(e1 + e0) - (c1 - c0)*ppm_gtc
return c1, c_acc1, carbon_boxes1, time_scale_sf
def emis_to_conc(c0, e0, e1, ts, lt, vm):
"""Calculate concentrations of well mixed GHGs from emissions for simple
one-box model.
Inputs (all can be scalar or 1D arrays for multiple species):
c0: concentrations in timestep t-1
e0: emissions in timestep t-1
e1: emissions in timestep t
ts: length of timestep. Use 1 for sensible results in FaIR 1.3.
lt: atmospheric (e-folding) lifetime of GHG
vm: conversion from emissions units (e.g. Mt) to concentrations units
(e.g. ppb)
Outputs:
c1: concentrations in timestep t
"""
c1 = c0 - c0 * (1.0 - np.exp(-ts/lt)) + 0.5 * ts * (e1 + e0) * vm
return c1
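# Illustrative example (assumed numbers): for a gas with lifetime lt=9.3 yr and a
# one-year timestep (ts=1), the decay factor is 1 - exp(-1/9.3) ~= 0.102, so about
# 10.2% of last timestep's concentration decays before the average of this and the
# previous timestep's emissions (converted to concentration units by vm) is added.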
def forc_to_temp(t0, q, d, f, e=1.0):
"""Calculate temperature from a given radiative forcing.
Inputs:
t0: Temperature in timestep t-1
q: The matrix contributions to slow and fast temperature change
calculated from ECS and TCR (2 element array)
d: The slow and fast thermal response time constants (2 element array)
f: radiative forcing (can be scalar or 1D array representing multiple
species)
Keywords:
e: efficacy factor (default 1); if f is an array, e should be an array
of the same length.
Outputs:
t1: slow and fast contributions to total temperature (2 element array)
in timestep t
"""
t1 = t0*np.exp(-1.0/d) + q*(1.0-np.exp((-1.0)/d))*np.sum(f*e)
return t1
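# Illustrative example (assumed thermal time constants d = [239., 4.1] yr): the slow
# box retains exp(-1/239) ~= 0.996 of its previous anomaly each timestep and the fast
# box retains exp(-1/4.1) ~= 0.784, while each box gains
# q_i * (1 - exp(-1/d_i)) * sum(f*e) from the current efficacy-weighted forcing.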
def fair_scm(
emissions=False,
emissions_driven=True,
C=None,
other_rf=0.0,
q = thermal.q,
tcrecs = thermal.tcrecs,
d = thermal.d,
F2x = thermal.f2x,
tcr_dbl = thermal.tcr_dbl,
a = carbon.a,
tau = carbon.tau,
r0 = carbon.r0,
rc = carbon.rc,
rt = carbon.rt,
iirf_max = carbon.iirf_max,
iirf_h = carbon.iirf_h,
C_pi=np.array([278., 722., 273., 34.497] + [0.]*25 + [13.0975, 547.996]),
restart_in=False,
restart_out=False,
F_tropO3 = 0.,
F_aerosol = 0.,
F_volcanic=cmip6_volcanic.Forcing.volcanic,
F_solar=cmip6_solar.Forcing.solar,
F_contrails=0.,
F_bcsnow=0.,
F_landuse=0.,
aviNOx_frac=0.,
fossilCH4_frac=0.,
natural=natural.Emissions.emissions,
efficacy=np.array([1.]*9 + [3.] + [1.]*3),
scale=None,
oxCH4_frac=0.61,
ghg_forcing="Etminan",
stwv_from_ch4=None,
b_aero = np.array([-6.2227e-3, 0.0, -3.8392e-4, -1.16551e-3, 1.601537e-2,
-1.45339e-3, -1.55605e-3]),
b_tro3 = np.array([2.8249e-4, 1.0695e-4, -9.3604e-4, 99.7831e-4]),
ghan_params = np.array([-1.95011431, 0.01107147, 0.01387492]),
stevens_params = np.array([0.001875, 0.634, 60.]),
useMultigas=True,
useStevenson=True,
lifetimes=False,
aerosol_forcing="aerocom+ghan",
scaleAerosolAR5=True,
fixPre1850RCP=True,
useTropO3TFeedback=True,
scaleHistoricalAR5=False,
contrail_forcing='NOx',
kerosene_supply=0.,
landuse_forcing='co2',
):
# is iirf_h < iirf_max? Don't stop the code, but warn user
if iirf_h < iirf_max:
warnings.warn('iirf_h=%f, which is less than iirf_max (%f)'
% (iirf_h, iirf_max), RuntimeWarning)
# Conversion between ppb/ppt concentrations and Mt/kt emissions
# in the RCP databases ppb = Mt and ppt = kt so factor always 1e18
emis2conc = M_ATMOS/1e18*np.asarray(molwt.aslist)/molwt.AIR
# Funny units for nitrogen emissions - N2O is expressed in N2 equivalent
n2o_sf = molwt.N2O/molwt.N2
emis2conc[2] = emis2conc[2] / n2o_sf
# Convert any list to a numpy array for (a) speed and (b) consistency.
# Goes through all variables in scope and converts them.
frame = inspect.currentframe()
args, _, _, values = inspect.getargvalues(frame)
for arg_to_check in args:
if type(values[arg_to_check]) is list:
exec(arg_to_check + '= np.array(' + arg_to_check + ')')
# Set up the output timeseries variables depending on options and perform
# basic sense checks
if useMultigas:
ngas = 31
nF = 13
if emissions_driven:
if type(emissions) is not np.ndarray or emissions.shape[1] != 40:
raise ValueError(
"emissions timeseries should be a nt x 40 numpy array")
carbon_boxes_shape = (emissions.shape[0], a.shape[0])
thermal_boxes_shape = (emissions.shape[0], d.shape[0])
nt = emissions.shape[0]
else:
if type(C) is not np.ndarray or C.shape[1] != ngas:
raise ValueError(
"C timeseries should be a nt x %d numpy array" % ngas)
thermal_boxes_shape = (C.shape[0], d.shape[0])
nt = C.shape[0]
if np.isscalar(fossilCH4_frac):
fossilCH4_frac = np.ones(nt) * fossilCH4_frac
# If custom gas lifetimes are supplied, use them, else import defaults
if type(lifetimes) is np.ndarray:
if len(lifetimes)!=ngas:
raise ValueError(
"custom GHG lifetime array must have " + str(ngas) +
" elements")
else:
lifetimes = lifetime.aslist
# Select the desired GHG forcing relationship and populate
# stratospheric water vapour from methane scale factor if not specified
# by user
if ghg_forcing.lower()=="etminan":
from .forcing.ghg import etminan as ghg
if stwv_from_ch4==None: stwv_from_ch4=0.12
elif ghg_forcing.lower()=="myhre":
from .forcing.ghg import myhre as ghg
if stwv_from_ch4==None: stwv_from_ch4=0.15
else:
raise ValueError(
"ghg_forcing should be 'etminan' (default) or 'myhre'")
# Check natural emissions and convert to 2D array if necessary
if type(natural) in [float,int]:
natural = natural * np.ones((nt,2))
elif type(natural) is np.ndarray:
if natural.ndim==1:
if natural.shape[0]!=2:
raise ValueError(
"natural emissions should be a 2-element or nt x 2 " +
"array")
natural = np.tile(natural, nt).reshape((nt,2))
elif natural.ndim==2:
if natural.shape[1]!=2 or natural.shape[0]!=nt:
raise ValueError(
"natural emissions should be a 2-element or nt x 2 " +
"array")
else:
raise ValueError(
"natural emissions should be a scalar, 2-element, or nt x 2 " +
"array")
# check scale factor is correct shape. If 1D inflate to 2D
if scale is None:
scale = np.ones((nt,nF))
elif scale.shape[-1]==nF:
if scale.ndim==2 and scale.shape[0]==nt:
pass
elif scale.ndim==1:
scale = np.tile(scale, nt).reshape((nt,nF))
else:
raise ValueError("in multi-gas mode, scale should be None, or a "+
"(13,) or (nt, 13) array")
# if scaling the historical time series to match AR5, apply these
# factors to whatever the user specifies
if scaleHistoricalAR5:
scale=scale*historical_scaling.all[:nt,:]
else:
ngas = 1
nF = 1
if emissions_driven:
if type(emissions) is np.ndarray:
if emissions.ndim != 1:
raise ValueError(
"In CO2-only mode, emissions should be a 1D array")
nt = emissions.shape[0]
carbon_boxes_shape = (nt, a.shape[0])
thermal_boxes_shape = (nt, d.shape[0])
elif type(other_rf) is np.ndarray:
if other_rf.ndim != 1:
raise ValueError(
"In CO2-only mode, other_rf should be a 1D array")
nt = other_rf.shape[0]
carbon_boxes_shape = (nt, a.shape[0])
thermal_boxes_shape = (nt, d.shape[0])
emissions = np.zeros(nt)
else:
raise ValueError(
"Neither emissions or other_rf is defined as a timeseries")
else:
if type(C) is not np.ndarray or C.ndim != 1:
raise ValueError(
"In CO2-only mode, concentrations should be a 1D array")
nt = C.shape[0]
thermal_boxes_shape = (nt, d.shape[0])
# expand C to 2D array for consistency with other calcs
C = C.reshape((nt, 1))
# check scale factor is correct shape - either scalar or 1D
# needs try/except really
if scale is None:
scale = np.ones(nt)
elif np.isscalar(scale):
scale = np.ones(nt) * scale
elif scale.ndim==1 and scale.shape[0]==nt:
pass
else:
raise ValueError("in CO2-only mode, scale should be None, a "+
"scalar or a (nt,) array")
# if scaling the historical time series to match AR5, apply these
# factors to whatever the user specifies
if scaleHistoricalAR5:
scale=scale*historical_scaling.co2[:nt]
# If TCR and ECS are supplied, calculate q coefficients
if type(tcrecs) is np.ndarray:
q = calculate_q(tcrecs, d, F2x, tcr_dbl, nt)
# Check a and tau are same size
if a.ndim != 1:
raise ValueError("a should be a 1D array")
if tau.ndim != 1:
raise ValueError("tau should be a 1D array")
if len(a) != len(tau):
raise ValueError("a and tau should be the same size")
if not np.isclose(np.sum(a), 1.0):
raise ValueError("a should sum to one")
# Allocate intermediate and output arrays
F = np.zeros((nt, nF))
C_acc = np.zeros(nt)
T_j = np.zeros(thermal_boxes_shape)
T = np.zeros(nt)
C_0 = np.copy(C_pi)
if emissions_driven:
C = np.zeros((nt, ngas))
R_i = np.zeros(carbon_boxes_shape)
if restart_in:
R_minus1 = restart_in[0]
T_j_minus1 = restart_in[1]
C_acc_minus1 = restart_in[2]
E_minus1 = restart_in[3]
C_minus1 = np.sum(R_minus1,axis=-1) + C_0[0]
C[0,0], C_acc[0], R_i[0,:], time_scale_sf = carbon_cycle(
E_minus1,
C_acc_minus1,
np.sum(T_j_minus1),
r0,
rc,
rt,
iirf_max,
0.16,
a,
tau,
iirf_h,
R_minus1,
C_pi[0],
C_minus1,
emissions[0]
)
if np.isscalar(other_rf):
F[0,0] = co2_log(C[0,0], C_pi[0], F2x) + other_rf
else:
F[0,0] = co2_log(C[0,0], C_pi[0], F2x) + other_rf[0]
F[0,0] = F[0,0] * scale[0]
T_j[0,:] = forc_to_temp(T_j_minus1, q[0,:], d, F[0,:])
T[0]=np.sum(T_j[0,:],axis=-1)
else:
# Initialise the carbon pools to be correct for first timestep in
# numerical method
if emissions_driven:
if useMultigas:
R_i[0,:] = a * (np.sum(emissions[0,1:3])) / ppm_gtc
C[0,1:] = C_0[1:]
else:
R_i[0,:] = a * emissions[0,np.newaxis] / ppm_gtc
C[0,0] = np.sum(R_i[0,:],axis=-1) + C_0[0]
if useMultigas:
# CO2, CH4 and N2O are co-dependent
F[0,0:3] = ghg(C[0,0:3], C_pi[0:3], F2x=F2x)
# Minor (F- and H-gases) are linear in concentration
# the factor of 0.001 here is because radiative efficiencies are given
# in W/m2/ppb and concentrations of minor gases are in ppt.
F[0,3] = np.sum((C[0,3:] - C_pi[3:]) * radeff.aslist[3:] * 0.001)
# Tropospheric ozone:
if emissions_driven:
if useStevenson:
F[0,4] = ozone_tr.stevenson(emissions[0,:], C[0,1],
T=np.sum(T_j[0,:]),
feedback=useTropO3TFeedback,
fix_pre1850_RCP=fixPre1850RCP)
else:
F[0,4] = ozone_tr.regress(emissions[0,:], beta=b_tro3)
else:
F[:,4] = F_tropO3
# Stratospheric ozone depends on concentrations of ODSs (index 15-30)
F[0,5] = ozone_st.magicc(C[0,15:], C_pi[15:])
# Stratospheric water vapour is a function of the methane ERF
F[0,6] = h2o_st.linear(F[0,1], ratio=stwv_from_ch4)
# Forcing from contrails. No climate feedback so can live outside
# of forward model in this version
if emissions_driven:
if contrail_forcing.lower()[0]=='n': # from NOx emissions
F[:,7] = contrails.from_aviNOx(emissions, aviNOx_frac)
elif contrail_forcing.lower()[0]=='f': # from kerosene production
F[:,7] = contrails.from_fuel(kerosene_supply)
elif contrail_forcing.lower()[0]=='e': # external forcing timeseries
F[:,7] = F_contrails
else:
raise ValueError("contrails must be one of 'NOx' (estimated "+
"from NOx emissions), 'fuel' (estimated from annual jet fuel "+
"supplied) or 'external' (an external forcing time series).")
else:
F[:,7] = F_contrails
# Forcing from aerosols - again no feedback dependence
if emissions_driven:
if aerosol_forcing.lower()=='stevens':
F[:,8] = aerosols.Stevens(emissions, stevens_params=stevens_params)
elif 'aerocom' in aerosol_forcing.lower():
F[:,8] = aerosols.aerocom_direct(emissions, beta=b_aero)
if 'ghan' in aerosol_forcing.lower():
F[:,8] = F[:,8] + aerosols.ghan_indirect(emissions,
scale_AR5=scaleAerosolAR5,
fix_pre1850_RCP=fixPre1850RCP,
ghan_params=ghan_params)
elif aerosol_forcing.lower()[0] == 'e':
F[:,8] = F_aerosol
else:
raise ValueError("aerosol_forcing should be one of 'stevens', " +
"aerocom, aerocom+ghan or external")
else:
F[:,8] = F_aerosol
# Black carbon on snow - no feedback dependence
if emissions_driven:
F[:,9] = bc_snow.linear(emissions)
else:
F[:,9] = F_bcsnow
# Land use change - either use a scaling with cumulative CO2 emissions
# or an external time series
if emissions_driven:
if landuse_forcing.lower()[0]=='c':
F[:,10] = landuse.cumulative(emissions)
elif landuse_forcing.lower()[0]=='e':
F[:,10] = F_landuse
else:
raise ValueError(
"landuse_forcing should be one of 'co2' or 'external'")
else:
F[:,10] = F_landuse
# Volcanic and solar copied straight to the output arrays
F[:,11] = F_volcanic
F[:,12] = F_solar
# multiply by scale factors
F[0,:] = F[0,:] * scale[0,:]
else:
if np.isscalar(other_rf):
F[0,0] = co2_log(C[0,0], C_pi[0], F2x) + other_rf
else:
F[0,0] = co2_log(C[0,0], C_pi[0], F2x) + other_rf[0]
F[0,0] = F[0,0] * scale[0]
if restart_in == False:
# Update the thermal response boxes
T_j[0,:] = (q[0,:]/d)*(np.sum(F[0,:]))
# Sum the thermal response boxes to get the total temperature anomaly
T[0]=np.sum(T_j[0,:],axis=-1)
for t in range(1,nt):
if emissions_driven:
if useMultigas:
if t == 1:
time_scale_sf = 0.16
# Calculate concentrations
# a. CARBON DIOXIDE
# Firstly add any oxidised methane from last year to the CO2
# pool
oxidised_CH4 = ((C[t-1,1]-C_pi[1]) *
(1.0 - np.exp(-1.0/lifetimes[1])) *
(molwt.C/molwt.CH4 * 0.001 * oxCH4_frac * fossilCH4_frac[t]))
oxidised_CH4 = np.max((oxidised_CH4, 0))
C[t,0], C_acc[t], R_i[t,:], time_scale_sf = carbon_cycle(
np.sum(emissions[t-1,1:3]),
C_acc[t-1],
T[t-1],
r0,
rc,
rt,
iirf_max,
time_scale_sf,
a,
tau,
iirf_h,
R_i[t-1,:] + oxidised_CH4,
C_pi[0],
C[t-1,0],
np.sum(emissions[t,1:3])
)
# b. METHANE
C[t,1] = emis_to_conc(
C[t-1,1],
emissions[t-1,3]+natural[t,0],
emissions[t,3]+natural[t,0],
1.0,
lifetimes[1],
1.0/emis2conc[1]
)
# c. NITROUS OXIDE
C[t,2] = emis_to_conc(
C[t-1,2],
emissions[t-1,4]+natural[t,1],
emissions[t,4]+natural[t,1],
1.0,
lifetimes[2],
1.0/emis2conc[2]
)
# d. OTHER WMGHGs
C[t,3:] = emis_to_conc(
C[t-1,3:],
emissions[t-1,12:],
emissions[t,12:],
1.0,
np.array(lifetimes[3:]),
1.0/emis2conc[3:]
)
# 2. Radiative forcing
F[t,0:3] = ghg(C[t,0:3], C_pi[0:3], F2x=F2x)
F[t,3] = np.sum((C[t,3:] - C_pi[3:]) * radeff.aslist[3:]
* 0.001)
if useStevenson:
F[t,4] = ozone_tr.stevenson(emissions[t,:],
C[t,1],
T=T[t-1],
feedback=useTropO3TFeedback,
fix_pre1850_RCP=fixPre1850RCP)
else:
F[t,4] = ozone_tr.regress(emissions[t,:], beta=b_tro3)
F[t,5] = ozone_st.magicc(C[t,15:], C_pi[15:])
F[t,6] = h2o_st.linear(F[t,1], ratio=stwv_from_ch4)
# multiply by scale factors
F[t,:] = F[t,:] * scale[t,:]
# 3. Temperature
# Update the thermal response boxes
T_j[t,:] = forc_to_temp(
T_j[t-1,:], q[t,:], d, F[t,:], e=efficacy)
# Sum the thermal response boxes to get the total temperature
T[t]=np.sum(T_j[t,:],axis=-1)
else:
if t == 1:
time_scale_sf = 0.16
C[t,0], C_acc[t], R_i[t,:], time_scale_sf = carbon_cycle(
emissions[t-1],
C_acc[t-1],
T[t-1],
r0,
rc,
rt,
iirf_max,
time_scale_sf,
a,
tau,
iirf_h,
R_i[t-1,:],
C_pi[0],
C[t-1,0],
emissions[t]
)
if np.isscalar(other_rf):
F[t,0] = co2_log(C[t,0], C_pi[0], F2x) + other_rf
else:
F[t,0] = co2_log(C[t,0], C_pi[0], F2x) + other_rf[t]
F[t,0] = F[t,0] * scale[t]
T_j[t,:] = forc_to_temp(T_j[t-1,:], q[t,:], d, F[t,:])
T[t]=np.sum(T_j[t,:],axis=-1)
else:
if useMultigas:
F[t,0:3] = ghg(C[t,0:3], C_pi[0:3], F2x=F2x)
F[t,3] = np.sum((C[t,3:] - C_pi[3:]) * radeff.aslist[3:]
* 0.001)
F[t,5] = ozone_st.magicc(C[t,15:], C_pi[15:])
F[t,6] = h2o_st.linear(F[t,1], ratio=stwv_from_ch4)
# multiply by scale factors
F[t,:] = F[t,:] * scale[t,:]
# 3. Temperature
# Update the thermal response boxes
T_j[t,:] = forc_to_temp(
T_j[t-1,:], q[t,:], d, F[t,:], e=efficacy)
# Sum the thermal response boxes to get the total temperature
T[t]=np.sum(T_j[t,:],axis=-1)
else:
if np.isscalar(other_rf):
F[t,0] = co2_log(C[t,0], C_pi[0], F2x) + other_rf
else:
F[t,0] = co2_log(C[t,0], C_pi[0], F2x) + other_rf[t]
F[t,0] = F[t,0] * scale[t]
T_j[t,:] = forc_to_temp(T_j[t-1,:], q[t,:], d, F[t,:])
T[t]=np.sum(T_j[t,:],axis=-1)
if not useMultigas:
C = np.squeeze(C)
F = np.squeeze(F)
if restart_out:
if useMultigas:
E_minus1 = np.sum(emissions[-1,1:3])
else:
E_minus1 = emissions[-1]
restart_out_val=(R_i[-1],T_j[-1],C_acc[-1],E_minus1)
return C, F, T, restart_out_val
else:
return C, F, T | |
status.go | package apicommon
import (
"bytes"
"encoding/json"
"io/ioutil"
"net/http"
"runtime"
"strconv"
"strings"
"sync"
"github.com/golang/glog"
"github.com/open-horizon/anax/config"
"github.com/open-horizon/anax/events"
"github.com/open-horizon/anax/exchange"
"github.com/open-horizon/anax/version"
)
type Configuration struct {
ExchangeAPI string `json:"exchange_api"`
ExchangeVersion string `json:"exchange_version"`
ReqExchVersion string `json:"required_exchange_version"`
Arch string `json:"architecture"`
HorizonVersion string `json:"horizon_version"`
}
type Info struct {
Geths []Geth `json:"geth"`
Configuration *Configuration `json:"configuration"`
Connectivity map[string]bool `json:"connectivity"`
}
func NewInfo(httpClientFactory *config.HTTPClientFactory, exchangeUrl string) *Info {
exch_version, err := exchange.GetExchangeVersion(httpClientFactory, exchangeUrl)
if err != nil {
glog.Errorf("Failed to get exchange version: %v", err)
}
return &Info{
Geths: []Geth{},
Configuration: &Configuration{
ExchangeAPI: exchangeUrl,
ExchangeVersion: exch_version,
ReqExchVersion: version.REQUIRED_EXCHANGE_VERSION,
Arch: runtime.GOARCH,
HorizonVersion: version.HORIZON_VERSION,
},
Connectivity: map[string]bool{},
}
}
func (info *Info) AddGeth(geth *Geth) *Info {
info.Geths = append(info.Geths, *geth)
return info
}
// Geth is an external type exposing the health of the go-ethereum process used by this anax instance
type Geth struct {
NetPeerCount int64 `json:"net_peer_count"`
EthSyncing bool `json:"eth_syncing"`
EthBlockNumber int64 `json:"eth_block_number"`
EthAccounts []string `json:"eth_accounts"`
EthBalance string `json:"eth_balance"` // a string b/c this is a huge number
}
func NewGeth() *Geth {
return &Geth{
NetPeerCount: -1,
EthSyncing: false,
EthBlockNumber: -1,
EthAccounts: []string{},
EthBalance: "",
}
}
func WriteGethStatus(gethURL string, geth *Geth) error {
singleResult := func(meth string, params []string) interface{} {
serial, err := json.Marshal(map[string]interface{}{"jsonrpc": "2.0", "method": meth, "params": params, "id": 1})
if err != nil {
glog.Error(err)
return ""
}
glog.V(5).Infof("encoded: %v", string(serial))
resp, err := http.Post(gethURL, "application/json", bytes.NewBuffer(serial))
if err != nil {
glog.Error(err)
return ""
}
defer resp.Body.Close()
b, err := ioutil.ReadAll(resp.Body)
if err != nil {
glog.Error(err)
return ""
}
var m map[string]interface{}
err = json.Unmarshal(b, &m)
if err != nil {
glog.Error(err)
return ""
}
glog.V(2).Infof("returned: %v", m)
return m["result"]
}
// eth_syncing returns either the boolean false (not syncing) or an object describing sync progress
switch singleResult("eth_syncing", []string{}).(type) {
case bool:
geth.EthSyncing = false
default:
geth.EthSyncing = true
}
// get the number of the current block
blockStr := singleResult("eth_blockNumber", []string{}).(string)
if blockStr != "" {
blockNum, err := strconv.ParseInt(strings.TrimPrefix(blockStr, "0x"), 16, 64)
if err != nil {
return err
}
geth.EthBlockNumber = blockNum
}
// get number of peers
peerStr := singleResult("net_peerCount", []string{}).(string)
if peerStr != "" {
peers, err := strconv.ParseInt(strings.TrimPrefix(peerStr, "0x"), 16, 64)
if err != nil {
return err
}
geth.NetPeerCount = peers
}
// get the account
if account := singleResult("eth_accounts", []string{}); account != nil {
switch account.(type) {
case []interface{}:
a1 := account.([]interface{})
geth.EthAccounts = make([]string, len(a1))
for i := range a1 {
geth.EthAccounts[i] = a1[i].(string)
}
default:
geth.EthAccounts = []string{}
}
}
// get account balance
if len(geth.EthAccounts) == 0 {
geth.EthBalance = "0x0"
} else {
eth_balance_params := make([]string, 2)
eth_balance_params[0] = geth.EthAccounts[0]
eth_balance_params[1] = "latest"
geth.EthBalance = singleResult("eth_getBalance", eth_balance_params).(string)
}
return nil
}
type BlockchainState struct {
ready bool // the blockchain is ready
writable bool // the blockchain is writable
service string // the network endpoint name of the container
servicePort string // the network port of the container
}
func (b *BlockchainState) GetService() string {
return b.service
}
func (b *BlockchainState) GetServicePort() string {
return b.servicePort
}
// Functions to manage the blockchain state events so that the status API has accurate info to display.
func HandleNewBCInit(ev *events.BlockchainClientInitializedMessage, bcState map[string]map[string]BlockchainState, bcStateLock *sync.Mutex) {
bcStateLock.Lock()
defer bcStateLock.Unlock()
nameMap := GetBCNameMap(ev.BlockchainType(), bcState)
namedBC, ok := nameMap[ev.BlockchainInstance()]
if !ok {
nameMap[ev.BlockchainInstance()] = BlockchainState{
ready: true,
writable: false,
service: ev.ServiceName(),
servicePort: ev.ServicePort(),
}
} else {
namedBC.ready = true
namedBC.service = ev.ServiceName()
namedBC.servicePort = ev.ServicePort()
// namedBC is a copy of the map value; write it back so the update takes effect
nameMap[ev.BlockchainInstance()] = namedBC
}
}
func HandleStoppingBC(ev *events.BlockchainClientStoppingMessage, bcState map[string]map[string]BlockchainState, bcStateLock *sync.Mutex) {
bcStateLock.Lock()
defer bcStateLock.Unlock()
nameMap := GetBCNameMap(ev.BlockchainType(), bcState)
delete(nameMap, ev.BlockchainInstance())
}
func | (typeName string, bcState map[string]map[string]BlockchainState) map[string]BlockchainState {
nameMap, ok := bcState[typeName]
if !ok {
bcState[typeName] = make(map[string]BlockchainState)
nameMap = bcState[typeName]
}
return nameMap
}
| GetBCNameMap |
dataset_results.py | import gc
import numpy as np
def | (dataset, model, binary=False):
x = np.array([dataset[i][0][0] for i in range(len(dataset))])
y_true = np.array([dataset[i][1][0] for i in range(len(dataset))])
y_pred = model.predict(x, batch_size=1, verbose=0).flatten()
if binary:
y_true = y_true[..., 0].flatten()
else:
y_true = np.argmax(y_true, axis=-1).flatten()
del x
gc.collect()
return y_true, y_pred
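# Illustrative usage sketch (val_dataset and the 0.5 threshold are assumed here for a
# sigmoid-output binary model):
#
#   y_true, y_pred = dataset_results(val_dataset, model, binary=True)
#   accuracy = np.mean((y_pred > 0.5) == y_true)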
| dataset_results |
icon_cloud_done.rs | pub struct IconCloudDone {
props: crate::Props,
}
impl yew::Component for IconCloudDone {
type Properties = crate::Props;
type Message = ();
fn create(props: Self::Properties, _: yew::prelude::ComponentLink<Self>) -> Self
{
Self { props }
}
fn update(&mut self, _: Self::Message) -> yew::prelude::ShouldRender
{
true
} |
fn change(&mut self, _: Self::Properties) -> yew::prelude::ShouldRender
{
false
}
fn view(&self) -> yew::prelude::Html
{
yew::prelude::html! {
<svg
class=self.props.class.unwrap_or("")
width=self.props.size.unwrap_or(24).to_string()
height=self.props.size.unwrap_or(24).to_string()
viewBox="0 0 24 24"
fill=self.props.fill.unwrap_or("none")
stroke=self.props.color.unwrap_or("currentColor")
stroke-width=self.props.stroke_width.unwrap_or(2).to_string()
stroke-linecap=self.props.stroke_linecap.unwrap_or("round")
stroke-linejoin=self.props.stroke_linejoin.unwrap_or("round")
>
<svg xmlns="http://www.w3.org/2000/svg" height="24" viewBox="0 0 24 24" width="24"><path d="M0 0h24v24H0V0z" fill="none"/><path d="M19.35 10.04C18.67 6.59 15.64 4 12 4 9.11 4 6.6 5.64 5.35 8.04 2.34 8.36 0 10.91 0 14c0 3.31 2.69 6 6 6h13c2.76 0 5-2.24 5-5 0-2.64-2.05-4.78-4.65-4.96zm-8.64 6.25c-.39.39-1.02.39-1.41 0L7.2 14.2c-.39-.39-.39-1.02 0-1.41.39-.39 1.02-.39 1.41 0L10 14.18l4.48-4.48c.39-.39 1.02-.39 1.41 0 .39.39.39 1.02 0 1.41l-5.18 5.18z"/></svg>
</svg>
}
}
} | |
0001_initial.py | # Generated by Django 3.2 on 2021-05-04 07:35
import colorfield.fields
from django.conf import settings
import django.contrib.gis.db.models.fields
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
| initial = True
dependencies = [
('places', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='ProblemStatus',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=20)),
('color', colorfield.fields.ColorField(default='#FFFFFF', max_length=18)),
],
),
migrations.CreateModel(
name='ProblemLabel',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.CharField(max_length=500, verbose_name='описание проблемы')),
('created_date', models.DateTimeField(default=django.utils.timezone.now, verbose_name='время добавления')),
('house_number', models.CharField(blank=True, max_length=50, verbose_name='номер дома')),
('geom', django.contrib.gis.db.models.fields.PointField(srid=4326)),
('author', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, verbose_name='добавлено пользователем')),
('county', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='places.county', verbose_name='район области')),
('place', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='places.place', verbose_name='населённый пункт')),
('road', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='places.road', verbose_name='дорога')),
('state_district', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='places.statedistrict', verbose_name='район города')),
('status', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='problem_register.problemstatus', verbose_name='статус проблемы')),
],
),
]
|
|
bootstrap.min.js | /*!
* Bootstrap v4.0.0 (https://getbootstrap.com) | // sourceMappingURL=bootstrap.min.js.map | * Copyright 2011-2018 The Bootstrap Authors (https://github.com/twbs/bootstrap/graphs/contributors)
* Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
*/
!function(t,e){"object"==typeof exports&&"undefined"!=typeof module?e(exports,require("jquery"),require("popper.js")):"function"==typeof define&&define.amd?define(["exports","jquery","popper.js"],e):e(t.bootstrap={},t.jQuery,t.Popper)}(this,function(t,e,n){"use strict";function i(t,e){for(var n=0;n<e.length;n++){var i=e[n];i.enumerable=i.enumerable||!1,i.configurable=!0,"value"in i&&(i.writable=!0),Object.defineProperty(t,i.key,i)}}function s(t,e,n){return e&&i(t.prototype,e),n&&i(t,n),t}function r(){return(r=Object.assign||function(t){for(var e=1;e<arguments.length;e++){var n=arguments[e];for(var i in n)Object.prototype.hasOwnProperty.call(n,i)&&(t[i]=n[i])}return t}).apply(this,arguments)}e=e&&e.hasOwnProperty("default")?e.default:e,n=n&&n.hasOwnProperty("default")?n.default:n;var o,a,l,h,c,u,f,d,_,g,p,m,v,E,T,y,C,I,A,b,D,S,w,N,O,k,P=function(t){var e=!1;function n(e){var n=this,s=!1;return t(this).one(i.TRANSITION_END,function(){s=!0}),setTimeout(function(){s||i.triggerTransitionEnd(n)},e),this}var i={TRANSITION_END:"bsTransitionEnd",getUID:function(t){do{t+=~~(1e6*Math.random())}while(document.getElementById(t));return t},getSelectorFromElement:function(e){var n,i=e.getAttribute("data-target");i&&"#"!==i||(i=e.getAttribute("href")||""),"#"===i.charAt(0)&&(n=i,i=n="function"==typeof t.escapeSelector?t.escapeSelector(n).substr(1):n.replace(/(:|\.|\[|\]|,|=|@)/g,"\\$1"));try{return t(document).find(i).length>0?i:null}catch(t){return null}},reflow:function(t){return t.offsetHeight},triggerTransitionEnd:function(n){t(n).trigger(e.end)},supportsTransitionEnd:function(){return Boolean(e)},isElement:function(t){return(t[0]||t).nodeType},typeCheckConfig:function(t,e,n){for(var s in n)if(Object.prototype.hasOwnProperty.call(n,s)){var r=n[s],o=e[s],a=o&&i.isElement(o)?"element":(l=o,{}.toString.call(l).match(/\s([a-zA-Z]+)/)[1].toLowerCase());if(!new RegExp(r).test(a))throw new Error(t.toUpperCase()+': Option "'+s+'" provided type "'+a+'" but expected type "'+r+'".')}var l}};return e=("undefined"==typeof window||!window.QUnit)&&{end:"transitionend"},t.fn.emulateTransitionEnd=n,i.supportsTransitionEnd()&&(t.event.special[i.TRANSITION_END]={bindType:e.end,delegateType:e.end,handle:function(e){if(t(e.target).is(this))return e.handleObj.handler.apply(this,arguments)}}),i}(e),L=(a="alert",h="."+(l="bs.alert"),c=(o=e).fn[a],u={CLOSE:"close"+h,CLOSED:"closed"+h,CLICK_DATA_API:"click"+h+".data-api"},f="alert",d="fade",_="show",g=function(){function t(t){this._element=t}var e=t.prototype;return e.close=function(t){t=t||this._element;var e=this._getRootElement(t);this._triggerCloseEvent(e).isDefaultPrevented()||this._removeElement(e)},e.dispose=function(){o.removeData(this._element,l),this._element=null},e._getRootElement=function(t){var e=P.getSelectorFromElement(t),n=!1;return e&&(n=o(e)[0]),n||(n=o(t).closest("."+f)[0]),n},e._triggerCloseEvent=function(t){var e=o.Event(u.CLOSE);return o(t).trigger(e),e},e._removeElement=function(t){var e=this;o(t).removeClass(_),P.supportsTransitionEnd()&&o(t).hasClass(d)?o(t).one(P.TRANSITION_END,function(n){return e._destroyElement(t,n)}).emulateTransitionEnd(150):this._destroyElement(t)},e._destroyElement=function(t){o(t).detach().trigger(u.CLOSED).remove()},t._jQueryInterface=function(e){return this.each(function(){var n=o(this),i=n.data(l);i||(i=new t(this),n.data(l,i)),"close"===e&&i[e](this)})},t._handleDismiss=function(t){return 
function(e){e&&e.preventDefault(),t.close(this)}},s(t,null,[{key:"VERSION",get:function(){return"4.0.0"}}]),t}(),o(document).on(u.CLICK_DATA_API,'[data-dismiss="alert"]',g._handleDismiss(new g)),o.fn[a]=g._jQueryInterface,o.fn[a].Constructor=g,o.fn[a].noConflict=function(){return o.fn[a]=c,g._jQueryInterface},g),R=(m="button",E="."+(v="bs.button"),T=".data-api",y=(p=e).fn[m],C="active",I="btn",A="focus",b='[data-toggle^="button"]',D='[data-toggle="buttons"]',S="input",w=".active",N=".btn",O={CLICK_DATA_API:"click"+E+T,FOCUS_BLUR_DATA_API:"focus"+E+T+" blur"+E+T},k=function(){function t(t){this._element=t}var e=t.prototype;return e.toggle=function(){var t=!0,e=!0,n=p(this._element).closest(D)[0];if(n){var i=p(this._element).find(S)[0];if(i){if("radio"===i.type)if(i.checked&&p(this._element).hasClass(C))t=!1;else{var s=p(n).find(w)[0];s&&p(s).removeClass(C)}if(t){if(i.hasAttribute("disabled")||n.hasAttribute("disabled")||i.classList.contains("disabled")||n.classList.contains("disabled"))return;i.checked=!p(this._element).hasClass(C),p(i).trigger("change")}i.focus(),e=!1}}e&&this._element.setAttribute("aria-pressed",!p(this._element).hasClass(C)),t&&p(this._element).toggleClass(C)},e.dispose=function(){p.removeData(this._element,v),this._element=null},t._jQueryInterface=function(e){return this.each(function(){var n=p(this).data(v);n||(n=new t(this),p(this).data(v,n)),"toggle"===e&&n[e]()})},s(t,null,[{key:"VERSION",get:function(){return"4.0.0"}}]),t}(),p(document).on(O.CLICK_DATA_API,b,function(t){t.preventDefault();var e=t.target;p(e).hasClass(I)||(e=p(e).closest(N)),k._jQueryInterface.call(p(e),"toggle")}).on(O.FOCUS_BLUR_DATA_API,b,function(t){var e=p(t.target).closest(N)[0];p(e).toggleClass(A,/^focus(in)?$/.test(t.type))}),p.fn[m]=k._jQueryInterface,p.fn[m].Constructor=k,p.fn[m].noConflict=function(){return p.fn[m]=y,k._jQueryInterface},k),j=function(t){var e="carousel",n="bs.carousel",i="."+n,o=t.fn[e],a={interval:5e3,keyboard:!0,slide:!1,pause:"hover",wrap:!0},l={interval:"(number|boolean)",keyboard:"boolean",slide:"(boolean|string)",pause:"(string|boolean)",wrap:"boolean"},h="next",c="prev",u="left",f="right",d={SLIDE:"slide"+i,SLID:"slid"+i,KEYDOWN:"keydown"+i,MOUSEENTER:"mouseenter"+i,MOUSELEAVE:"mouseleave"+i,TOUCHEND:"touchend"+i,LOAD_DATA_API:"load"+i+".data-api",CLICK_DATA_API:"click"+i+".data-api"},_="carousel",g="active",p="slide",m="carousel-item-right",v="carousel-item-left",E="carousel-item-next",T="carousel-item-prev",y={ACTIVE:".active",ACTIVE_ITEM:".active.carousel-item",ITEM:".carousel-item",NEXT_PREV:".carousel-item-next, .carousel-item-prev",INDICATORS:".carousel-indicators",DATA_SLIDE:"[data-slide], [data-slide-to]",DATA_RIDE:'[data-ride="carousel"]'},C=function(){function o(e,n){this._items=null,this._interval=null,this._activeElement=null,this._isPaused=!1,this._isSliding=!1,this.touchTimeout=null,this._config=this._getConfig(n),this._element=t(e)[0],this._indicatorsElement=t(this._element).find(y.INDICATORS)[0],this._addEventListeners()}var C=o.prototype;return 
C.next=function(){this._isSliding||this._slide(h)},C.nextWhenVisible=function(){!document.hidden&&t(this._element).is(":visible")&&"hidden"!==t(this._element).css("visibility")&&this.next()},C.prev=function(){this._isSliding||this._slide(c)},C.pause=function(e){e||(this._isPaused=!0),t(this._element).find(y.NEXT_PREV)[0]&&P.supportsTransitionEnd()&&(P.triggerTransitionEnd(this._element),this.cycle(!0)),clearInterval(this._interval),this._interval=null},C.cycle=function(t){t||(this._isPaused=!1),this._interval&&(clearInterval(this._interval),this._interval=null),this._config.interval&&!this._isPaused&&(this._interval=setInterval((document.visibilityState?this.nextWhenVisible:this.next).bind(this),this._config.interval))},C.to=function(e){var n=this;this._activeElement=t(this._element).find(y.ACTIVE_ITEM)[0];var i=this._getItemIndex(this._activeElement);if(!(e>this._items.length-1||e<0))if(this._isSliding)t(this._element).one(d.SLID,function(){return n.to(e)});else{if(i===e)return this.pause(),void this.cycle();var s=e>i?h:c;this._slide(s,this._items[e])}},C.dispose=function(){t(this._element).off(i),t.removeData(this._element,n),this._items=null,this._config=null,this._element=null,this._interval=null,this._isPaused=null,this._isSliding=null,this._activeElement=null,this._indicatorsElement=null},C._getConfig=function(t){return t=r({},a,t),P.typeCheckConfig(e,t,l),t},C._addEventListeners=function(){var e=this;this._config.keyboard&&t(this._element).on(d.KEYDOWN,function(t){return e._keydown(t)}),"hover"===this._config.pause&&(t(this._element).on(d.MOUSEENTER,function(t){return e.pause(t)}).on(d.MOUSELEAVE,function(t){return e.cycle(t)}),"ontouchstart"in document.documentElement&&t(this._element).on(d.TOUCHEND,function(){e.pause(),e.touchTimeout&&clearTimeout(e.touchTimeout),e.touchTimeout=setTimeout(function(t){return e.cycle(t)},500+e._config.interval)}))},C._keydown=function(t){if(!/input|textarea/i.test(t.target.tagName))switch(t.which){case 37:t.preventDefault(),this.prev();break;case 39:t.preventDefault(),this.next()}},C._getItemIndex=function(e){return this._items=t.makeArray(t(e).parent().find(y.ITEM)),this._items.indexOf(e)},C._getItemByDirection=function(t,e){var n=t===h,i=t===c,s=this._getItemIndex(e),r=this._items.length-1;if((i&&0===s||n&&s===r)&&!this._config.wrap)return e;var o=(s+(t===c?-1:1))%this._items.length;return-1===o?this._items[this._items.length-1]:this._items[o]},C._triggerSlideEvent=function(e,n){var i=this._getItemIndex(e),s=this._getItemIndex(t(this._element).find(y.ACTIVE_ITEM)[0]),r=t.Event(d.SLIDE,{relatedTarget:e,direction:n,from:s,to:i});return t(this._element).trigger(r),r},C._setActiveIndicatorElement=function(e){if(this._indicatorsElement){t(this._indicatorsElement).find(y.ACTIVE).removeClass(g);var n=this._indicatorsElement.children[this._getItemIndex(e)];n&&t(n).addClass(g)}},C._slide=function(e,n){var i,s,r,o=this,a=t(this._element).find(y.ACTIVE_ITEM)[0],l=this._getItemIndex(a),c=n||a&&this._getItemByDirection(e,a),_=this._getItemIndex(c),C=Boolean(this._interval);if(e===h?(i=v,s=E,r=u):(i=m,s=T,r=f),c&&t(c).hasClass(g))this._isSliding=!1;else if(!this._triggerSlideEvent(c,r).isDefaultPrevented()&&a&&c){this._isSliding=!0,C&&this.pause(),this._setActiveIndicatorElement(c);var I=t.Event(d.SLID,{relatedTarget:c,direction:r,from:l,to:_});P.supportsTransitionEnd()&&t(this._element).hasClass(p)?(t(c).addClass(s),P.reflow(c),t(a).addClass(i),t(c).addClass(i),t(a).one(P.TRANSITION_END,function(){t(c).removeClass(i+" "+s).addClass(g),t(a).removeClass(g+" 
"+s+" "+i),o._isSliding=!1,setTimeout(function(){return t(o._element).trigger(I)},0)}).emulateTransitionEnd(600)):(t(a).removeClass(g),t(c).addClass(g),this._isSliding=!1,t(this._element).trigger(I)),C&&this.cycle()}},o._jQueryInterface=function(e){return this.each(function(){var i=t(this).data(n),s=r({},a,t(this).data());"object"==typeof e&&(s=r({},s,e));var l="string"==typeof e?e:s.slide;if(i||(i=new o(this,s),t(this).data(n,i)),"number"==typeof e)i.to(e);else if("string"==typeof l){if("undefined"==typeof i[l])throw new TypeError('No method named "'+l+'"');i[l]()}else s.interval&&(i.pause(),i.cycle())})},o._dataApiClickHandler=function(e){var i=P.getSelectorFromElement(this);if(i){var s=t(i)[0];if(s&&t(s).hasClass(_)){var a=r({},t(s).data(),t(this).data()),l=this.getAttribute("data-slide-to");l&&(a.interval=!1),o._jQueryInterface.call(t(s),a),l&&t(s).data(n).to(l),e.preventDefault()}}},s(o,null,[{key:"VERSION",get:function(){return"4.0.0"}},{key:"Default",get:function(){return a}}]),o}();return t(document).on(d.CLICK_DATA_API,y.DATA_SLIDE,C._dataApiClickHandler),t(window).on(d.LOAD_DATA_API,function(){t(y.DATA_RIDE).each(function(){var e=t(this);C._jQueryInterface.call(e,e.data())})}),t.fn[e]=C._jQueryInterface,t.fn[e].Constructor=C,t.fn[e].noConflict=function(){return t.fn[e]=o,C._jQueryInterface},C}(e),H=function(t){var e="collapse",n="bs.collapse",i="."+n,o=t.fn[e],a={toggle:!0,parent:""},l={toggle:"boolean",parent:"(string|element)"},h={SHOW:"show"+i,SHOWN:"shown"+i,HIDE:"hide"+i,HIDDEN:"hidden"+i,CLICK_DATA_API:"click"+i+".data-api"},c="show",u="collapse",f="collapsing",d="collapsed",_="width",g="height",p={ACTIVES:".show, .collapsing",DATA_TOGGLE:'[data-toggle="collapse"]'},m=function(){function i(e,n){this._isTransitioning=!1,this._element=e,this._config=this._getConfig(n),this._triggerArray=t.makeArray(t('[data-toggle="collapse"][href="#'+e.id+'"],[data-toggle="collapse"][data-target="#'+e.id+'"]'));for(var i=t(p.DATA_TOGGLE),s=0;s<i.length;s++){var r=i[s],o=P.getSelectorFromElement(r);null!==o&&t(o).filter(e).length>0&&(this._selector=o,this._triggerArray.push(r))}this._parent=this._config.parent?this._getParent():null,this._config.parent||this._addAriaAndCollapsedClass(this._element,this._triggerArray),this._config.toggle&&this.toggle()}var o=i.prototype;return o.toggle=function(){t(this._element).hasClass(c)?this.hide():this.show()},o.show=function(){var e,s,r=this;if(!this._isTransitioning&&!t(this._element).hasClass(c)&&(this._parent&&0===(e=t.makeArray(t(this._parent).find(p.ACTIVES).filter('[data-parent="'+this._config.parent+'"]'))).length&&(e=null),!(e&&(s=t(e).not(this._selector).data(n))&&s._isTransitioning))){var o=t.Event(h.SHOW);if(t(this._element).trigger(o),!o.isDefaultPrevented()){e&&(i._jQueryInterface.call(t(e).not(this._selector),"hide"),s||t(e).data(n,null));var a=this._getDimension();t(this._element).removeClass(u).addClass(f),this._element.style[a]=0,this._triggerArray.length>0&&t(this._triggerArray).removeClass(d).attr("aria-expanded",!0),this.setTransitioning(!0);var l=function(){t(r._element).removeClass(f).addClass(u).addClass(c),r._element.style[a]="",r.setTransitioning(!1),t(r._element).trigger(h.SHOWN)};if(P.supportsTransitionEnd()){var _="scroll"+(a[0].toUpperCase()+a.slice(1));t(this._element).one(P.TRANSITION_END,l).emulateTransitionEnd(600),this._element.style[a]=this._element[_]+"px"}else l()}}},o.hide=function(){var e=this;if(!this._isTransitioning&&t(this._element).hasClass(c)){var 
n=t.Event(h.HIDE);if(t(this._element).trigger(n),!n.isDefaultPrevented()){var i=this._getDimension();if(this._element.style[i]=this._element.getBoundingClientRect()[i]+"px",P.reflow(this._element),t(this._element).addClass(f).removeClass(u).removeClass(c),this._triggerArray.length>0)for(var s=0;s<this._triggerArray.length;s++){var r=this._triggerArray[s],o=P.getSelectorFromElement(r);if(null!==o)t(o).hasClass(c)||t(r).addClass(d).attr("aria-expanded",!1)}this.setTransitioning(!0);var a=function(){e.setTransitioning(!1),t(e._element).removeClass(f).addClass(u).trigger(h.HIDDEN)};this._element.style[i]="",P.supportsTransitionEnd()?t(this._element).one(P.TRANSITION_END,a).emulateTransitionEnd(600):a()}}},o.setTransitioning=function(t){this._isTransitioning=t},o.dispose=function(){t.removeData(this._element,n),this._config=null,this._parent=null,this._element=null,this._triggerArray=null,this._isTransitioning=null},o._getConfig=function(t){return(t=r({},a,t)).toggle=Boolean(t.toggle),P.typeCheckConfig(e,t,l),t},o._getDimension=function(){return t(this._element).hasClass(_)?_:g},o._getParent=function(){var e=this,n=null;P.isElement(this._config.parent)?(n=this._config.parent,"undefined"!=typeof this._config.parent.jquery&&(n=this._config.parent[0])):n=t(this._config.parent)[0];var s='[data-toggle="collapse"][data-parent="'+this._config.parent+'"]';return t(n).find(s).each(function(t,n){e._addAriaAndCollapsedClass(i._getTargetFromElement(n),[n])}),n},o._addAriaAndCollapsedClass=function(e,n){if(e){var i=t(e).hasClass(c);n.length>0&&t(n).toggleClass(d,!i).attr("aria-expanded",i)}},i._getTargetFromElement=function(e){var n=P.getSelectorFromElement(e);return n?t(n)[0]:null},i._jQueryInterface=function(e){return this.each(function(){var s=t(this),o=s.data(n),l=r({},a,s.data(),"object"==typeof e&&e);if(!o&&l.toggle&&/show|hide/.test(e)&&(l.toggle=!1),o||(o=new i(this,l),s.data(n,o)),"string"==typeof e){if("undefined"==typeof o[e])throw new TypeError('No method named "'+e+'"');o[e]()}})},s(i,null,[{key:"VERSION",get:function(){return"4.0.0"}},{key:"Default",get:function(){return a}}]),i}();return t(document).on(h.CLICK_DATA_API,p.DATA_TOGGLE,function(e){"A"===e.currentTarget.tagName&&e.preventDefault();var i=t(this),s=P.getSelectorFromElement(this);t(s).each(function(){var e=t(this),s=e.data(n)?"toggle":i.data();m._jQueryInterface.call(e,s)})}),t.fn[e]=m._jQueryInterface,t.fn[e].Constructor=m,t.fn[e].noConflict=function(){return t.fn[e]=o,m._jQueryInterface},m}(e),W=function(t){var e="dropdown",i="bs.dropdown",o="."+i,a=".data-api",l=t.fn[e],h=new RegExp("38|40|27"),c={HIDE:"hide"+o,HIDDEN:"hidden"+o,SHOW:"show"+o,SHOWN:"shown"+o,CLICK:"click"+o,CLICK_DATA_API:"click"+o+a,KEYDOWN_DATA_API:"keydown"+o+a,KEYUP_DATA_API:"keyup"+o+a},u="disabled",f="show",d="dropup",_="dropright",g="dropleft",p="dropdown-menu-right",m="dropdown-menu-left",v="position-static",E='[data-toggle="dropdown"]',T=".dropdown form",y=".dropdown-menu",C=".navbar-nav",I=".dropdown-menu .dropdown-item:not(.disabled)",A="top-start",b="top-end",D="bottom-start",S="bottom-end",w="right-start",N="left-start",O={offset:0,flip:!0,boundary:"scrollParent"},k={offset:"(number|string|function)",flip:"boolean",boundary:"(string|element)"},L=function(){function a(t,e){this._element=t,this._popper=null,this._config=this._getConfig(e),this._menu=this._getMenuElement(),this._inNavbar=this._detectNavbar(),this._addEventListeners()}var l=a.prototype;return l.toggle=function(){if(!this._element.disabled&&!t(this._element).hasClass(u)){var 
e=a._getParentFromElement(this._element),i=t(this._menu).hasClass(f);if(a._clearMenus(),!i){var s={relatedTarget:this._element},r=t.Event(c.SHOW,s);if(t(e).trigger(r),!r.isDefaultPrevented()){if(!this._inNavbar){if("undefined"==typeof n)throw new TypeError("Bootstrap dropdown require Popper.js (https://popper.js.org)");var o=this._element;t(e).hasClass(d)&&(t(this._menu).hasClass(m)||t(this._menu).hasClass(p))&&(o=e),"scrollParent"!==this._config.boundary&&t(e).addClass(v),this._popper=new n(o,this._menu,this._getPopperConfig())}"ontouchstart"in document.documentElement&&0===t(e).closest(C).length&&t("body").children().on("mouseover",null,t.noop),this._element.focus(),this._element.setAttribute("aria-expanded",!0),t(this._menu).toggleClass(f),t(e).toggleClass(f).trigger(t.Event(c.SHOWN,s))}}}},l.dispose=function(){t.removeData(this._element,i),t(this._element).off(o),this._element=null,this._menu=null,null!==this._popper&&(this._popper.destroy(),this._popper=null)},l.update=function(){this._inNavbar=this._detectNavbar(),null!==this._popper&&this._popper.scheduleUpdate()},l._addEventListeners=function(){var e=this;t(this._element).on(c.CLICK,function(t){t.preventDefault(),t.stopPropagation(),e.toggle()})},l._getConfig=function(n){return n=r({},this.constructor.Default,t(this._element).data(),n),P.typeCheckConfig(e,n,this.constructor.DefaultType),n},l._getMenuElement=function(){if(!this._menu){var e=a._getParentFromElement(this._element);this._menu=t(e).find(y)[0]}return this._menu},l._getPlacement=function(){var e=t(this._element).parent(),n=D;return e.hasClass(d)?(n=A,t(this._menu).hasClass(p)&&(n=b)):e.hasClass(_)?n=w:e.hasClass(g)?n=N:t(this._menu).hasClass(p)&&(n=S),n},l._detectNavbar=function(){return t(this._element).closest(".navbar").length>0},l._getPopperConfig=function(){var t=this,e={};return"function"==typeof this._config.offset?e.fn=function(e){return e.offsets=r({},e.offsets,t._config.offset(e.offsets)||{}),e}:e.offset=this._config.offset,{placement:this._getPlacement(),modifiers:{offset:e,flip:{enabled:this._config.flip},preventOverflow:{boundariesElement:this._config.boundary}}}},a._jQueryInterface=function(e){return this.each(function(){var n=t(this).data(i);if(n||(n=new a(this,"object"==typeof e?e:null),t(this).data(i,n)),"string"==typeof e){if("undefined"==typeof n[e])throw new TypeError('No method named "'+e+'"');n[e]()}})},a._clearMenus=function(e){if(!e||3!==e.which&&("keyup"!==e.type||9===e.which))for(var n=t.makeArray(t(E)),s=0;s<n.length;s++){var r=a._getParentFromElement(n[s]),o=t(n[s]).data(i),l={relatedTarget:n[s]};if(o){var h=o._menu;if(t(r).hasClass(f)&&!(e&&("click"===e.type&&/input|textarea/i.test(e.target.tagName)||"keyup"===e.type&&9===e.which)&&t.contains(r,e.target))){var u=t.Event(c.HIDE,l);t(r).trigger(u),u.isDefaultPrevented()||("ontouchstart"in document.documentElement&&t("body").children().off("mouseover",null,t.noop),n[s].setAttribute("aria-expanded","false"),t(h).removeClass(f),t(r).removeClass(f).trigger(t.Event(c.HIDDEN,l)))}}}},a._getParentFromElement=function(e){var n,i=P.getSelectorFromElement(e);return i&&(n=t(i)[0]),n||e.parentNode},a._dataApiKeydownHandler=function(e){if((/input|textarea/i.test(e.target.tagName)?!(32===e.which||27!==e.which&&(40!==e.which&&38!==e.which||t(e.target).closest(y).length)):h.test(e.which))&&(e.preventDefault(),e.stopPropagation(),!this.disabled&&!t(this).hasClass(u))){var n=a._getParentFromElement(this),i=t(n).hasClass(f);if((i||27===e.which&&32===e.which)&&(!i||27!==e.which&&32!==e.which)){var 
s=t(n).find(I).get();if(0!==s.length){var r=s.indexOf(e.target);38===e.which&&r>0&&r--,40===e.which&&r<s.length-1&&r++,r<0&&(r=0),s[r].focus()}}else{if(27===e.which){var o=t(n).find(E)[0];t(o).trigger("focus")}t(this).trigger("click")}}},s(a,null,[{key:"VERSION",get:function(){return"4.0.0"}},{key:"Default",get:function(){return O}},{key:"DefaultType",get:function(){return k}}]),a}();return t(document).on(c.KEYDOWN_DATA_API,E,L._dataApiKeydownHandler).on(c.KEYDOWN_DATA_API,y,L._dataApiKeydownHandler).on(c.CLICK_DATA_API+" "+c.KEYUP_DATA_API,L._clearMenus).on(c.CLICK_DATA_API,E,function(e){e.preventDefault(),e.stopPropagation(),L._jQueryInterface.call(t(this),"toggle")}).on(c.CLICK_DATA_API,T,function(t){t.stopPropagation()}),t.fn[e]=L._jQueryInterface,t.fn[e].Constructor=L,t.fn[e].noConflict=function(){return t.fn[e]=l,L._jQueryInterface},L}(e),M=function(t){var e="modal",n="bs.modal",i="."+n,o=t.fn.modal,a={backdrop:!0,keyboard:!0,focus:!0,show:!0},l={backdrop:"(boolean|string)",keyboard:"boolean",focus:"boolean",show:"boolean"},h={HIDE:"hide"+i,HIDDEN:"hidden"+i,SHOW:"show"+i,SHOWN:"shown"+i,FOCUSIN:"focusin"+i,RESIZE:"resize"+i,CLICK_DISMISS:"click.dismiss"+i,KEYDOWN_DISMISS:"keydown.dismiss"+i,MOUSEUP_DISMISS:"mouseup.dismiss"+i,MOUSEDOWN_DISMISS:"mousedown.dismiss"+i,CLICK_DATA_API:"click"+i+".data-api"},c="modal-scrollbar-measure",u="modal-backdrop",f="modal-open",d="fade",_="show",g={DIALOG:".modal-dialog",DATA_TOGGLE:'[data-toggle="modal"]',DATA_DISMISS:'[data-dismiss="modal"]',FIXED_CONTENT:".fixed-top, .fixed-bottom, .is-fixed, .sticky-top",STICKY_CONTENT:".sticky-top",NAVBAR_TOGGLER:".navbar-toggler"},p=function(){function o(e,n){this._config=this._getConfig(n),this._element=e,this._dialog=t(e).find(g.DIALOG)[0],this._backdrop=null,this._isShown=!1,this._isBodyOverflowing=!1,this._ignoreBackdropClick=!1,this._originalBodyPadding=0,this._scrollbarWidth=0}var p=o.prototype;return p.toggle=function(t){return this._isShown?this.hide():this.show(t)},p.show=function(e){var n=this;if(!this._isTransitioning&&!this._isShown){P.supportsTransitionEnd()&&t(this._element).hasClass(d)&&(this._isTransitioning=!0);var i=t.Event(h.SHOW,{relatedTarget:e});t(this._element).trigger(i),this._isShown||i.isDefaultPrevented()||(this._isShown=!0,this._checkScrollbar(),this._setScrollbar(),this._adjustDialog(),t(document.body).addClass(f),this._setEscapeEvent(),this._setResizeEvent(),t(this._element).on(h.CLICK_DISMISS,g.DATA_DISMISS,function(t){return n.hide(t)}),t(this._dialog).on(h.MOUSEDOWN_DISMISS,function(){t(n._element).one(h.MOUSEUP_DISMISS,function(e){t(e.target).is(n._element)&&(n._ignoreBackdropClick=!0)})}),this._showBackdrop(function(){return n._showElement(e)}))}},p.hide=function(e){var n=this;if(e&&e.preventDefault(),!this._isTransitioning&&this._isShown){var i=t.Event(h.HIDE);if(t(this._element).trigger(i),this._isShown&&!i.isDefaultPrevented()){this._isShown=!1;var s=P.supportsTransitionEnd()&&t(this._element).hasClass(d);s&&(this._isTransitioning=!0),this._setEscapeEvent(),this._setResizeEvent(),t(document).off(h.FOCUSIN),t(this._element).removeClass(_),t(this._element).off(h.CLICK_DISMISS),t(this._dialog).off(h.MOUSEDOWN_DISMISS),s?t(this._element).one(P.TRANSITION_END,function(t){return 
n._hideModal(t)}).emulateTransitionEnd(300):this._hideModal()}}},p.dispose=function(){t.removeData(this._element,n),t(window,document,this._element,this._backdrop).off(i),this._config=null,this._element=null,this._dialog=null,this._backdrop=null,this._isShown=null,this._isBodyOverflowing=null,this._ignoreBackdropClick=null,this._scrollbarWidth=null},p.handleUpdate=function(){this._adjustDialog()},p._getConfig=function(t){return t=r({},a,t),P.typeCheckConfig(e,t,l),t},p._showElement=function(e){var n=this,i=P.supportsTransitionEnd()&&t(this._element).hasClass(d);this._element.parentNode&&this._element.parentNode.nodeType===Node.ELEMENT_NODE||document.body.appendChild(this._element),this._element.style.display="block",this._element.removeAttribute("aria-hidden"),this._element.scrollTop=0,i&&P.reflow(this._element),t(this._element).addClass(_),this._config.focus&&this._enforceFocus();var s=t.Event(h.SHOWN,{relatedTarget:e}),r=function(){n._config.focus&&n._element.focus(),n._isTransitioning=!1,t(n._element).trigger(s)};i?t(this._dialog).one(P.TRANSITION_END,r).emulateTransitionEnd(300):r()},p._enforceFocus=function(){var e=this;t(document).off(h.FOCUSIN).on(h.FOCUSIN,function(n){document!==n.target&&e._element!==n.target&&0===t(e._element).has(n.target).length&&e._element.focus()})},p._setEscapeEvent=function(){var e=this;this._isShown&&this._config.keyboard?t(this._element).on(h.KEYDOWN_DISMISS,function(t){27===t.which&&(t.preventDefault(),e.hide())}):this._isShown||t(this._element).off(h.KEYDOWN_DISMISS)},p._setResizeEvent=function(){var e=this;this._isShown?t(window).on(h.RESIZE,function(t){return e.handleUpdate(t)}):t(window).off(h.RESIZE)},p._hideModal=function(){var e=this;this._element.style.display="none",this._element.setAttribute("aria-hidden",!0),this._isTransitioning=!1,this._showBackdrop(function(){t(document.body).removeClass(f),e._resetAdjustments(),e._resetScrollbar(),t(e._element).trigger(h.HIDDEN)})},p._removeBackdrop=function(){this._backdrop&&(t(this._backdrop).remove(),this._backdrop=null)},p._showBackdrop=function(e){var n=this,i=t(this._element).hasClass(d)?d:"";if(this._isShown&&this._config.backdrop){var s=P.supportsTransitionEnd()&&i;if(this._backdrop=document.createElement("div"),this._backdrop.className=u,i&&t(this._backdrop).addClass(i),t(this._backdrop).appendTo(document.body),t(this._element).on(h.CLICK_DISMISS,function(t){n._ignoreBackdropClick?n._ignoreBackdropClick=!1:t.target===t.currentTarget&&("static"===n._config.backdrop?n._element.focus():n.hide())}),s&&P.reflow(this._backdrop),t(this._backdrop).addClass(_),!e)return;if(!s)return void e();t(this._backdrop).one(P.TRANSITION_END,e).emulateTransitionEnd(150)}else if(!this._isShown&&this._backdrop){t(this._backdrop).removeClass(_);var r=function(){n._removeBackdrop(),e&&e()};P.supportsTransitionEnd()&&t(this._element).hasClass(d)?t(this._backdrop).one(P.TRANSITION_END,r).emulateTransitionEnd(150):r()}else e&&e()},p._adjustDialog=function(){var t=this._element.scrollHeight>document.documentElement.clientHeight;!this._isBodyOverflowing&&t&&(this._element.style.paddingLeft=this._scrollbarWidth+"px"),this._isBodyOverflowing&&!t&&(this._element.style.paddingRight=this._scrollbarWidth+"px")},p._resetAdjustments=function(){this._element.style.paddingLeft="",this._element.style.paddingRight=""},p._checkScrollbar=function(){var t=document.body.getBoundingClientRect();this._isBodyOverflowing=t.left+t.right<window.innerWidth,this._scrollbarWidth=this._getScrollbarWidth()},p._setScrollbar=function(){var 
e=this;if(this._isBodyOverflowing){t(g.FIXED_CONTENT).each(function(n,i){var s=t(i)[0].style.paddingRight,r=t(i).css("padding-right");t(i).data("padding-right",s).css("padding-right",parseFloat(r)+e._scrollbarWidth+"px")}),t(g.STICKY_CONTENT).each(function(n,i){var s=t(i)[0].style.marginRight,r=t(i).css("margin-right");t(i).data("margin-right",s).css("margin-right",parseFloat(r)-e._scrollbarWidth+"px")}),t(g.NAVBAR_TOGGLER).each(function(n,i){var s=t(i)[0].style.marginRight,r=t(i).css("margin-right");t(i).data("margin-right",s).css("margin-right",parseFloat(r)+e._scrollbarWidth+"px")});var n=document.body.style.paddingRight,i=t("body").css("padding-right");t("body").data("padding-right",n).css("padding-right",parseFloat(i)+this._scrollbarWidth+"px")}},p._resetScrollbar=function(){t(g.FIXED_CONTENT).each(function(e,n){var i=t(n).data("padding-right");"undefined"!=typeof i&&t(n).css("padding-right",i).removeData("padding-right")}),t(g.STICKY_CONTENT+", "+g.NAVBAR_TOGGLER).each(function(e,n){var i=t(n).data("margin-right");"undefined"!=typeof i&&t(n).css("margin-right",i).removeData("margin-right")});var e=t("body").data("padding-right");"undefined"!=typeof e&&t("body").css("padding-right",e).removeData("padding-right")},p._getScrollbarWidth=function(){var t=document.createElement("div");t.className=c,document.body.appendChild(t);var e=t.getBoundingClientRect().width-t.clientWidth;return document.body.removeChild(t),e},o._jQueryInterface=function(e,i){return this.each(function(){var s=t(this).data(n),a=r({},o.Default,t(this).data(),"object"==typeof e&&e);if(s||(s=new o(this,a),t(this).data(n,s)),"string"==typeof e){if("undefined"==typeof s[e])throw new TypeError('No method named "'+e+'"');s[e](i)}else a.show&&s.show(i)})},s(o,null,[{key:"VERSION",get:function(){return"4.0.0"}},{key:"Default",get:function(){return a}}]),o}();return t(document).on(h.CLICK_DATA_API,g.DATA_TOGGLE,function(e){var i,s=this,o=P.getSelectorFromElement(this);o&&(i=t(o)[0]);var a=t(i).data(n)?"toggle":r({},t(i).data(),t(this).data());"A"!==this.tagName&&"AREA"!==this.tagName||e.preventDefault();var l=t(i).one(h.SHOW,function(e){e.isDefaultPrevented()||l.one(h.HIDDEN,function(){t(s).is(":visible")&&s.focus()})});p._jQueryInterface.call(t(i),a,this)}),t.fn.modal=p._jQueryInterface,t.fn.modal.Constructor=p,t.fn.modal.noConflict=function(){return t.fn.modal=o,p._jQueryInterface},p}(e),U=function(t){var e="tooltip",i="bs.tooltip",o="."+i,a=t.fn[e],l=new RegExp("(^|\\s)bs-tooltip\\S+","g"),h={animation:"boolean",template:"string",title:"(string|element|function)",trigger:"string",delay:"(number|object)",html:"boolean",selector:"(string|boolean)",placement:"(string|function)",offset:"(number|string)",container:"(string|element|boolean)",fallbackPlacement:"(string|array)",boundary:"(string|element)"},c={AUTO:"auto",TOP:"top",RIGHT:"right",BOTTOM:"bottom",LEFT:"left"},u={animation:!0,template:'<div class="tooltip" role="tooltip"><div class="arrow"></div><div class="tooltip-inner"></div></div>',trigger:"hover focus",title:"",delay:0,html:!1,selector:!1,placement:"top",offset:0,container:!1,fallbackPlacement:"flip",boundary:"scrollParent"},f="show",d="out",_={HIDE:"hide"+o,HIDDEN:"hidden"+o,SHOW:"show"+o,SHOWN:"shown"+o,INSERTED:"inserted"+o,CLICK:"click"+o,FOCUSIN:"focusin"+o,FOCUSOUT:"focusout"+o,MOUSEENTER:"mouseenter"+o,MOUSELEAVE:"mouseleave"+o},g="fade",p="show",m=".tooltip-inner",v=".arrow",E="hover",T="focus",y="click",C="manual",I=function(){function a(t,e){if("undefined"==typeof n)throw new TypeError("Bootstrap tooltips 
require Popper.js (https://popper.js.org)");this._isEnabled=!0,this._timeout=0,this._hoverState="",this._activeTrigger={},this._popper=null,this.element=t,this.config=this._getConfig(e),this.tip=null,this._setListeners()}var I=a.prototype;return I.enable=function(){this._isEnabled=!0},I.disable=function(){this._isEnabled=!1},I.toggleEnabled=function(){this._isEnabled=!this._isEnabled},I.toggle=function(e){if(this._isEnabled)if(e){var n=this.constructor.DATA_KEY,i=t(e.currentTarget).data(n);i||(i=new this.constructor(e.currentTarget,this._getDelegateConfig()),t(e.currentTarget).data(n,i)),i._activeTrigger.click=!i._activeTrigger.click,i._isWithActiveTrigger()?i._enter(null,i):i._leave(null,i)}else{if(t(this.getTipElement()).hasClass(p))return void this._leave(null,this);this._enter(null,this)}},I.dispose=function(){clearTimeout(this._timeout),t.removeData(this.element,this.constructor.DATA_KEY),t(this.element).off(this.constructor.EVENT_KEY),t(this.element).closest(".modal").off("hide.bs.modal"),this.tip&&t(this.tip).remove(),this._isEnabled=null,this._timeout=null,this._hoverState=null,this._activeTrigger=null,null!==this._popper&&this._popper.destroy(),this._popper=null,this.element=null,this.config=null,this.tip=null},I.show=function(){var e=this;if("none"===t(this.element).css("display"))throw new Error("Please use show on visible elements");var i=t.Event(this.constructor.Event.SHOW);if(this.isWithContent()&&this._isEnabled){t(this.element).trigger(i);var s=t.contains(this.element.ownerDocument.documentElement,this.element);if(i.isDefaultPrevented()||!s)return;var r=this.getTipElement(),o=P.getUID(this.constructor.NAME);r.setAttribute("id",o),this.element.setAttribute("aria-describedby",o),this.setContent(),this.config.animation&&t(r).addClass(g);var l="function"==typeof this.config.placement?this.config.placement.call(this,r,this.element):this.config.placement,h=this._getAttachment(l);this.addAttachmentClass(h);var c=!1===this.config.container?document.body:t(this.config.container);t(r).data(this.constructor.DATA_KEY,this),t.contains(this.element.ownerDocument.documentElement,this.tip)||t(r).appendTo(c),t(this.element).trigger(this.constructor.Event.INSERTED),this._popper=new n(this.element,r,{placement:h,modifiers:{offset:{offset:this.config.offset},flip:{behavior:this.config.fallbackPlacement},arrow:{element:v},preventOverflow:{boundariesElement:this.config.boundary}},onCreate:function(t){t.originalPlacement!==t.placement&&e._handlePopperPlacementChange(t)},onUpdate:function(t){e._handlePopperPlacementChange(t)}}),t(r).addClass(p),"ontouchstart"in document.documentElement&&t("body").children().on("mouseover",null,t.noop);var u=function(){e.config.animation&&e._fixTransition();var n=e._hoverState;e._hoverState=null,t(e.element).trigger(e.constructor.Event.SHOWN),n===d&&e._leave(null,e)};P.supportsTransitionEnd()&&t(this.tip).hasClass(g)?t(this.tip).one(P.TRANSITION_END,u).emulateTransitionEnd(a._TRANSITION_DURATION):u()}},I.hide=function(e){var n=this,i=this.getTipElement(),s=t.Event(this.constructor.Event.HIDE),r=function(){n._hoverState!==f&&i.parentNode&&i.parentNode.removeChild(i),n._cleanTipClass(),n.element.removeAttribute("aria-describedby"),t(n.element).trigger(n.constructor.Event.HIDDEN),null!==n._popper&&n._popper.destroy(),e&&e()};t(this.element).trigger(s),s.isDefaultPrevented()||(t(i).removeClass(p),"ontouchstart"in 
document.documentElement&&t("body").children().off("mouseover",null,t.noop),this._activeTrigger[y]=!1,this._activeTrigger[T]=!1,this._activeTrigger[E]=!1,P.supportsTransitionEnd()&&t(this.tip).hasClass(g)?t(i).one(P.TRANSITION_END,r).emulateTransitionEnd(150):r(),this._hoverState="")},I.update=function(){null!==this._popper&&this._popper.scheduleUpdate()},I.isWithContent=function(){return Boolean(this.getTitle())},I.addAttachmentClass=function(e){t(this.getTipElement()).addClass("bs-tooltip-"+e)},I.getTipElement=function(){return this.tip=this.tip||t(this.config.template)[0],this.tip},I.setContent=function(){var e=t(this.getTipElement());this.setElementContent(e.find(m),this.getTitle()),e.removeClass(g+" "+p)},I.setElementContent=function(e,n){var i=this.config.html;"object"==typeof n&&(n.nodeType||n.jquery)?i?t(n).parent().is(e)||e.empty().append(n):e.text(t(n).text()):e[i?"html":"text"](n)},I.getTitle=function(){var t=this.element.getAttribute("data-original-title");return t||(t="function"==typeof this.config.title?this.config.title.call(this.element):this.config.title),t},I._getAttachment=function(t){return c[t.toUpperCase()]},I._setListeners=function(){var e=this;this.config.trigger.split(" ").forEach(function(n){if("click"===n)t(e.element).on(e.constructor.Event.CLICK,e.config.selector,function(t){return e.toggle(t)});else if(n!==C){var i=n===E?e.constructor.Event.MOUSEENTER:e.constructor.Event.FOCUSIN,s=n===E?e.constructor.Event.MOUSELEAVE:e.constructor.Event.FOCUSOUT;t(e.element).on(i,e.config.selector,function(t){return e._enter(t)}).on(s,e.config.selector,function(t){return e._leave(t)})}t(e.element).closest(".modal").on("hide.bs.modal",function(){return e.hide()})}),this.config.selector?this.config=r({},this.config,{trigger:"manual",selector:""}):this._fixTitle()},I._fixTitle=function(){var t=typeof this.element.getAttribute("data-original-title");(this.element.getAttribute("title")||"string"!==t)&&(this.element.setAttribute("data-original-title",this.element.getAttribute("title")||""),this.element.setAttribute("title",""))},I._enter=function(e,n){var i=this.constructor.DATA_KEY;(n=n||t(e.currentTarget).data(i))||(n=new this.constructor(e.currentTarget,this._getDelegateConfig()),t(e.currentTarget).data(i,n)),e&&(n._activeTrigger["focusin"===e.type?T:E]=!0),t(n.getTipElement()).hasClass(p)||n._hoverState===f?n._hoverState=f:(clearTimeout(n._timeout),n._hoverState=f,n.config.delay&&n.config.delay.show?n._timeout=setTimeout(function(){n._hoverState===f&&n.show()},n.config.delay.show):n.show())},I._leave=function(e,n){var i=this.constructor.DATA_KEY;(n=n||t(e.currentTarget).data(i))||(n=new this.constructor(e.currentTarget,this._getDelegateConfig()),t(e.currentTarget).data(i,n)),e&&(n._activeTrigger["focusout"===e.type?T:E]=!1),n._isWithActiveTrigger()||(clearTimeout(n._timeout),n._hoverState=d,n.config.delay&&n.config.delay.hide?n._timeout=setTimeout(function(){n._hoverState===d&&n.hide()},n.config.delay.hide):n.hide())},I._isWithActiveTrigger=function(){for(var t in this._activeTrigger)if(this._activeTrigger[t])return!0;return!1},I._getConfig=function(n){return"number"==typeof(n=r({},this.constructor.Default,t(this.element).data(),n)).delay&&(n.delay={show:n.delay,hide:n.delay}),"number"==typeof n.title&&(n.title=n.title.toString()),"number"==typeof n.content&&(n.content=n.content.toString()),P.typeCheckConfig(e,n,this.constructor.DefaultType),n},I._getDelegateConfig=function(){var t={};if(this.config)for(var e in 
this.config)this.constructor.Default[e]!==this.config[e]&&(t[e]=this.config[e]);return t},I._cleanTipClass=function(){var e=t(this.getTipElement()),n=e.attr("class").match(l);null!==n&&n.length>0&&e.removeClass(n.join(""))},I._handlePopperPlacementChange=function(t){this._cleanTipClass(),this.addAttachmentClass(this._getAttachment(t.placement))},I._fixTransition=function(){var e=this.getTipElement(),n=this.config.animation;null===e.getAttribute("x-placement")&&(t(e).removeClass(g),this.config.animation=!1,this.hide(),this.show(),this.config.animation=n)},a._jQueryInterface=function(e){return this.each(function(){var n=t(this).data(i),s="object"==typeof e&&e;if((n||!/dispose|hide/.test(e))&&(n||(n=new a(this,s),t(this).data(i,n)),"string"==typeof e)){if("undefined"==typeof n[e])throw new TypeError('No method named "'+e+'"');n[e]()}})},s(a,null,[{key:"VERSION",get:function(){return"4.0.0"}},{key:"Default",get:function(){return u}},{key:"NAME",get:function(){return e}},{key:"DATA_KEY",get:function(){return i}},{key:"Event",get:function(){return _}},{key:"EVENT_KEY",get:function(){return o}},{key:"DefaultType",get:function(){return h}}]),a}();return t.fn[e]=I._jQueryInterface,t.fn[e].Constructor=I,t.fn[e].noConflict=function(){return t.fn[e]=a,I._jQueryInterface},I}(e),x=function(t){var e="popover",n="bs.popover",i="."+n,o=t.fn[e],a=new RegExp("(^|\\s)bs-popover\\S+","g"),l=r({},U.Default,{placement:"right",trigger:"click",content:"",template:'<div class="popover" role="tooltip"><div class="arrow"></div><h3 class="popover-header"></h3><div class="popover-body"></div></div>'}),h=r({},U.DefaultType,{content:"(string|element|function)"}),c="fade",u="show",f=".popover-header",d=".popover-body",_={HIDE:"hide"+i,HIDDEN:"hidden"+i,SHOW:"show"+i,SHOWN:"shown"+i,INSERTED:"inserted"+i,CLICK:"click"+i,FOCUSIN:"focusin"+i,FOCUSOUT:"focusout"+i,MOUSEENTER:"mouseenter"+i,MOUSELEAVE:"mouseleave"+i},g=function(r){var o,g;function p(){return r.apply(this,arguments)||this}g=r,(o=p).prototype=Object.create(g.prototype),o.prototype.constructor=o,o.__proto__=g;var m=p.prototype;return m.isWithContent=function(){return this.getTitle()||this._getContent()},m.addAttachmentClass=function(e){t(this.getTipElement()).addClass("bs-popover-"+e)},m.getTipElement=function(){return this.tip=this.tip||t(this.config.template)[0],this.tip},m.setContent=function(){var e=t(this.getTipElement());this.setElementContent(e.find(f),this.getTitle());var n=this._getContent();"function"==typeof n&&(n=n.call(this.element)),this.setElementContent(e.find(d),n),e.removeClass(c+" "+u)},m._getContent=function(){return this.element.getAttribute("data-content")||this.config.content},m._cleanTipClass=function(){var e=t(this.getTipElement()),n=e.attr("class").match(a);null!==n&&n.length>0&&e.removeClass(n.join(""))},p._jQueryInterface=function(e){return this.each(function(){var i=t(this).data(n),s="object"==typeof e?e:null;if((i||!/destroy|hide/.test(e))&&(i||(i=new p(this,s),t(this).data(n,i)),"string"==typeof e)){if("undefined"==typeof i[e])throw new TypeError('No method named "'+e+'"');i[e]()}})},s(p,null,[{key:"VERSION",get:function(){return"4.0.0"}},{key:"Default",get:function(){return l}},{key:"NAME",get:function(){return e}},{key:"DATA_KEY",get:function(){return n}},{key:"Event",get:function(){return _}},{key:"EVENT_KEY",get:function(){return i}},{key:"DefaultType",get:function(){return h}}]),p}(U);return t.fn[e]=g._jQueryInterface,t.fn[e].Constructor=g,t.fn[e].noConflict=function(){return 
t.fn[e]=o,g._jQueryInterface},g}(e),K=function(t){var e="scrollspy",n="bs.scrollspy",i="."+n,o=t.fn[e],a={offset:10,method:"auto",target:""},l={offset:"number",method:"string",target:"(string|element)"},h={ACTIVATE:"activate"+i,SCROLL:"scroll"+i,LOAD_DATA_API:"load"+i+".data-api"},c="dropdown-item",u="active",f={DATA_SPY:'[data-spy="scroll"]',ACTIVE:".active",NAV_LIST_GROUP:".nav, .list-group",NAV_LINKS:".nav-link",NAV_ITEMS:".nav-item",LIST_ITEMS:".list-group-item",DROPDOWN:".dropdown",DROPDOWN_ITEMS:".dropdown-item",DROPDOWN_TOGGLE:".dropdown-toggle"},d="offset",_="position",g=function(){function o(e,n){var i=this;this._element=e,this._scrollElement="BODY"===e.tagName?window:e,this._config=this._getConfig(n),this._selector=this._config.target+" "+f.NAV_LINKS+","+this._config.target+" "+f.LIST_ITEMS+","+this._config.target+" "+f.DROPDOWN_ITEMS,this._offsets=[],this._targets=[],this._activeTarget=null,this._scrollHeight=0,t(this._scrollElement).on(h.SCROLL,function(t){return i._process(t)}),this.refresh(),this._process()}var g=o.prototype;return g.refresh=function(){var e=this,n=this._scrollElement===this._scrollElement.window?d:_,i="auto"===this._config.method?n:this._config.method,s=i===_?this._getScrollTop():0;this._offsets=[],this._targets=[],this._scrollHeight=this._getScrollHeight(),t.makeArray(t(this._selector)).map(function(e){var n,r=P.getSelectorFromElement(e);if(r&&(n=t(r)[0]),n){var o=n.getBoundingClientRect();if(o.width||o.height)return[t(n)[i]().top+s,r]}return null}).filter(function(t){return t}).sort(function(t,e){return t[0]-e[0]}).forEach(function(t){e._offsets.push(t[0]),e._targets.push(t[1])})},g.dispose=function(){t.removeData(this._element,n),t(this._scrollElement).off(i),this._element=null,this._scrollElement=null,this._config=null,this._selector=null,this._offsets=null,this._targets=null,this._activeTarget=null,this._scrollHeight=null},g._getConfig=function(n){if("string"!=typeof(n=r({},a,n)).target){var i=t(n.target).attr("id");i||(i=P.getUID(e),t(n.target).attr("id",i)),n.target="#"+i}return P.typeCheckConfig(e,n,l),n},g._getScrollTop=function(){return this._scrollElement===window?this._scrollElement.pageYOffset:this._scrollElement.scrollTop},g._getScrollHeight=function(){return this._scrollElement.scrollHeight||Math.max(document.body.scrollHeight,document.documentElement.scrollHeight)},g._getOffsetHeight=function(){return this._scrollElement===window?window.innerHeight:this._scrollElement.getBoundingClientRect().height},g._process=function(){var t=this._getScrollTop()+this._config.offset,e=this._getScrollHeight(),n=this._config.offset+e-this._getOffsetHeight();if(this._scrollHeight!==e&&this.refresh(),t>=n){var i=this._targets[this._targets.length-1];this._activeTarget!==i&&this._activate(i)}else{if(this._activeTarget&&t<this._offsets[0]&&this._offsets[0]>0)return this._activeTarget=null,void this._clear();for(var s=this._offsets.length;s--;){this._activeTarget!==this._targets[s]&&t>=this._offsets[s]&&("undefined"==typeof this._offsets[s+1]||t<this._offsets[s+1])&&this._activate(this._targets[s])}}},g._activate=function(e){this._activeTarget=e,this._clear();var n=this._selector.split(",");n=n.map(function(t){return t+'[data-target="'+e+'"],'+t+'[href="'+e+'"]'});var i=t(n.join(","));i.hasClass(c)?(i.closest(f.DROPDOWN).find(f.DROPDOWN_TOGGLE).addClass(u),i.addClass(u)):(i.addClass(u),i.parents(f.NAV_LIST_GROUP).prev(f.NAV_LINKS+", 
"+f.LIST_ITEMS).addClass(u),i.parents(f.NAV_LIST_GROUP).prev(f.NAV_ITEMS).children(f.NAV_LINKS).addClass(u)),t(this._scrollElement).trigger(h.ACTIVATE,{relatedTarget:e})},g._clear=function(){t(this._selector).filter(f.ACTIVE).removeClass(u)},o._jQueryInterface=function(e){return this.each(function(){var i=t(this).data(n);if(i||(i=new o(this,"object"==typeof e&&e),t(this).data(n,i)),"string"==typeof e){if("undefined"==typeof i[e])throw new TypeError('No method named "'+e+'"');i[e]()}})},s(o,null,[{key:"VERSION",get:function(){return"4.0.0"}},{key:"Default",get:function(){return a}}]),o}();return t(window).on(h.LOAD_DATA_API,function(){for(var e=t.makeArray(t(f.DATA_SPY)),n=e.length;n--;){var i=t(e[n]);g._jQueryInterface.call(i,i.data())}}),t.fn[e]=g._jQueryInterface,t.fn[e].Constructor=g,t.fn[e].noConflict=function(){return t.fn[e]=o,g._jQueryInterface},g}(e),V=function(t){var e="bs.tab",n="."+e,i=t.fn.tab,r={HIDE:"hide"+n,HIDDEN:"hidden"+n,SHOW:"show"+n,SHOWN:"shown"+n,CLICK_DATA_API:"click.bs.tab.data-api"},o="dropdown-menu",a="active",l="disabled",h="fade",c="show",u=".dropdown",f=".nav, .list-group",d=".active",_="> li > .active",g='[data-toggle="tab"], [data-toggle="pill"], [data-toggle="list"]',p=".dropdown-toggle",m="> .dropdown-menu .active",v=function(){function n(t){this._element=t}var i=n.prototype;return i.show=function(){var e=this;if(!(this._element.parentNode&&this._element.parentNode.nodeType===Node.ELEMENT_NODE&&t(this._element).hasClass(a)||t(this._element).hasClass(l))){var n,i,s=t(this._element).closest(f)[0],o=P.getSelectorFromElement(this._element);if(s){var h="UL"===s.nodeName?_:d;i=(i=t.makeArray(t(s).find(h)))[i.length-1]}var c=t.Event(r.HIDE,{relatedTarget:this._element}),u=t.Event(r.SHOW,{relatedTarget:i});if(i&&t(i).trigger(c),t(this._element).trigger(u),!u.isDefaultPrevented()&&!c.isDefaultPrevented()){o&&(n=t(o)[0]),this._activate(this._element,s);var g=function(){var n=t.Event(r.HIDDEN,{relatedTarget:e._element}),s=t.Event(r.SHOWN,{relatedTarget:i});t(i).trigger(n),t(e._element).trigger(s)};n?this._activate(n,n.parentNode,g):g()}}},i.dispose=function(){t.removeData(this._element,e),this._element=null},i._activate=function(e,n,i){var s=this,r=("UL"===n.nodeName?t(n).find(_):t(n).children(d))[0],o=i&&P.supportsTransitionEnd()&&r&&t(r).hasClass(h),a=function(){return s._transitionComplete(e,r,i)};r&&o?t(r).one(P.TRANSITION_END,a).emulateTransitionEnd(150):a()},i._transitionComplete=function(e,n,i){if(n){t(n).removeClass(c+" "+a);var s=t(n.parentNode).find(m)[0];s&&t(s).removeClass(a),"tab"===n.getAttribute("role")&&n.setAttribute("aria-selected",!1)}if(t(e).addClass(a),"tab"===e.getAttribute("role")&&e.setAttribute("aria-selected",!0),P.reflow(e),t(e).addClass(c),e.parentNode&&t(e.parentNode).hasClass(o)){var r=t(e).closest(u)[0];r&&t(r).find(p).addClass(a),e.setAttribute("aria-expanded",!0)}i&&i()},n._jQueryInterface=function(i){return this.each(function(){var s=t(this),r=s.data(e);if(r||(r=new n(this),s.data(e,r)),"string"==typeof i){if("undefined"==typeof r[i])throw new TypeError('No method named "'+i+'"');r[i]()}})},s(n,null,[{key:"VERSION",get:function(){return"4.0.0"}}]),n}();return t(document).on(r.CLICK_DATA_API,g,function(e){e.preventDefault(),v._jQueryInterface.call(t(this),"show")}),t.fn.tab=v._jQueryInterface,t.fn.tab.Constructor=v,t.fn.tab.noConflict=function(){return t.fn.tab=i,v._jQueryInterface},v}(e);!function(t){if("undefined"==typeof t)throw new TypeError("Bootstrap's JavaScript requires jQuery. 
jQuery must be included before Bootstrap's JavaScript.");var e=t.fn.jquery.split(" ")[0].split(".");if(e[0]<2&&e[1]<9||1===e[0]&&9===e[1]&&e[2]<1||e[0]>=4)throw new Error("Bootstrap's JavaScript requires at least jQuery v1.9.1 but less than v4.0.0")}(e),t.Util=P,t.Alert=L,t.Button=R,t.Carousel=j,t.Collapse=H,t.Dropdown=W,t.Modal=M,t.Popover=x,t.Scrollspy=K,t.Tab=V,t.Tooltip=U,Object.defineProperty(t,"__esModule",{value:!0})}); |
index.js | module.exports = {
title: 'Task',
type: 'object',
description: 'This editor allows you to edit the various aspects of a task. All the data in this editor is organized as a JSON object whose properties represent the parts of the task. Some properties are optional and hidden by default; to add them, click "Object properties" on the corresponding tree item.',
languages: {
"list": {
"en": "English",
"fr": "French",
"fa": "فارسی"
},
"rtl": ["fa"],
"original": "en"
},
definitions: {
fileDescr: {
type: 'string',
format: 'url',
options: {
upload: true
}
},
compilationDescr: require('./compilationDescr.js'),
executionParams: require('./executionParams'), | compilationDescr: {
$ref: '#/definitions/compilationDescr'
},
compilationExecution: {
$ref: '#/definitions/executionParams'
},
runExecution: {
$ref: '#/definitions/executionParams'
}
},
required: ['compilationDescr', 'compilationExecution', 'runExecution']
},
filename: {
type: 'string',
description: 'A valid file name.',
pattern: '^\\w[\\w.~/-]+$'
}
},
properties: {
title: {
type: 'string',
title: 'Title',
generator: [
{
output: {
inject: {
template: 'index.html',
selector: 'title'
}
}
},
{
output: {
inject: {
template: "index.html",
selector: "#taskTitle"
}
}
}
]
},
icon: {
type: "string",
description: 'PNG image file.',
title: 'Icon',
format: "url",
options: {
upload: true,
editor: true
},
generator: [
{
output: {
copy: "icon.png"
}
}
]
},
taskIntro: {
title: "Task statement",
description: "Task statement, displayed to the user.",
type: "string",
format: "html",
options: {
wysiwyg: true
},
generator: [
{
output: {
inject: {
template: "index.html",
selector: "#taskIntro"
}
}
}
]
},
AlgoreaTrainingTaskMetaData: require('./AlgoreaTrainingTaskMetaData.js'),
PEMTaskMetaData: require('./PEMTaskMetaData.js'),
gridInfos: require('./gridInfos.js'),
difficulties: require('./difficulties.js'),
displayHelper: require('./displayHelper.js')
},
required: [
'title',
'icon',
'taskIntro',
'AlgoreaTrainingTaskMetaData',
'PEMTaskMetaData',
'gridInfos',
'difficulties',
'displayHelper'
],
translate: [
'title',
'taskIntro'
],
generator: [
{
input: {
collector: 'collectors/subTask.gridInfos.js'
},
output: {
inject: {
template: 'task.js',
selector: '$subTask.gridInfos'
}
}
},
{
input: {
collector: 'collectors/subTask.data.js'
},
output: {
inject: {
template: 'task.js',
selector: '$subTaskData'
}
}
},
{
input: {
collector: 'collectors/image_files.js',
keepArray: true
},
output: {
render: {
template: 'images.html'
},
inject: {
template: 'index.html',
selector: '#images-preload'
}
}
}
]
}; | compileAndRunParams: {
type: 'object',
description: 'Parameters for a compilation and an execution.',
properties: { |
floor_fn.rs | // Copyright 2019 The n-sql Project Developers.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use ast::Expression;
#[derive(Clone, Debug)]
pub struct FloorFn {
pub expr: Box<Expression>
}
impl FloorFn {
pub fn new(expr: Box<Expression>) -> FloorFn |
} | {
FloorFn { expr }
} |
app_PyQtArnold_RenderTest.py | import os
import sys
import ctypes
from os import listdir, path
from os.path import isfile
from PyQt4.QtGui import *
from PyQt4.QtCore import *
# Before using arnold!
# I have to load dll manually
# Problem using dlls
s_path = "C:/solidangle/arnold/Arnold-5.2.2.0-windows/bin"
if os.access(s_path, os.F_OK):
dlls = [dll for dll in listdir(s_path) if isfile(path.join(s_path, dll)) and
('.dll' in dll)]
for dll in dlls:
print "{0}/{1}".format(s_path, dll)
ctypes.WinDLL ("{0}/{1}".format(s_path, dll))
# End of loading dlls
from UI import main_dialog, bridge_function
from Core import app_PyQtArnold_core
modules = [main_dialog, bridge_function, app_PyQtArnold_core]
[reload (xi) for xi in modules]
class | (QDialog, main_dialog.Ui_main_dialog):
def __init__(self, parent=None):
super(MyApplication, self).__init__(parent)
self.setupUi(self)
def main():
if QCoreApplication.instance() is not None:
app = QCoreApplication.instance()
else:
app = QApplication(sys.argv)
app.aboutToQuit.connect(app.quit)
window = MyApplication()
core = app_PyQtArnold_core.CoreFunctions()
bridge = bridge_function.UiActions(
win_dialog=window, core_func = core)
window.show()
try:
sys.exit(app.exec_())
except:
pass
main() | MyApplication |
orchestrator.py | # AUTOGENERATED! DO NOT EDIT! File to edit: nbs/05-orchestrator.ipynb (unless otherwise specified).
__all__ = ['retry_request', 'if_possible_parse_local_datetime', 'SP_and_date_request', 'handle_capping',
'date_range_request', 'year_request', 'construct_year_month_pairs', 'year_and_month_request',
'clean_year_week', 'construct_year_week_pairs', 'year_and_week_request', 'non_temporal_request',
'query_orchestrator']
# Cell
import pandas as pd
from tqdm import tqdm
from warnings import warn
from requests.models import Response
from . import utils, raw
# Cell
def retry_request(raw, method, kwargs, n_attempts=3):
attempts = 0
success = False
while (attempts < n_attempts) and (success == False):
try:
r = getattr(raw, method)(**kwargs)
utils.check_status(r)
success = True
except Exception as e:
attempts += 1
if attempts == n_attempts:
raise e
return r
def if_possible_parse_local_datetime(df):
dt_cols_with_period_in_name = ['startTimeOfHalfHrPeriod', 'initialForecastPublishingPeriodCommencingTime', 'latestForecastPublishingPeriodCommencingTime', 'outTurnPublishingPeriodCommencingTime']
dt_cols = [col for col in df.columns if 'date' in col.lower() or col in dt_cols_with_period_in_name]
sp_cols = [col for col in df.columns if 'period' in col.lower() and col not in dt_cols_with_period_in_name]
if len(dt_cols)==1 and len(sp_cols)==1:
df = utils.parse_local_datetime(df, dt_col=dt_cols[0], SP_col=sp_cols[0])
return df
def SP_and_date_request(
method: str,
kwargs_map: dict,
func_params: list,
api_key: str,
start_date: str,
end_date: str,
n_attempts: int=3,
**kwargs
):
assert start_date is not None, '`start_date` must be specified'
assert end_date is not None, '`end_date` must be specified'
df = pd.DataFrame()
stream = '_'.join(method.split('_')[1:])
kwargs.update({
'APIKey': api_key,
'ServiceType': 'xml'
})
df_dates_SPs = utils.dt_rng_to_SPs(start_date, end_date)
date_SP_tuples = list(df_dates_SPs.reset_index().itertuples(index=False, name=None))[:-1]
for datetime, query_date, SP in tqdm(date_SP_tuples, desc=stream, total=len(date_SP_tuples)):
kwargs.update({
kwargs_map['date']: datetime.strftime('%Y-%m-%d'),
kwargs_map['SP']: SP,
})
missing_kwargs = list(set(func_params) - set(['SP', 'date'] + list(kwargs.keys())))
assert len(missing_kwargs) == 0, f"The following kwargs are missing: {', '.join(missing_kwargs)}"
r = retry_request(raw, method, kwargs, n_attempts=n_attempts)
df_SP = utils.parse_xml_response(r)
df = pd.concat([df, df_SP])
df = utils.expand_cols(df)
df = if_possible_parse_local_datetime(df)
return df
# Cell
def handle_capping(
r: Response,
df: pd.DataFrame,
method: str,
kwargs_map: dict,
func_params: list,
api_key: str,
end_date: str,
request_type: str,
**kwargs
):
capping_applied = utils.check_capping(r)
assert capping_applied is not None, 'No information on whether or not capping limits had been breached could be found in the response metadata'
if capping_applied == True: # only subset of date range returned
dt_cols_with_period_in_name = ['startTimeOfHalfHrPeriod']
dt_cols = [col for col in df.columns if ('date' in col.lower() or col in dt_cols_with_period_in_name) and ('end' not in col.lower())]
if len(dt_cols) == 1:
start_date = pd.to_datetime(df[dt_cols[0]]).max().strftime('%Y-%m-%d')
if 'start_time' in kwargs.keys():
kwargs['start_time'] = '00:00'
if pd.to_datetime(start_date) >= pd.to_datetime(end_date):
warn(f'The `end_date` ({end_date}) was earlier than `start_date` ({start_date})\nThe `start_date` will be set one day earlier than the `end_date`.')
start_date = (pd.to_datetime(end_date) - pd.Timedelta(days=1)).strftime('%Y-%m-%d')
warn(f'Response was capped, request is rerunning for missing data from {start_date}')
df_rerun = date_range_request(
method=method,
kwargs_map=kwargs_map,
func_params=func_params,
api_key=api_key,
start_date=start_date,
end_date=end_date,
request_type=request_type,
**kwargs
)
df = pd.concat([df, df_rerun])
df = df.drop_duplicates()
else:
warn(f'Response was capped: a new `start_date` to continue requesting could not be determined automatically, please handle manually for `{method}`')
return df
def date_range_request(
method: str,
kwargs_map: dict,
func_params: list,
api_key: str,
start_date: str,
end_date: str,
request_type: str,
n_attempts: int=3,
**kwargs
):
assert start_date is not None, '`start_date` must be specified'
assert end_date is not None, '`end_date` must be specified'
kwargs.update({
'APIKey': api_key,
'ServiceType': 'xml'
})
for kwarg in ['start_time', 'end_time']:
if kwarg not in kwargs_map.keys():
kwargs_map[kwarg] = kwarg
kwargs[kwargs_map['start_date']], kwargs[kwargs_map['start_time']] = pd.to_datetime(start_date).strftime('%Y-%m-%d %H:%M:%S').split(' ')
kwargs[kwargs_map['end_date']], kwargs[kwargs_map['end_time']] = pd.to_datetime(end_date).strftime('%Y-%m-%d %H:%M:%S').split(' ')
if 'SP' in kwargs_map.keys():
kwargs[kwargs_map['SP']] = '*'
func_params.remove('SP')
func_params += [kwargs_map['SP']]
missing_kwargs = list(set(func_params) - set(['start_date', 'end_date', 'start_time', 'end_time'] + list(kwargs.keys())))
assert len(missing_kwargs) == 0, f"The following kwargs are missing: {', '.join(missing_kwargs)}"
if request_type == 'date_range':
kwargs.pop(kwargs_map['start_time'])
kwargs.pop(kwargs_map['end_time'])
r = retry_request(raw, method, kwargs, n_attempts=n_attempts)
df = utils.parse_xml_response(r)
df = if_possible_parse_local_datetime(df)
# Handling capping
df = handle_capping(
r,
df,
method=method,
kwargs_map=kwargs_map,
func_params=func_params,
api_key=api_key,
end_date=end_date,
request_type=request_type,
**kwargs
)
return df
| def year_request(
method: str,
kwargs_map: dict,
func_params: list,
api_key: str,
start_date: str,
end_date: str,
n_attempts: int=3,
**kwargs
):
assert start_date is not None, '`start_date` must be specified'
assert end_date is not None, '`end_date` must be specified'
df = pd.DataFrame()
stream = '_'.join(method.split('_')[1:])
kwargs.update({
'APIKey': api_key,
'ServiceType': 'xml'
})
start_year = int(pd.to_datetime(start_date).strftime('%Y'))
end_year = int(pd.to_datetime(end_date).strftime('%Y'))
for year in tqdm(range(start_year, end_year+1), desc=stream):
kwargs.update({kwargs_map['year']: year})
missing_kwargs = list(set(func_params) - set(['year'] + list(kwargs.keys())))
assert len(missing_kwargs) == 0, f"The following kwargs are missing: {', '.join(missing_kwargs)}"
r = retry_request(raw, method, kwargs, n_attempts=n_attempts)
df_year = utils.parse_xml_response(r)
df = pd.concat([df, df_year])
df = if_possible_parse_local_datetime(df)
return df
# Cell
def construct_year_month_pairs(start_date, end_date):
dt_rng = pd.date_range(start_date, end_date, freq='M')
if len(dt_rng) == 0:
year_month_pairs = [tuple(pd.to_datetime(start_date).strftime('%Y %b').split(' '))]
else:
year_month_pairs = [tuple(dt.strftime('%Y %b').split(' ')) for dt in dt_rng]
year_month_pairs = [(int(year), month.upper()) for year, month in year_month_pairs]
return year_month_pairs
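# A quick worked example (assuming an English locale for the `%b` month abbreviation):
#
#   construct_year_month_pairs('2020-01-01', '2020-03-15')
#   -> [(2020, 'JAN'), (2020, 'FEB')]
#
# Only months whose month-end falls inside the requested range are returned.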
def year_and_month_request(
method: str,
kwargs_map: dict,
func_params: list,
api_key: str,
start_date: str,
end_date: str,
n_attempts: int=3,
**kwargs
):
assert start_date is not None, '`start_date` must be specified'
assert end_date is not None, '`end_date` must be specified'
df = pd.DataFrame()
stream = '_'.join(method.split('_')[1:])
kwargs.update({
'APIKey': api_key,
'ServiceType': 'xml'
})
year_month_pairs = construct_year_month_pairs(start_date, end_date)
for year, month in tqdm(year_month_pairs, desc=stream):
kwargs.update({
kwargs_map['year']: year,
kwargs_map['month']: month
})
missing_kwargs = list(set(func_params) - set(['year', 'month'] + list(kwargs.keys())))
assert len(missing_kwargs) == 0, f"The following kwargs are missing: {', '.join(missing_kwargs)}"
r = retry_request(raw, method, kwargs, n_attempts=n_attempts)
df_year = utils.parse_xml_response(r)
df = pd.concat([df, df_year])
df = if_possible_parse_local_datetime(df)
return df
# Cell
def clean_year_week(year, week):
year = int(year)
if week == '00':
year = int(year) - 1
week = 52
else:
year = int(year)
week = int(week)
return year, week
def construct_year_week_pairs(start_date, end_date):
dt_rng = pd.date_range(start_date, end_date, freq='W')
if len(dt_rng) == 0:
year_week_pairs = [tuple(pd.to_datetime(start_date).strftime('%Y %W').split(' '))]
else:
year_week_pairs = [tuple(dt.strftime('%Y %W').split(' ')) for dt in dt_rng]
year_week_pairs = [clean_year_week(year, week) for year, week in year_week_pairs]
return year_week_pairs
def year_and_week_request(
method: str,
kwargs_map: dict,
func_params: list,
api_key: str,
start_date: str,
end_date: str,
n_attempts: int=3,
**kwargs
):
assert start_date is not None, '`start_date` must be specified'
assert end_date is not None, '`end_date` must be specified'
df = pd.DataFrame()
stream = '_'.join(method.split('_')[1:])
kwargs.update({
'APIKey': api_key,
'ServiceType': 'xml'
})
year_week_pairs = construct_year_week_pairs(start_date, end_date)
for year, week in tqdm(year_week_pairs, desc=stream):
kwargs.update({
kwargs_map['year']: year,
kwargs_map['week']: week
})
missing_kwargs = list(set(func_params) - set(['year', 'week'] + list(kwargs.keys())))
assert len(missing_kwargs) == 0, f"The following kwargs are missing: {', '.join(missing_kwargs)}"
r = retry_request(raw, method, kwargs, n_attempts=n_attempts)
df_year = utils.parse_xml_response(r)
df = pd.concat([df, df_year])
df = if_possible_parse_local_datetime(df)
return df
# Cell
def non_temporal_request(
method: str,
api_key: str,
n_attempts: int=3,
**kwargs
):
kwargs.update({
'APIKey': api_key,
'ServiceType': 'xml'
})
r = retry_request(raw, method, kwargs, n_attempts=n_attempts)
df = utils.parse_xml_response(r)
df = if_possible_parse_local_datetime(df)
return df
# Cell
def query_orchestrator(
method: str,
api_key: str,
request_type: str,
kwargs_map: dict=None,
func_params: list=None,
start_date: str=None,
end_date: str=None,
n_attempts: int=3,
**kwargs
):
if request_type not in ['non_temporal']:
kwargs.update({
'kwargs_map': kwargs_map,
'func_params': func_params,
'start_date': start_date,
'end_date': end_date,
})
if request_type in ['date_range', 'date_time_range']:
kwargs.update({
'request_type': request_type,
})
request_type_to_func = {
'SP_and_date': SP_and_date_request,
'date_range': date_range_request,
'date_time_range': date_range_request,
'year': year_request,
'year_and_month': year_and_month_request,
'year_and_week': year_and_week_request,
'non_temporal': non_temporal_request
}
assert request_type in request_type_to_func.keys(), f"{request_type} must be one of: {', '.join(request_type_to_func.keys())}"
request_func = request_type_to_func[request_type]
df = request_func(
method=method,
api_key=api_key,
n_attempts=n_attempts,
**kwargs
)
df = df.reset_index(drop=True)
return df | # Cell |
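# Illustrative usage sketch for `query_orchestrator` (the method name and kwargs below
# are hypothetical; the real values depend on the wrappers exposed by the `raw` module):
#
#   df = query_orchestrator(
#       method='get_B1770',
#       api_key='YOUR_API_KEY',
#       request_type='SP_and_date',
#       kwargs_map={'date': 'SettlementDate', 'SP': 'Period'},
#       func_params=['APIKey', 'date', 'SP', 'ServiceType'],
#       start_date='2020-01-01',
#       end_date='2020-01-02',
#   )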
coherence-negative-impls-safe.rs | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
// pretty-expanded FIXME #23616
#![feature(optin_builtin_traits)]
use std::marker::Send;
struct TestType;
impl !Send for TestType {}
fn | () {}
| main |
tools.py | """Copyright 2014 Cyrus Dasadia
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from django.shortcuts import redirect, render_to_response
from django.forms.formsets import formset_factory
from django.contrib.auth.decorators import login_required
from django.template import RequestContext
from cito_engine.forms import tools_form, events
@login_required(login_url='/login/')
def bulk_upload_events(request):
BulkEventsFormset = formset_factory(events.EventForm, extra=0)
render_vars = dict()
if request.method != 'POST':
return redirect('/incidents/')
else:
# If we got a bunch of lines for events
if 'list_of_items' in request.POST:
list_of_events = request.POST.get('list_of_items')
severity = request.POST.get('severity')
category = request.POST.get('category')
team = request.POST.get('team')
initial_data = []
if list_of_events:
|
else:
render_vars['errors'] = ['List was empty!']
# We got the formset
elif 'form-INITIAL_FORMS' in request.POST:
render_vars['formset'] = BulkEventsFormset(request.POST)
user_teams = request.user.team_set.all()
if render_vars['formset'].is_valid():
for form in render_vars['formset']:
if form.cleaned_data.get('team') not in user_teams:
render_vars['errors'] = ['Cannot add event for %s, you are not a member of this team.' %
form.cleaned_data.get('team')]
return render_to_response('bulk_upload.html', render_vars,
context_instance=RequestContext(request))
else:
form.save()
return redirect('/events/')
return render_to_response('bulk_upload.html', render_vars, context_instance=RequestContext(request))
@login_required(login_url='/login/')
def show_bulk_upload_form(request, upload_item):
form = tools_form.BulkUploadForm()
render_vars = dict()
render_vars['form'] = form
render_vars['box_title'] = 'Bulk add events'
render_vars['page_title'] = 'Bulk add events'
if upload_item == 'events':
render_vars['form_action'] = '/tools/bulkupload/events/confirm/'
return render_to_response('generic_form.html', render_vars, context_instance=RequestContext(request))
| for event_summary in list_of_events.splitlines():
initial_data.append({'summary': event_summary,
'description': event_summary,
'severity': severity,
'team': team,
'category': category,
})
render_vars['formset'] = BulkEventsFormset(initial=initial_data) |
menu_popup.rs | use crate::{
align::Align,
event::{Callback, Event, EventResult, Key, MouseButton, MouseEvent},
menu,
rect::Rect,
theme::ColorStyle,
view::scroll,
view::{Position, View},
views::OnEventView,
Cursive, Printer, Vec2, With,
};
use std::cmp::min;
use std::rc::Rc;
use unicode_width::UnicodeWidthStr;
/// Popup that shows a list of items.
///
/// This is mostly used indirectly when creating a [popup `SelectView`][1] or
/// a [menubar][2].
///
/// [1]: crate::views::SelectView::popup()
/// [2]: crate::Cursive::menubar()
pub struct MenuPopup {
menu: Rc<menu::Tree>,
focus: usize,
scroll_core: scroll::Core,
align: Align,
on_dismiss: Option<Callback>,
on_action: Option<Callback>,
}
// The `scroll::Scroller` trait is used to weave the borrow phases.
//
// TODO: use some macro to auto-generate this.
impl_scroller!(MenuPopup::scroll_core);
impl MenuPopup {
/// Creates a new `MenuPopup` using the given menu tree.
pub fn new(menu: Rc<menu::Tree>) -> Self |
/// Sets the currently focused element.
pub fn set_focus(&mut self, focus: usize) {
self.focus = min(focus, self.menu.len());
}
/// Sets the currently focused element.
///
/// Chainable variant.
pub fn focus(self, focus: usize) -> Self {
self.with(|s| s.set_focus(focus))
}
/// Returns the position of the currently focused child.
pub fn get_focus(&self) -> usize {
self.focus
}
fn item_width(item: &menu::Item) -> usize {
match *item {
menu::Item::Delimiter => 1,
menu::Item::Leaf { ref label, .. } => label.width(),
menu::Item::Subtree { ref label, .. } => label.width() + 3,
}
}
/// Sets the alignment for this view.
///
/// Chainable variant.
pub fn align(self, align: Align) -> Self {
self.with(|s| s.set_align(align))
}
/// Sets the alignment for this view.
pub fn set_align(&mut self, align: Align) {
self.align = align;
}
/// Sets a callback to be used when this view is actively dismissed.
///
/// (When the user hits <ESC>)
///
/// Chainable variant.
pub fn on_dismiss<F: 'static + Fn(&mut Cursive)>(self, f: F) -> Self {
self.with(|s| s.set_on_dismiss(f))
}
/// Sets a callback to be used when this view is actively dismissed.
///
/// (When the user hits <ESC>)
pub fn set_on_dismiss<F: 'static + Fn(&mut Cursive)>(&mut self, f: F) {
self.on_dismiss = Some(Callback::from_fn(f));
}
/// Sets a callback to be used when a leaf is activated.
///
/// Will also be called if a leaf from a subtree is activated.
///
/// Usually used to hide the parent view.
///
/// Chainable variant.
pub fn on_action<F: 'static + Fn(&mut Cursive)>(self, f: F) -> Self {
self.with(|s| s.set_on_action(f))
}
/// Sets a callback to be used when a leaf is activated.
///
/// Will also be called if a leaf from a subtree is activated.
///
/// Usually used to hide the parent view.
pub fn set_on_action<F: 'static + Fn(&mut Cursive)>(&mut self, f: F) {
self.on_action = Some(Callback::from_fn(f));
}
fn scroll_up(&mut self, mut n: usize, cycle: bool) {
while n > 0 {
if self.focus > 0 {
self.focus -= 1;
} else if cycle {
self.focus = self.menu.children.len() - 1;
} else {
break;
}
if self.menu.children[self.focus].is_enabled() {
n -= 1;
}
}
}
fn scroll_down(&mut self, mut n: usize, cycle: bool) {
while n > 0 {
if self.focus + 1 < self.menu.children.len() {
self.focus += 1;
} else if cycle {
self.focus = 0;
} else {
// Stop if we're at the bottom.
break;
}
if self.menu.children[self.focus].is_enabled() {
n -= 1;
}
}
}
fn submit(&mut self) -> EventResult {
match self.menu.children[self.focus] {
menu::Item::Leaf { ref cb, .. } => {
let cb = cb.clone();
let action_cb = self.on_action.clone();
EventResult::with_cb(move |s| {
// Remove ourselves from the face of the earth
s.pop_layer();
// If we had prior orders, do it now.
if let Some(ref action_cb) = action_cb {
action_cb.clone()(s);
}
// And transmit his last words.
cb.clone()(s);
})
}
menu::Item::Subtree { ref tree, .. } => self.make_subtree_cb(tree),
_ => unreachable!("Delimiters cannot be submitted."),
}
}
fn dismiss(&mut self) -> EventResult {
let dismiss_cb = self.on_dismiss.clone();
EventResult::with_cb(move |s| {
if let Some(ref cb) = dismiss_cb {
cb.clone()(s);
}
s.pop_layer();
})
}
fn make_subtree_cb(&self, tree: &Rc<menu::Tree>) -> EventResult {
let tree = Rc::clone(tree);
let max_width = 4 + self
.menu
.children
.iter()
.map(MenuPopup::item_width)
.max()
.unwrap_or(1);
let offset = Vec2::new(max_width, self.focus);
let action_cb = self.on_action.clone();
EventResult::with_cb(move |s| {
let action_cb = action_cb.clone();
s.screen_mut().add_layer_at(
Position::parent(offset),
OnEventView::new(MenuPopup::new(Rc::clone(&tree)).on_action(
move |s| {
// This will happen when the subtree popup
// activates something;
// First, remove ourselves.
s.pop_layer();
if let Some(ref action_cb) = action_cb {
action_cb.clone()(s);
}
},
))
.on_event(Key::Left, |s| {
s.pop_layer();
}),
);
})
}
/// Handle an event for the content.
///
/// Here the event has already been relativized. This means `y=0` points to the first item.
fn inner_on_event(&mut self, event: Event) -> EventResult {
match event {
Event::Key(Key::Up) => self.scroll_up(1, true),
Event::Key(Key::PageUp) => self.scroll_up(5, false),
Event::Key(Key::Down) => self.scroll_down(1, true),
Event::Key(Key::PageDown) => self.scroll_down(5, false),
Event::Key(Key::Home) => self.focus = 0,
Event::Key(Key::End) => {
self.focus = self.menu.children.len().saturating_sub(1)
}
Event::Key(Key::Right)
if self.menu.children[self.focus].is_subtree() =>
{
return match self.menu.children[self.focus] {
menu::Item::Subtree { ref tree, .. } => {
self.make_subtree_cb(tree)
}
_ => unreachable!("Child is a subtree"),
};
}
Event::Key(Key::Enter)
if self.menu.children[self.focus].is_enabled() =>
{
return self.submit();
}
Event::Mouse {
event: MouseEvent::Press(_),
position,
offset,
} => {
// eprintln!("Position: {:?} / {:?}", position, offset);
if let Some(position) = position.checked_sub(offset) {
// Now `position` is relative to the top-left of the content.
let focus = position.y;
if focus < self.menu.len()
&& self.menu.children[focus].is_enabled()
{
self.focus = focus;
}
}
}
Event::Mouse {
event: MouseEvent::Release(MouseButton::Left),
position,
offset,
} if self.menu.children[self.focus].is_enabled()
&& position
.checked_sub(offset)
.map(|position| position.y == self.focus)
.unwrap_or(false) =>
{
return self.submit();
}
Event::Key(Key::Esc) => {
return self.dismiss();
}
_ => return EventResult::Ignored,
}
EventResult::Consumed(None)
}
/// Compute the required size for the content.
fn inner_required_size(&mut self, _req: Vec2) -> Vec2 {
let w = 2 + self
.menu
.children
.iter()
.map(Self::item_width)
.max()
.unwrap_or(1);
let h = self.menu.children.len();
Vec2::new(w, h)
}
fn inner_important_area(&self, size: Vec2) -> Rect {
if self.menu.is_empty() {
return Rect::from((0, 0));
}
Rect::from_size((0, self.focus), (size.x, 1))
}
}
impl View for MenuPopup {
fn draw(&self, printer: &Printer) {
if !printer.size.fits((2, 2)) {
return;
}
let h = self.menu.len();
// If we're too high, add a vertical offset
let offset = self.align.v.get_offset(h, printer.size.y);
let printer = &printer.offset((0, offset));
// Start with a box
scroll::draw_box_frame(
self,
&printer,
|s, y| s.menu.children[y].is_delimiter(),
|_s, _x| false,
);
// We're giving it a reduced size because of borders.
let printer = printer.shrinked_centered((2, 2));
scroll::draw_lines(self, &printer, |s, printer, i| {
let item = &s.menu.children[i];
let enabled =
printer.enabled && (item.is_enabled() || item.is_delimiter());
let color = if !enabled {
ColorStyle::secondary()
} else if i == s.focus {
ColorStyle::highlight()
} else {
ColorStyle::primary()
};
printer.with_style(color, |printer| {
match *item {
menu::Item::Delimiter => {
// printer.print_hdelim((0, 0), printer.size.x)
printer.print_hline((0, 0), printer.size.x, "─");
}
menu::Item::Subtree { ref label, .. } => {
if printer.size.x < 4 {
return;
}
printer.print_hline((0, 0), printer.size.x, " ");
printer.print((1, 0), label);
let x = printer.size.x.saturating_sub(3);
printer.print((x, 0), ">>");
}
menu::Item::Leaf { ref label, .. } => {
if printer.size.x < 2 {
return;
}
printer.print_hline((0, 0), printer.size.x, " ");
printer.print((1, 0), label);
}
}
});
});
}
fn required_size(&mut self, req: Vec2) -> Vec2 {
// We can't really shrink our items here, so it's not flexible.
// 2 is the padding
scroll::required_size(
self,
req.saturating_sub((2, 2)),
true,
Self::inner_required_size,
) + (2, 2)
}
fn on_event(&mut self, event: Event) -> EventResult {
match scroll::on_event(
self,
event.relativized((1, 1)),
Self::inner_on_event,
Self::inner_important_area,
) {
EventResult::Ignored => {
// Check back the non-relativized event now
if let Event::Mouse {
event: MouseEvent::Press(_),
position,
offset,
} = event
{
// Mouse press will be ignored if they are outside of the content.
// They can be on the border, or entirely outside of the popup.
// Mouse clicks outside of the popup should dismiss it.
if !position.fits_in_rect(
offset,
self.scroll_core.last_outer_size() + (2, 2),
) {
let dismiss_cb = self.on_dismiss.clone();
return EventResult::with_cb(move |s| {
if let Some(ref cb) = dismiss_cb {
cb.clone()(s);
}
s.pop_layer();
});
}
}
EventResult::Ignored
}
other => other,
}
}
fn layout(&mut self, size: Vec2) {
scroll::layout(
self,
size.saturating_sub((2, 2)),
true,
|_s, _size| (),
Self::inner_required_size,
);
}
fn important_area(&self, size: Vec2) -> Rect {
scroll::important_area(
self,
size.saturating_sub((2, 2)),
Self::inner_important_area,
)
.with(|area| area.offset((1, 1)))
}
}
| {
MenuPopup {
menu,
focus: 0,
scroll_core: scroll::Core::new(),
align: Align::top_left(),
on_dismiss: None,
on_action: None,
}
} |
postal_code.py | # -*- coding:utf-8 -*-
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from yepes import forms
from yepes.fields.char import CharField
from yepes.validators import PostalCodeValidator
from yepes.utils.deconstruct import clean_keywords
class PostalCodeField(CharField):
default_validators = [PostalCodeValidator()]
description = _('Generic postal code')
def __init__(self, *args, **kwargs):
|
def deconstruct(self):
name, path, args, kwargs = super(PostalCodeField, self).deconstruct()
path = path.replace('yepes.fields.postal_code', 'yepes.fields')
clean_keywords(self, kwargs, variables={
'max_length': 15,
}, constants=[
'force_lower',
'force_upper',
'normalize_spaces',
'trim_spaces',
])
return name, path, args, kwargs
def formfield(self, **kwargs):
kwargs.setdefault('form_class', forms.PostalCodeField)
return super(PostalCodeField, self).formfield(**kwargs)
| kwargs['force_lower'] = False
kwargs['force_upper'] = True
kwargs.setdefault('max_length', 15)
kwargs['normalize_spaces'] = True
kwargs['trim_spaces'] = False
super(PostalCodeField, self).__init__(*args, **kwargs) |
ModelPropertiesDialog.tsx | import React, {FunctionComponent} from 'react';
import {em, size, transitionProps} from '~/utils/style';
import Icon from '~/components/Icon';
import Properties from '~/components/GraphPage/Properties';
import type {Properties as PropertiesType} from '~/resource/graph/types';
import styled from 'styled-components';
import {useTranslation} from 'react-i18next';
const Dialog = styled.div`
position: fixed;
top: 0;
left: 0;
width: 100vw;
height: 100vh;
overscroll-behavior: none;
background-color: var(--mask-color);
z-index: 999;
${transitionProps('background-color')}
> .modal {
width: ${em(536)};
position: absolute;
top: 50%;
left: 50%;
transform: translate(-50%, -50%);
box-shadow: 0 2px 20px 0 rgba(0, 0, 0, 0.08);
> .modal-header {
padding: 0 ${em(40, 18)};
height: ${em(47, 18)};
background-color: var(--model-header-background-color);
display: flex;
justify-content: space-between;
align-items: center;
font-size: ${em(18)};
${transitionProps('background-color')}
> .modal-title {
flex: auto;
}
> .modal-close {
flex: none; | text-align: center;
cursor: pointer;
}
}
> .modal-body {
padding: ${em(40)};
background-color: var(--background-color);
overflow: auto;
max-height: calc(80vh - ${em(47)});
${transitionProps('background-color')}
}
}
`;
type ModelPropertiesDialogProps = {
data?: PropertiesType | null;
onClose?: () => unknown;
};
const ModelPropertiesDialog: FunctionComponent<ModelPropertiesDialogProps> = ({data, onClose}) => {
const {t} = useTranslation('graph');
if (!data) {
return null;
}
return (
<Dialog>
<div className="modal">
<div className="modal-header">
<span className="modal-title">{t('graph:model-properties')}</span>
<a className="modal-close" onClick={() => onClose?.()}>
<Icon type="close" />
</a>
</div>
<div className="modal-body">
<Properties {...data} expand />
</div>
</div>
</Dialog>
);
};
export default ModelPropertiesDialog; | ${size(em(14, 18), em(14, 18))}
font-size: ${em(14, 18)}; |
index.js | module.exports.Proxy = require("./src/Proxy.js"); | module.exports.createProxy = require("./src/createProxy.js"); |
|
mongo.go | // Copyright 2018 Hurricanezwf. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package mongo
import (
"time"
mgo "gopkg.in/mgo.v2"
)
var (
ErrNotFound = mgo.ErrNotFound
)
type Interface interface {
// Open starts the Mongo driver with the given config.
Open(conf *Config) error
// Close shuts down the Mongo driver.
Close() error
// GetSession returns a session; it is obtained by copying the root session.
GetSession() *mgo.Session
// PutSession releases a session.
PutSession(s *mgo.Session)
}
func New() Interface {
return newMongoV1()
}
type Config struct {
mgo.DialInfo
}
func DefaultConfig(addrs []string) *Config {
return &Config{
mgo.DialInfo{
Addrs: addrs,
Timeout: 10 * time.Second,
FailFast: true,
},
}
}
type mongoV1 struct {
conf *Config
rootSession *mgo.Session
}
func newMongoV1() Interface {
return &mongoV1{}
| *mongoV1) Open(conf *Config) (err error) {
m.rootSession, err = mgo.DialWithInfo(&conf.DialInfo)
if err != nil {
return err
}
if err = m.rootSession.Ping(); err != nil {
return err
}
return err
}
func (m *mongoV1) Close() error {
if m.rootSession != nil {
m.rootSession.Close()
}
return nil
}
func (m *mongoV1) GetSession() *mgo.Session {
return m.rootSession.Copy()
}
func (m *mongoV1) PutSession(s *mgo.Session) {
if s != nil {
s.Close()
}
}
| }
func (m |
core.go | // Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package logger
import "go.uber.org/zap/zapcore"
// Core is a zap Core used for logging
type Core struct {
core zapcore.Core
emitter *Emitter
}
// With adds contextual fields to the underlying core.
func (c *Core) With(fields []zapcore.Field) zapcore.Core {
return &Core{
core: c.core.With(fields),
emitter: c.emitter,
}
}
// Enabled will check if the supplied log level is enabled.
func (c *Core) Enabled(level zapcore.Level) bool {
return c.core.Enabled(level)
}
// Check checks the entry and determines if the core should write it.
func (c *Core) Check(zapEntry zapcore.Entry, checkedEntry *zapcore.CheckedEntry) *zapcore.CheckedEntry {
if !c.Enabled(zapEntry.Level) {
return checkedEntry
}
return checkedEntry.AddCore(zapEntry, c)
}
// Write sends an entry to the emitter before logging.
func (c *Core) Write(zapEntry zapcore.Entry, fields []zapcore.Field) error {
stanzaEntry := parseEntry(zapEntry, fields)
c.emitter.emit(stanzaEntry)
return c.core.Write(zapEntry, fields)
}
// Sync will sync the underlying core.
func (c *Core) Sync() error { | func newCore(core zapcore.Core, emitter *Emitter) *Core {
return &Core{
core: core,
emitter: emitter,
}
} | return c.core.Sync()
}
// newCore creates a new core. |
datatypes.py | """
Contains various data structures used by Bionic's infrastructure.
"""
import attr
from .utils.misc import ImmutableSequence, ImmutableMapping
@attr.s(frozen=True)
class EntityDefinition:
"""
Describes the immutable properties of an entity. These properties generally have
to do with the entity's "contract": the assumptions other parts of the system can
make about its value. However, this does *not* include the way the entity's value
is determined; this is configured separately and can be changed more easily.
Attributes
----------
name: string
The name of the entity.
protocol: Protocol
The protocol to use when serializing and deserializing entity values on disk.
doc: string
A human-readable description of the entity.
optional_should_memoize: boolean or None
Whether the entity should be memoized, or None if the global default should be
used.
optional_should_persist: boolean or None
Whether the entity should be persisted, or None if the global default should be
used.
needs_caching: boolean
Indicates that some kind of caching needs to be enabled for this entity (either
persistence or memoization).
"""
name = attr.ib()
protocol = attr.ib()
doc = attr.ib()
optional_should_memoize = attr.ib()
optional_should_persist = attr.ib()
needs_caching = attr.ib(default=False)
@attr.s(frozen=True)
class DescriptorMetadata:
"""
Holds extra data we might need when working with a descriptor.
Similar to an EntityDefinition, but can apply to non-entity descriptors, and also
incorporates information from the global configuration. (For example,
EntityDefinition has an `optional_should_memoize` field which describes the
user's memoization preferences, if any; this class has a `should_memoize` field
which describes what we'll actually do, based on both user preferences and the
global configuration.)
Attributes
----------
protocol: Protocol
The protocol to use when serializing and deserializing descriptor values on
disk.
doc: string
A human-readable description of the descriptor.
should_memoize: boolean
Whether the value should be memoized for the lifetime of its Flow instance.
should_memoize_for_query: boolean
Whether the value should be memoized for the lifetime of a Flow.get() call.
(Only relevant if ``should_memoize`` is False.)
should_persist: boolean
Whether the value should be persisted.
is_composite: boolean
Whether the value contains other descriptor values. (If so, it's desirable to
get it out of memory quickly.)
"""
protocol = attr.ib()
doc = attr.ib()
should_memoize = attr.ib(default=False)
should_memoize_for_query = attr.ib(default=False)
should_persist = attr.ib(default=False)
is_composite = attr.ib(default=True)
@attr.s(frozen=True)
class TaskKey:
"""
A unique identifier for a Task.
"""
dnode = attr.ib()
case_key = attr.ib()
def evolve(self, **kwargs):
return attr.evolve(self, **kwargs)
def __str__(self):
args_str = ", ".join(f"{name}={value}" for name, value in self.case_key.items())
return f"{self.dnode.to_descriptor(near_commas=True)}({args_str})"
@attr.s(frozen=True)
class Task:
"""
A unit of work. Can have dependencies, which are referred to via their
TaskKeys.
Attributes
----------
key: TaskKey
Key corresponding to the output value computed by this task.
dep_keys: list of TaskKeys
Keys corresponding to the input values required by this task.
compute_func: function taking a single ``dep_values`` argument
Generates output values based on the passed input values.
is_simple_lookup: boolean
Whether this task consists of simply looking up the fixed value of an entity;
used to determine what message to log when this task is computed.
"""
key = attr.ib()
dep_keys = attr.ib(converter=tuple)
compute_func = attr.ib()
is_simple_lookup = attr.ib(default=False)
def compute(self, dep_values):
return self.compute_func(dep_values)
@property
def can_be_serialized(self):
return not self.is_simple_lookup
def evolve(self, **kwargs):
return attr.evolve(self, **kwargs)
def __repr__(self):
return f"Task({self.key!r}, {self.dep_keys!r})"
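# Illustrative sketch (hypothetical values, not part of the original module):
# a leaf task with no dependencies simply wraps a computation.
#
#   task = Task(key=some_task_key, dep_keys=[], compute_func=lambda deps: 42)
#   task.compute([])          # -> 42
#   task.can_be_serialized    # -> True, since is_simple_lookup defaults to False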
@attr.s(frozen=True)
class Result:
"""
Represents one value for one entity.
"""
task_key = attr.ib()
value = attr.ib()
local_artifact = attr.ib()
value_is_missing = attr.ib(default=False)
def __repr__(self):
return f"Result({self.task_key!r}, {self.value!r})"
@attr.s(frozen=True)
class Artifact:
"""
Represents a serialized, file-like artifact, either on a local filesystem or in a
cloud object store.
"""
url: str = attr.ib()
content_hash: str = attr.ib()
def evolve(self, **kwargs):
return attr.evolve(self, **kwargs)
class CaseKeySpace(ImmutableSequence):
"""
A set of CaseKey names (without values) -- represents a space of possible
CaseKeys.
"""
def __init__(self, names=None):
if names is None:
names = []
super(CaseKeySpace, self).__init__(sorted(names))
def union(self, other):
return CaseKeySpace(set(self).union(other))
def intersection(self, other):
return CaseKeySpace(name for name in self if name in other)
def difference(self, other):
return CaseKeySpace(name for name in self if name not in other)
def select(self, case_key):
return case_key.project(self)
@classmethod
def union_all(cls, spaces):
if not spaces:
return CaseKeySpace([])
names = set()
for space in spaces:
names = names.union(space)
return CaseKeySpace(names)
@classmethod
def intersection_all(cls, spaces):
if not spaces:
raise ValueError("Can't take the intersection of zero spaces")
names = None
for space in spaces:
if names is None:
names = set(space)
else:
names = names.intersection(space)
return CaseKeySpace(names)
def __repr__(self):
return f'CaseKeySpace({", ".join(repr(name) for name in self)})'
class CaseKey(ImmutableMapping):
"""
A collection of name-token pairs that uniquely identifies a case.
"""
def __init__(self, name_token_pairs):
tokens_by_name = {name: token for name, token in name_token_pairs}
super(CaseKey, self).__init__(tokens_by_name)
self._name_token_pairs = name_token_pairs
self.tokens = tokens_by_name
self.space = CaseKeySpace(list(tokens_by_name.keys()))
self.missing_names = [
# None is a sentinel value used to indicate that no value is available.
# Normally I would prefer to represent missing-ness out-of-band by making the
# `missing_names` field the source of truth here, but the relational methods like
# `project` are cleaner when we use a sentinel value.
name
for name, token in name_token_pairs
if token is None
]
self.has_missing_values = len(self.missing_names) > 0
def project(self, key_space):
return CaseKey(
[
(name, token)
for name, token in self._name_token_pairs
if name in key_space
]
)
def drop(self, key_space):
return CaseKey(
[
(name, token)
for name, token in self._name_token_pairs
if name not in key_space
]
)
def merge(self, other):
tokens_by_name = {name: token for name, token in self._name_token_pairs}
for name, token in other._name_token_pairs:
if name in tokens_by_name:
assert token == tokens_by_name[name]
else:
tokens_by_name[name] = token
return CaseKey([(name, token) for name, token in tokens_by_name.items()])
def __repr__(self):
args_str = ", ".join(f"{name}={token}" for name, token in self.items())
return f"CaseKey({args_str})"
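# Illustrative sketch (not part of the original module) of how CaseKey and
# CaseKeySpace relate:
#
#   ck = CaseKey([("x", "1"), ("y", "2")])
#   ck.space                           # CaseKeySpace('x', 'y')
#   ck.project(CaseKeySpace(["x"]))    # CaseKey(x=1)
#   ck.drop(CaseKeySpace(["x"]))       # CaseKey(y=2)
#   ck.merge(CaseKey([("z", "3")]))    # CaseKey(x=1, y=2, z=3)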
class ResultGroup(ImmutableSequence):
"""
Represents a collection of Results, distinguished by their CaseKeys. Each
CaseKey should have the same set of names.
"""
def __init__(self, results, key_space):
super(ResultGroup, self).__init__(results)
self.key_space = key_space
def __repr__(self):
return f"ResultGroup({list(self)!r})"
def str_from_version_value(value):
if value is None:
return "0"
elif isinstance(value, int):
return str(value)
elif isinstance(value, str):
return value
else:
raise ValueError(f"Version values must be str, int, or None: got {value!r}")
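# For example (illustrative): str_from_version_value(None) returns "0",
# str_from_version_value(3) returns "3", and str_from_version_value("1.2")
# returns "1.2"; any other type raises ValueError.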
# The CodeVersion and CodeFingerprint classes are used (indirectly) by
# persistence.ArtifactMetadataRecord and can be serialized to YAML and stored in the
# persistent cache. That means if we add new fields to them, we also need to update
# persistence.CACHE_SCHEMA_VERSION.
# TODO Should we just move these classes to persistence.py as well?
@attr.s(frozen=True)
class CodeVersion:
"""
Contains the user-designated version of a piece of code, consisting of a
major and a minor version string, and a boolean that indicates whether it
includes the bytecode. The convention is that changing the major version
indicates a functional change, while changing the minor version indicates a
nonfunctional change. If ``includes_bytecode`` is True, then the major version
is understood to implicitly include the bytecode of the code as well.
"""
major: str = attr.ib(converter=str_from_version_value)
minor: str = attr.ib(converter=str_from_version_value)
includes_bytecode: bool = attr.ib(converter=attr.converters.default_if_none(True))
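# Illustrative sketch (not part of the original module): with the converters
# above, CodeVersion(major=2, minor=None, includes_bytecode=None) normalizes
# to major="2", minor="0", includes_bytecode=True.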
@attr.s(frozen=True)
class CodeVersioningPolicy:
"""
Contains the version of the user entity function with any additional settings
related to the version. For now, we only have one setting that affects the
analysis-time behavior of the version.
"""
version: CodeVersion = attr.ib()
suppress_bytecode_warnings: bool = attr.ib(
converter=attr.converters.default_if_none(False)
)
@attr.s(frozen=True)
class CodeFingerprint:
"""
A collection of characteristics attempting to uniquely identify a function.
Attributes
----------
version: CodeVersion
A version identifier provided by the user.
bytecode_hash: str
A hash of the function's Python bytecode.
orig_flow_name: str
The name of the flow in which this function was originally defined.
is_identity: bool
If True, indicates that this function is equivalent to the identity function:
it takes one argument and returns it unchanged.
"""
version: CodeVersion = attr.ib()
bytecode_hash: str = attr.ib()
orig_flow_name: str = attr.ib()
is_identity: bool = attr.ib(default=False)
@attr.s(frozen=True)
class VersioningPolicy:
"""
Encodes the versioning rules to use when computing entity values.
"""
check_for_bytecode_errors = attr.ib()
treat_bytecode_as_functional = attr.ib()
ignore_bytecode_exceptions = attr.ib()
@attr.s(frozen=True)
class FunctionAttributes:
"""
Describes properties of a Python function.
"""
code_fingerprint = attr.ib()
code_versioning_policy = attr.ib()
changes_per_run = attr.ib()
aip_task_config = attr.ib()