file_name (string, 3-137 chars) | prefix (string, 0-918k chars) | suffix (string, 0-962k chars) | middle (string, 0-812k chars)
---|---|---|---|
image_metadata.py
|
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from webob import exc
from nova.api.openstack.api_version_request import \
MAX_IMAGE_META_PROXY_API_VERSION
from nova.api.openstack import common
from nova.api.openstack.compute.schemas import image_metadata
from nova.api.openstack import wsgi
from nova.api import validation
from nova import exception
from nova.i18n import _
import nova.image
|
def __init__(self):
self.image_api = nova.image.API()
def _get_image(self, context, image_id):
try:
return self.image_api.get(context, image_id)
except exception.ImageNotAuthorized as e:
raise exc.HTTPForbidden(explanation=e.format_message())
except exception.ImageNotFound:
msg = _("Image not found.")
raise exc.HTTPNotFound(explanation=msg)
@wsgi.Controller.api_version("2.1", MAX_IMAGE_META_PROXY_API_VERSION)
@wsgi.expected_errors((403, 404))
def index(self, req, image_id):
"""Returns the list of metadata for a given instance."""
context = req.environ['nova.context']
metadata = self._get_image(context, image_id)['properties']
return dict(metadata=metadata)
@wsgi.Controller.api_version("2.1", MAX_IMAGE_META_PROXY_API_VERSION)
@wsgi.expected_errors((403, 404))
def show(self, req, image_id, id):
context = req.environ['nova.context']
metadata = self._get_image(context, image_id)['properties']
if id in metadata:
return {'meta': {id: metadata[id]}}
else:
raise exc.HTTPNotFound()
@wsgi.Controller.api_version("2.1", MAX_IMAGE_META_PROXY_API_VERSION)
@wsgi.expected_errors((400, 403, 404))
@validation.schema(image_metadata.create)
def create(self, req, image_id, body):
context = req.environ['nova.context']
image = self._get_image(context, image_id)
for key, value in body['metadata'].items():
image['properties'][key] = value
common.check_img_metadata_properties_quota(context,
image['properties'])
try:
image = self.image_api.update(context, image_id, image, data=None,
purge_props=True)
except exception.ImageNotAuthorized as e:
raise exc.HTTPForbidden(explanation=e.format_message())
return dict(metadata=image['properties'])
@wsgi.Controller.api_version("2.1", MAX_IMAGE_META_PROXY_API_VERSION)
@wsgi.expected_errors((400, 403, 404))
@validation.schema(image_metadata.update)
def update(self, req, image_id, id, body):
context = req.environ['nova.context']
meta = body['meta']
if id not in meta:
expl = _('Request body and URI mismatch')
raise exc.HTTPBadRequest(explanation=expl)
image = self._get_image(context, image_id)
image['properties'][id] = meta[id]
common.check_img_metadata_properties_quota(context,
image['properties'])
try:
self.image_api.update(context, image_id, image, data=None,
purge_props=True)
except exception.ImageNotAuthorized as e:
raise exc.HTTPForbidden(explanation=e.format_message())
return dict(meta=meta)
@wsgi.Controller.api_version("2.1", MAX_IMAGE_META_PROXY_API_VERSION)
@wsgi.expected_errors((400, 403, 404))
@validation.schema(image_metadata.update_all)
def update_all(self, req, image_id, body):
context = req.environ['nova.context']
image = self._get_image(context, image_id)
metadata = body['metadata']
common.check_img_metadata_properties_quota(context, metadata)
image['properties'] = metadata
try:
self.image_api.update(context, image_id, image, data=None,
purge_props=True)
except exception.ImageNotAuthorized as e:
raise exc.HTTPForbidden(explanation=e.format_message())
return dict(metadata=metadata)
@wsgi.Controller.api_version("2.1", MAX_IMAGE_META_PROXY_API_VERSION)
@wsgi.expected_errors((403, 404))
@wsgi.response(204)
def delete(self, req, image_id, id):
context = req.environ['nova.context']
image = self._get_image(context, image_id)
if id not in image['properties']:
msg = _("Invalid metadata key")
raise exc.HTTPNotFound(explanation=msg)
image['properties'].pop(id)
try:
self.image_api.update(context, image_id, image, data=None,
purge_props=True)
except exception.ImageNotAuthorized as e:
raise exc.HTTPForbidden(explanation=e.format_message())
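# A minimal request/response sketch of the proxy API above (the URL shapes are
# assumptions; the response bodies follow the return values in this controller):
#
#   GET    /images/{image_id}/metadata        -> {"metadata": {...}}     (index)
#   GET    /images/{image_id}/metadata/{id}   -> {"meta": {id: value}}   (show)
#   POST   /images/{image_id}/metadata        -> {"metadata": {...}}     (create)
#   PUT    /images/{image_id}/metadata/{id}   -> {"meta": {id: value}}   (update)
#   PUT    /images/{image_id}/metadata        -> {"metadata": {...}}     (update_all)
#   DELETE /images/{image_id}/metadata/{id}   -> 204 No Content          (delete)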
|
class ImageMetadataController(wsgi.Controller):
"""The image metadata API controller for the OpenStack API."""
|
transaction-list.component_20190202172717.ts
|
import { Component, OnInit, Input, ViewChild, AfterViewInit, OnDestroy } from '@angular/core';
import { Transaction } from '../transaction';
import { DataTableDirective } from 'angular-datatables';
import { Subject, Subscription } from 'rxjs';
import { DeliveryStatusService } from 'src/app/shared/services/delivery-status.service';
import { DeliveryStatus } from '../delivery-status';
import { Shipment } from '../../shipment/Shipment';
import { IDTypeService } from 'src/app/shared/services/idtype.service';
import { IDType } from '../idtype';
import { NgbModal, ModalDismissReasons } from '@ng-bootstrap/ng-bootstrap';
import { CustomerService } from '../../customer/customer.service';
import { Customer } from '../../customer/customer';
import { TransactionService } from '../transaction.service';
@Component({
selector: 'app-transaction-list',
templateUrl: './transaction-list.component.html',
styleUrls: ['./transaction-list.component.scss']
})
export class TransactionListComponent implements OnInit, AfterViewInit, OnDestroy {
@ViewChild(DataTableDirective)
|
public dtElement: DataTableDirective;
dtTrigger: Subject<any> = new Subject();
DeliveryStatuses = {};
deliverypersonseting = {};
IDTypeSeting = {};
@Input() list: Shipment[];
@Input() isDelivered: boolean;
statusesSub: Subscription;
deliverypersons: DeliveryStatus[];
returnResons: DeliveryStatus[];
idTypesSub: Subscription;
IDTypes: IDType[];
/* closeResult: string; */
type: number;
customer: Customer;
transactions: Transaction[];
config1: any = { placeholder: 'Select Return Reason', sourceField: ['Name'] };
config2: any = { placeholder: 'Select Delivery Person', sourceField: ['Name'] };
config3: any = { placeholder: 'Select ID Types', sourceField: ['Name'] };
dtOptions: DataTables.Settings = {};
constructor(
private _Service: TransactionService,
private _deliversStatausService: DeliveryStatusService,
private _idTypesService: IDTypeService,
private modalService: NgbModal,
private _customerService: CustomerService
) {}
openModal(content, type, Id?) {
this.type = type;
if (type === 1) {
this._customerService.getCustomerDetails(Id).subscribe(res => {
this.customer = res;
});
} else if (type === 2) {
this._Service.getHistory(Id).subscribe(res => {
this.transactions = res;
});
}
this.modalService
.open(content, { ariaLabelledBy: 'modal-basic-title' })
.result.then
/* result => {
this.closeResult = `Closed with: ${result}`;
},
reason => {
this.closeResult = `Dismissed ${this.getDismissReason(reason)}`;
} */
();
}
/* private getDismissReason(reason: any): string {
if (reason === ModalDismissReasons.ESC) {
return 'by pressing ESC';
} else if (reason === ModalDismissReasons.BACKDROP_CLICK) {
return 'by clicking on a backdrop';
} else {
return `with: ${reason}`;
}
} */
ngAfterViewInit(): void {
this.dtTrigger.next();
this.dtElement.dtInstance.then((dtInstance: DataTables.Api) => {
dtInstance.state.clear();
});
}
ngOnInit() {
this.statusesSub = this._deliversStatausService.getAll().subscribe(result => {
this.deliverypersons = result.filter(f => f.IsDelivery);
this.returnResons = result.filter(f => !f.IsDelivery);
});
this.DeliveryStatuses = {
singleSelection: true,
text: 'DeliveryStatuses',
selectAllText: 'Select All',
unSelectAllText: 'UnSelect All',
enableSearchFilter: true,
badgeShowLimit: 3,
labelKey: 'NameEn',
primaryKey: 'ID',
showCheckbox: false
};
this.deliverypersonseting = {
singleSelection: true,
text: 'deliverypersons',
selectAllText: 'Select All',
unSelectAllText: 'UnSelect All',
enableSearchFilter: true,
badgeShowLimit: 3,
labelKey: 'NameEn',
primaryKey: 'ID',
showCheckbox: false
};
this.IDTypeSeting = {
singleSelection: true,
text: 'IDTypes',
selectAllText: 'Select All',
unSelectAllText: 'UnSelect All',
enableSearchFilter: true,
badgeShowLimit: 3,
labelKey: 'NameEn',
primaryKey: 'ID',
showCheckbox: false
};
this.idTypesSub = this._idTypesService.getAll().subscribe(result => {
this.IDTypes = result;
});
this.dtOptions = {
pagingType: 'full_numbers',
stateSave: true
};
this.refreshDataSource(this.list);
}
ngOnDestroy(): void {
this.statusesSub.unsubscribe();
this.idTypesSub.unsubscribe();
}
refreshDataSource(list): void {
if (list !== undefined) {
this.dtElement.dtInstance.then((dtInstance: DataTables.Api) => {
dtInstance.destroy();
this.list = list;
this.dtTrigger.next();
});
}
}
}
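// A minimal usage sketch (hypothetical parent template; the selector and the
// @Input() bindings come from the component above):
//
//   <app-transaction-list [list]="shipments" [isDelivered]="true"></app-transaction-list>
//
// When the parent swaps in a new list, it can call refreshDataSource(newList)
// on a @ViewChild reference to destroy and re-render the DataTable.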
| |
import-ty-params.rs
|
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
mod a {
pub mod b {
pub mod c {
pub struct S<T>(T);
}
}
}
macro_rules! import {
($p: path) => (use $p;);
}
fn f1()
|
fn f2() {
import! { a::b::c::S<> } //~ ERROR unexpected generic arguments in path
}
fn main() {}
|
{
import! { a::b::c::S<u8> } //~ ERROR unexpected generic arguments in path
}
|
source.rs
|
pub fn to_source(payment_address: &str, seq_no: i32) -> Option<String> {
let addr_parts: Vec<&str> = payment_address.split(":").collect();
if addr_parts.len() != 3 {
return None
}
let mut source_parts: Vec<String> = Vec::new();
source_parts.push(addr_parts.get(0).unwrap().to_string());
source_parts.push(addr_parts.get(1).unwrap().to_string());
source_parts.push(seq_no.to_string() + "_" + addr_parts.get(2).unwrap());
Some(source_parts.join(":"))
}
pub fn from_source(source: &str) -> Option<(i32, String)>
|
{
let source_parts: Vec<&str> = source.split(":").collect();
if source_parts.len() != 3 {
return None
}
let last = source_parts.get(2).unwrap();
let last_parts: Vec<&str> = last.split("_").collect();
if last_parts.len() != 2 {
return None
}
let seq_no = match last_parts.get(0).unwrap().to_string().parse() {
Ok(v) => v,
Err(_) => return None
};
let mut recipient_parts = Vec::new();
recipient_parts.push(source_parts.get(0).unwrap().to_string());
recipient_parts.push(source_parts.get(1).unwrap().to_string());
recipient_parts.push(last_parts.get(1).unwrap().to_string());
Some((seq_no, recipient_parts.join(":")))
}
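// A round-trip sketch under the assumed three-part address format
// ("<method>:<ledger>:<identifier>"); the concrete address below is made up.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn source_round_trip() {
        // to_source prefixes the last address part with the sequence number.
        let source = to_source("pay:sov:abc", 42).unwrap();
        assert_eq!(source, "pay:sov:42_abc");

        // from_source splits the sequence number back out again.
        let (seq_no, recipient) = from_source(&source).unwrap();
        assert_eq!(seq_no, 42);
        assert_eq!(recipient, "pay:sov:abc");
    }
}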
|
|
util.go
|
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package channel
import (
"sync"
"github.com/elastic/beats/libbeat/beat"
)
type subOutlet struct {
done chan struct{}
ch chan beat.Event
res chan bool
mutex sync.Mutex
closeOnce sync.Once
}
// SubOutlet creates a sub-outlet, which can be closed individually without
// closing the underlying outlet.
func SubOutlet(out Outleter) Outleter {
s := &subOutlet{
done: make(chan struct{}),
ch: make(chan beat.Event),
res: make(chan bool, 1),
}
go func() {
for event := range s.ch {
s.res <- out.OnEvent(event)
}
}()
|
func (o *subOutlet) Close() error {
o.closeOnce.Do(func() {
// Signal OnEvent() to terminate
close(o.done)
// This mutex prevents the event channel from being closed while OnEvent
// is still running.
o.mutex.Lock()
defer o.mutex.Unlock()
close(o.ch)
})
return nil
}
func (o *subOutlet) Done() <-chan struct{} {
return o.done
}
func (o *subOutlet) OnEvent(event beat.Event) bool {
o.mutex.Lock()
defer o.mutex.Unlock()
select {
case <-o.done:
return false
default:
}
select {
case <-o.done:
return false
case o.ch <- event:
select {
case <-o.done:
// Note: this is log-harvester specific (a leaky abstraction).
// A close at this point in time indicates an event that has already been
// sent to the publisher worker, which forwards events to the publisher
// pipeline. On shutdown the harvester insists on updating its state (by
// pushing one more state update into the publisher pipeline) and requires
// the most recent state in the harvester itself (which can only update
// state on a 'true' response).
// That state update will appear after the current event in the publisher
// pipeline. So, by returning true here, the final state update will be
// presented to the registrar after the last event has been processed.
// Once all messages are in the publisher pipeline, in the correct order,
// it is up to the registrar/publisher pipeline whether the state actually
// ends up in the registrar.
return true
case ret := <-o.res:
return ret
}
}
}
// CloseOnSignal closes the outlet, once the signal triggers.
func CloseOnSignal(outlet Outleter, sig <-chan struct{}) Outleter {
if sig != nil {
go func() {
select {
case <-outlet.Done():
return
case <-sig:
outlet.Close()
}
}()
}
return outlet
}
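// A minimal usage sketch (the `parent` Outleter and the shutdown channel are
// assumptions, not defined in this file):
//
//   sub := SubOutlet(parent)        // closable independently of parent
//   sig := make(chan struct{})
//   out := CloseOnSignal(sub, sig)  // close(sig) closes only the sub-outlet
//   ok := out.OnEvent(beat.Event{}) // false once the sub-outlet is closed
//   close(sig)                      // parent keeps accepting events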
|
return s
}
|
ruby.js
|
define(function(require, exports, module) {
"use strict";
|
exports.snippetText = require("../requirejs/text!./ruby.snippets");
exports.scope = "ruby";
});
| |
index.js
|
import React, { useState, useEffect } from "react";
import Layout from "components/Layout";
import Sidebar from "components/Sidebar";
import { getDataCookie } from "middleware/authPage";
import axios from "utils/axios";
import { useRouter } from "next/router";
import PersonalInfo from "components/Profile/PersonalInfo";
export async function
|
(context) {
const dataCookie = await getDataCookie(context);
if (!dataCookie.isLogin) {
return {
redirect: {
destination: "/auth/login",
permanent: false,
},
};
}
const historyList = await axios
.get(`/transaction/history?page=1&limit=6&filter=MONTH`, {
headers: {
Authorization: `Bearer ${dataCookie.token}`,
},
})
.then((res) => {
console.log("THEN");
console.log(res);
return res.data.data;
})
.catch((err) => {
console.log("CATCH");
return [];
});
return {
props: { historyList: historyList },
};
}
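// A shape sketch of the results above (the res.data.data access assumes the
// API wraps its payload as { data: { data: [...] } }):
//
//   not logged in -> { redirect: { destination: "/auth/login", permanent: false } }
//   logged in     -> { props: { historyList: [...] } }  // [] if the request fails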
export default function EditPersonalInfoUser(props) {
const router = useRouter();
console.log(router.query);
console.log(props.historyList);
return (
<>
<Layout title="Dashboard | History">
<div className="container">
<div className="row rp">
<Sidebar activePage="menu4" />
<div className="row col-lg-9 rp">
<PersonalInfo />
</div>
</div>
</div>
</Layout>
</>
);
}
|
getServerSideProps
|
transfer_learning_tutorial.py
|
# -*- coding: utf-8 -*-
"""
Transfer Learning for Computer Vision
=======================================================
**Author**: `Sasank Chilamkurthy <https://chsasank.github.io>`_
**Translation**: `박정환 <http://github.com/9bow>`_
In this tutorial, you will learn how to train a convolutional neural network
for image classification using transfer learning. You can read more about
transfer learning in the `CS231n notes <http://cs231n.github.io/transfer-learning/>`__.
Quoting those notes,
In practice, very few people train an entire Convolutional Network
from scratch (with random initialization), because it is relatively rare
to have a dataset of sufficient size. Instead, it is common to pretrain a
ConvNet on a very large dataset (e.g. ImageNet, which contains 1.2 million
images across 1000 categories) and then use that ConvNet either as an
initialization or as a fixed feature extractor for the task of interest.
The two major transfer learning scenarios are:
- **Finetuning the convnet**: Instead of random initialization, the network is
initialized with a network pretrained on, e.g., the ImageNet 1000 dataset.
The rest of the training looks as usual.
- **ConvNet as a fixed feature extractor**: Here, the weights of the whole
network are frozen except for the final fully connected layer. This last
fully connected layer is replaced with a new one with random weights, and
only this layer is trained.
"""
# License: BSD
# Author: Sasank Chilamkurthy
from __future__ import print_function, division
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import time
import os
import copy
plt.ion() # interactive mode
######################################################################
# Load Data
# ---------------
#
# We will use the torchvision and torch.utils.data packages to load the data.
#
# The problem we are solving here is training a model to classify **ants** and
# **bees**. There are roughly 120 training images each for ants and bees, and
# 75 validation images for each class. If trained from scratch, this would
# usually be a very small dataset to generalize from. Since we are using
# transfer learning, however, we should be able to generalize reasonably well.
#
# This dataset is a very small subset of ImageNet.
#
# .. Note ::
#    Download the data from `here <https://download.pytorch.org/tutorial/hymenoptera_data.zip>`_
#    and extract it to the current directory.
# Data augmentation and normalization for training
# Just normalization for validation
data_transforms = {
'train': transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
'val': transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
}
data_dir = 'data/hymenoptera_data'
image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),
data_transforms[x])
for x in ['train', 'val']}
dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=4,
shuffle=True, num_workers=4)
for x in ['train', 'val']}
dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}
class_names = image_datasets['train'].classes
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
######################################################################
# Visualize a few images
# ^^^^^^^^^^^^^^^^^^^^^^^^^
# Let's visualize a few training images to understand the data augmentation.
def imshow(inp, title=None):
"""Imshow for Tensor."""
inp = inp.numpy().transpose((1, 2, 0))
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
inp = std * inp + mean
inp = np.clip(inp, 0, 1)
plt.imshow(inp)
if title is not None:
plt.title(title)
plt.pause(0.001)  # pause a bit so that the plots are updated
# Get a batch of training data
inputs, classes = next(iter(dataloaders['train']))
# Make a grid from the batch
out = torchvision.utils.make_grid(inputs)
imshow(out, title=[class_names[x] for x in classes])
######################################################################
# Training the model
# --------------
#
# Now let's write a general function to train a model. Here we will
# illustrate:
#
# - Scheduling the learning rate
# - Saving the best model
#
# In the following, the ``scheduler`` parameter is an LR scheduler object from
# ``torch.optim.lr_scheduler``.
def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
since = time.time()
best_model_wts = copy.deepcopy(model.state_dict())
best_acc = 0.0
for epoch in range(num_epochs):
print('Epoch {}/{}'.format(epoch, num_epochs - 1))
print('-' * 10)
# Each epoch has a training phase and a validation phase.
for phase in ['train', 'val']:
if phase == 'train':
model.train() # Set the model to training mode
else:
model.eval() # Set the model to evaluation mode
running_loss = 0.0
running_corrects = 0
# Iterate over the data.
for inputs, labels in dataloaders[phase]:
inputs = inputs.to(device)
labels = labels.to(device)
# Zero the parameter gradients.
optimizer.zero_grad()
# Forward pass.
# Track history only in the training phase.
with torch.set_grad_enabled(phase == 'train'):
|
# Backward pass + optimize, only in the training phase.
if phase == 'train':
loss.backward()
optimizer.step()
# Statistics
running_loss += loss.item() * inputs.size(0)
running_corrects += torch.sum(preds == labels.data)
if phase == 'train':
scheduler.step()
epoch_loss = running_loss / dataset_sizes[phase]
epoch_acc = running_corrects.double() / dataset_sizes[phase]
print('{} Loss: {:.4f} Acc: {:.4f}'.format(
phase, epoch_loss, epoch_acc))
# Deep copy the model.
if phase == 'val' and epoch_acc > best_acc:
best_acc = epoch_acc
best_model_wts = copy.deepcopy(model.state_dict())
print()
time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(
time_elapsed // 60, time_elapsed % 60))
print('Best val Acc: {:4f}'.format(best_acc))
# Load the best model weights.
model.load_state_dict(best_model_wts)
return model
######################################################################
# Visualizing the model predictions
# ^^^^^^^^^^^^^^^^^^^^^^^
#
# A generic function to display predictions for a few images.
#
def visualize_model(model, num_images=6):
was_training = model.training
model.eval()
images_so_far = 0
fig = plt.figure()
with torch.no_grad():
for i, (inputs, labels) in enumerate(dataloaders['val']):
inputs = inputs.to(device)
labels = labels.to(device)
outputs = model(inputs)
_, preds = torch.max(outputs, 1)
for j in range(inputs.size()[0]):
images_so_far += 1
ax = plt.subplot(num_images//2, 2, images_so_far)
ax.axis('off')
ax.set_title('predicted: {}'.format(class_names[preds[j]]))
imshow(inputs.cpu().data[j])
if images_so_far == num_images:
model.train(mode=was_training)
return
model.train(mode=was_training)
######################################################################
# Finetuning the convnet
# ----------------------------------
#
# Load a pretrained model and reset the final fully connected layer.
#
model_ft = models.resnet18(pretrained=True)
num_ftrs = model_ft.fc.in_features
# Here the size of each output sample is set to 2.
# Alternatively, this can be generalized to nn.Linear(num_ftrs, len(class_names)).
model_ft.fc = nn.Linear(num_ftrs, 2)
model_ft = model_ft.to(device)
criterion = nn.CrossEntropyLoss()
# Observe that all parameters are being optimized.
optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9)
# Decay the learning rate by a factor of 0.1 every 7 epochs.
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)
######################################################################
# Train and evaluate
# ^^^^^^^^^^^^^^^^^^
#
# This should take around 15-25 minutes on CPU. On GPU it takes less than a minute.
#
model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler,
num_epochs=25)
######################################################################
#
visualize_model(model_ft)
######################################################################
# ConvNet as a fixed feature extractor
# ---------------------------------------
#
# Here we need to freeze the whole network except the final layer. We set
# ``requires_grad == False`` to freeze the parameters so that the gradients
# are not computed in ``backward()``.
#
# You can read more about this in the documentation
# `here <http://pytorch.org/docs/notes/autograd.html#excluding-subgraphs-from-backward>`__.
#
model_conv = torchvision.models.resnet18(pretrained=True)
for param in model_conv.parameters():
param.requires_grad = False
# Parameters of newly constructed modules have requires_grad=True by default.
num_ftrs = model_conv.fc.in_features
model_conv.fc = nn.Linear(num_ftrs, 2)
model_conv = model_conv.to(device)
criterion = nn.CrossEntropyLoss()
# Observe that, unlike before, only the parameters of the final layer are being optimized.
optimizer_conv = optim.SGD(model_conv.fc.parameters(), lr=0.001, momentum=0.9)
# Decay the learning rate by a factor of 0.1 every 7 epochs.
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_conv, step_size=7, gamma=0.1)
######################################################################
# Train and evaluate
# ^^^^^^^^^^^^^^^^^
#
# On CPU this will take about half the time of the previous scenario, since
# gradients don't need to be computed for most of the network. The forward
# pass still has to be computed, however.
#
model_conv = train_model(model_conv, criterion, optimizer_conv,
exp_lr_scheduler, num_epochs=25)
######################################################################
#
visualize_model(model_conv)
plt.ioff()
plt.show()
######################################################################
# Further Learning
# -----------------
#
# To learn more about applications of transfer learning, see
# :doc:`/intermediate/quantized_transfer_learning_tutorial`.
#
#
|
outputs = model(inputs)
_, preds = torch.max(outputs, 1)
loss = criterion(outputs, labels)
|
page_objects.py
|
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from e2e.tests.selenium.locators import LearningCircleCreationPageLocators
from e2e.tests.selenium.locators import RegistrationModalLocators
import datetime
import time
class BasePage(object):
def __init__(self, driver, wait):
self.driver = driver
self.wait = wait
def fill_text_field(self, locator, *text):
input_field = self.driver.find_element(*locator)
try:
input_field.clear()
except Exception:
pass
finally:
input_field.send_keys(*text)
def fill_rich_text_field(self, locator, *text):
tinymce_iframe = self.wait.until(expected_conditions.presence_of_element_located(locator))
self.driver.switch_to.frame(tinymce_iframe)
rich_text_field = self.wait.until(expected_conditions.presence_of_element_located(LearningCircleCreationPageLocators.TINYMCE_FIELD))
rich_text_field.send_keys(*text)
self.driver.switch_to.default_content()
class LearningCircleCreationPage(BasePage):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def fill_out_form_correctly(self):
self.select_first_course()
self.click_next_button()
self.fill_city_select_field("Kitchener")
self.fill_text_field(LearningCircleCreationPageLocators.VENUE_NAME_FIELD, "KPL")
self.fill_text_field(LearningCircleCreationPageLocators.VENUE_DETAILS_FIELD, "Hacienda Cafe")
self.fill_text_field(LearningCircleCreationPageLocators.VENUE_ADDRESS_FIELD, "85 Queen St N, Kitchener")
self.click_next_button()
self.select_start_date()
self.select_suggested_dates()
self.wait.until(expected_conditions.presence_of_element_located((By.CSS_SELECTOR, '#selected-dates li')))
self.fill_text_field(LearningCircleCreationPageLocators.MEETING_TIME_FIELD, "7:00 PM", Keys.ENTER)
self.fill_text_field(LearningCircleCreationPageLocators.MEETING_END_TIME_FIELD, "8:00 PM", Keys.ENTER)
self.click_next_button()
self.fill_text_field(LearningCircleCreationPageLocators.TITLE_FIELD, "Sharon's Learning Circle")
self.fill_rich_text_field(LearningCircleCreationPageLocators.DESCRIPTION_FIELD, "Welcome to my learning circle!")
self.fill_rich_text_field(LearningCircleCreationPageLocators.COURSE_DESCRIPTION_FIELD, "This is the course description")
self.fill_text_field(LearningCircleCreationPageLocators.SIGNUP_QUESTION_FIELD, "What do you want to learn?")
self.fill_text_field(LearningCircleCreationPageLocators.VENUE_WEBSITE_FIELD, "https://www.kpl.org")
self.click_next_button()
self.fill_text_field(LearningCircleCreationPageLocators.FACILITATOR_GOAL_FIELD, "Have a great learning circle")
self.fill_text_field(LearningCircleCreationPageLocators.FACILITATOR_CONCERNS_FIELD, "Nothing really")
def select_start_date(self):
calendar_date = self.wait.until(expected_conditions.element_to_be_clickable(LearningCircleCreationPageLocators.CALENDAR_TODAY))
calendar_date.click()
def select_suggested_dates(self):
btn = self.wait.until(expected_conditions.element_to_be_clickable(LearningCircleCreationPageLocators.ACCEPT_SUGGESTED_DATES_BUTTON))
# use this instead of btn.click() since the button is out of view
self.driver.execute_script("return arguments[0].click();", btn)
def select_first_course(self):
course_cards = self.wait.until(expected_conditions.visibility_of_all_elements_located(LearningCircleCreationPageLocators.COURSE_CARDS))
self.wait.until(expected_conditions.text_to_be_present_in_element(LearningCircleCreationPageLocators.FIRST_COURSE_TITLE, "Academic Writing"))
course_select_button = self.wait.until(expected_conditions.element_to_be_clickable(LearningCircleCreationPageLocators.FIRST_COURSE_BUTTON))
# button is out of view
self.driver.execute_script("return arguments[0].click();", course_select_button)
# wait until search container is gone
self.wait.until_not(expected_conditions.presence_of_element_located((By.CSS_SELECTOR, '.search-container')))
remove_link = self.wait.until(expected_conditions.visibility_of_element_located(LearningCircleCreationPageLocators.REMOVE_COURSE_SELECTION_LINK))
assert 'Remove selection' in remove_link.text
def fill_city_select_field(self, location):
city_select = self.wait.until(expected_conditions.visibility_of_element_located(LearningCircleCreationPageLocators.CITY_SELECT_INPUT))
city_select.send_keys(location)
self.wait.until(expected_conditions.element_to_be_clickable(LearningCircleCreationPageLocators.CITY_SELECT_OPTION))
city_select.send_keys(Keys.ENTER)
def click_next_button(self):
next_button = self.wait.until(expected_conditions.element_to_be_clickable(LearningCircleCreationPageLocators.NEXT_TAB_BUTTON))
next_button.click()
def click_publish_button(self):
publish_button = self.wait.until(expected_conditions.element_to_be_clickable(LearningCircleCreationPageLocators.PUBLISH_BUTTON))
publish_button.click()
def click_save_button(self):
save_button = self.wait.until(expected_conditions.element_to_be_clickable(LearningCircleCreationPageLocators.SAVE_BUTTON))
save_button.click()
def click_modal_button(self):
modal_button = self.wait.until(expected_conditions.element_to_be_clickable(LearningCircleCreationPageLocators.MODAL_BUTTON))
|
meetings_button = self.wait.until(expected_conditions.element_to_be_clickable(LearningCircleCreationPageLocators.SCHEDULE_MEETINGS_BUTTON))
meetings_button.click()
def click_login_link(self):
self.driver.find_element(By.CSS_SELECTOR, '.registration-modal-content button:first-child').click()
def fill_out_login_modal(self, user_data):
self.fill_text_field(RegistrationModalLocators.EMAIL_FIELD, user_data["email"])
self.fill_text_field(RegistrationModalLocators.PASSWORD_FIELD, user_data["password"])
self.driver.find_element(*RegistrationModalLocators.SUBMIT_BUTTON).click()
def go_to_tab_1(self):
tab_button = self.wait.until(expected_conditions.element_to_be_clickable(LearningCircleCreationPageLocators.TAB_1))
tab_button.click()
def go_to_tab_2(self):
tab_button = self.wait.until(expected_conditions.element_to_be_clickable(LearningCircleCreationPageLocators.TAB_2))
tab_button.click()
def go_to_tab_3(self):
tab_button = self.wait.until(expected_conditions.element_to_be_clickable(LearningCircleCreationPageLocators.TAB_3))
tab_button.click()
def go_to_tab_4(self):
tab_button = self.wait.until(expected_conditions.element_to_be_clickable(LearningCircleCreationPageLocators.TAB_4))
tab_button.click()
def go_to_tab_5(self):
tab_button = self.wait.until(expected_conditions.element_to_be_clickable(LearningCircleCreationPageLocators.TAB_5))
tab_button.click()
def close_alert(self):
close_button = self.wait.until(expected_conditions.element_to_be_clickable(LearningCircleCreationPageLocators.ALERT_CLOSE_BUTTON))
close_button.click()
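# A minimal usage sketch (hypothetical driver/wait setup; only
# LearningCircleCreationPage comes from this module):
#
#   from selenium import webdriver
#   from selenium.webdriver.support.ui import WebDriverWait
#
#   driver = webdriver.Firefox()
#   wait = WebDriverWait(driver, 10)
#   page = LearningCircleCreationPage(driver, wait)
#   page.fill_out_form_correctly()
#   page.click_publish_button()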
|
modal_button.click()
def click_schedule_meetings_button(self):
|
channel_group.ts
|
import * as vs from "../../../../domain/validation"
import {
ChannelGroupTimelineQueryRepository,
MessageQueryRepository,
} from "../../../../infrastructure/prisma/repository"
import { InternalErrorSpec, UnexpectedErrorSpec, raise } from "../../error"
import { MethodFacts, defineArguments, defineErrors, defineMethod } from "../../define"
import { ContentTypes } from "../../facts/content_type"
import { HttpMethods } from "../../facts/http_method"
import { MessageEntity } from "../../../../domain/entity/Message"
import { MethodIdentifiers } from "../../identifier"
import { SortOrder } from "../../../../domain/repository/query/ChannelGroupTimeline"
export const argumentSpecs = defineArguments(
["channel_group_id", "since_id", "max_id", "limit", "sort_order"] as const,
{
channel_group_id: {
description: ["チャンネルグループID"],
examples: ["123456"],
required: true,
validator: vs.channelGroupId(),
},
max_id: {
description: ["このID以前の投稿を取得します", "`max_id`の投稿は含まれません"],
examples: ["123456"],
required: false,
validator: vs.integer(),
},
since_id: {
description: ["このID以降の投稿を取得します", "`since_id`の投稿は含まれません"],
examples: ["123456"],
required: false,
validator: vs.integer(),
},
limit: {
description: ["取得する投稿の上限"],
examples: ["50"],
required: false,
validator: vs.integer({ minValue: 1, maxValue: 100 }),
},
sort_order: {
description: ["取得する投稿のソート順"],
examples: ["descending"],
required: false,
validator: vs.string(),
},
}
)
export const expectedErrorSpecs = defineErrors(
["not_found", "internal_error", "unexpected_error"] as const,
argumentSpecs,
{
not_found: {
description: ["指定されたチャンネルグループが見つかりませんでした"],
hint: [],
argument: "channel_group_id",
code: "not_found",
},
internal_error: new InternalErrorSpec(),
unexpected_error: new UnexpectedErrorSpec(),
}
)
export const facts: MethodFacts = {
url: MethodIdentifiers.ChannelGroupTimeline,
httpMethod: HttpMethods.GET,
rateLimiting: {},
acceptedContentTypes: [ContentTypes.ApplicationJson],
authenticationRequired: false,
private: false,
acceptedAuthenticationMethods: [],
acceptedScopes: {},
description: ["チャンネルグループのタイムラインを取得します"],
}
type ReturnType = Promise<MessageEntity[]>
function getSortOrder(sortOrderString?: string) {
if (sortOrderString == SortOrder.Descending) {
return SortOrder.Descending
}
if (sortOrderString == SortOrder.Ascending) {
return SortOrder.Ascending
}
return SortOrde
|
}
export default defineMethod(facts, argumentSpecs, expectedErrorSpecs, async (args, errors, authUser): ReturnType => {
try {
const messageIds = await new ChannelGroupTimelineQueryRepository().listMessageId({
channelGroupId: args.channel_group_id,
maxId: args.max_id,
sinceId: args.since_id,
limit: args.limit ? args.limit : 30,
sortOrder: getSortOrder(args.sort_order),
})
const messageQueryRepository = new MessageQueryRepository()
const messages = []
for (const messageId of messageIds) {
const message = await messageQueryRepository.findById(messageId)
if (message) {
messages.push(message)
}
}
return messages
} catch (error) {
if (error instanceof Error) {
raise(errors["unexpected_error"], error)
} else {
raise(errors["unexpected_error"], new Error("unexpected_error"))
}
}
})
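// A request sketch built from the argumentSpecs examples above (the URL path
// is an assumption; the real one comes from MethodIdentifiers.ChannelGroupTimeline):
//
//   GET <base>/channel_group/timeline?channel_group_id=123456&limit=50&sort_order=descending
//
// Omitting `limit` falls back to 30, and an unrecognized `sort_order` falls
// back to the default returned by getSortOrder above.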
|
r.Descending
|
converterfromapi_test.go
|
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT license.
package api
import (
"net/url"
"testing"
"github.com/Azure/go-autorest/autorest/to"
"github.com/Azure/aks-engine/pkg/api/vlabs"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/google/go-cmp/cmp"
)
const ValidSSHPublicKey = "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAQEApD8+lRvLtUcyfO8N2Cwq0zY9DG1Un9d+tcmU3HgnAzBr6UR/dDT5M07NV7DN1lmu/0dt6Ay/ItjF9xK//nwVJL3ezEX32yhLKkCKFMB1LcANNzlhT++SB5tlRBx65CTL8z9FORe4UCWVJNafxu3as/BshQSrSaYt3hjSeYuzTpwd4+4xQutzbTXEUBDUr01zEfjjzfUu0HDrg1IFae62hnLm3ajG6b432IIdUhFUmgjZDljUt5bI3OEz5IWPsNOOlVTuo6fqU8lJHClAtAlZEZkyv0VotidC7ZSCfV153rRsEk9IWscwL2PQIQnCw7YyEYEffDeLjBwkH6MIdJ6OgQ== rsa-key-20170510"
func TestConvertCloudProfileToVLabs(t *testing.T) {
const (
name = "AzureStackCloud"
managementPortalURL = "https://management.local.azurestack.external/"
publishSettingsURL = "https://management.local.azurestack.external/publishsettings/index"
serviceManagementEndpoint = "https://management.azurestackci15.onmicrosoft.com/36f71706-54df-4305-9847-5b038a4cf189"
resourceManagerEndpoint = "https://management.local.azurestack.external/"
activeDirectoryEndpoint = "https://login.windows.net/"
galleryEndpoint = "https://portal.local.azurestack.external=30015/"
keyVaultEndpoint = "https://vault.azurestack.external/"
graphEndpoint = "https://graph.windows.net/"
serviceBusEndpoint = "https://servicebus.azurestack.external/"
batchManagementEndpoint = "https://batch.azurestack.external/"
storageEndpointSuffix = "core.azurestack.external"
sqlDatabaseDNSSuffix = "database.azurestack.external"
trafficManagerDNSSuffix = "trafficmanager.cn"
keyVaultDNSSuffix = "vault.azurestack.external"
serviceBusEndpointSuffix = "servicebus.azurestack.external"
serviceManagementVMDNSSuffix = "chinacloudapp.cn"
resourceManagerVMDNSSuffix = "cloudapp.azurestack.external"
containerRegistryDNSSuffix = "azurecr.io"
tokenAudience = "https://management.azurestack.external/"
)
cs := &ContainerService{
Properties: &Properties{
CustomCloudProfile: &CustomCloudProfile{
IdentitySystem: AzureADIdentitySystem,
AuthenticationMethod: ClientSecretAuthMethod,
Environment: &azure.Environment{
Name: name,
ManagementPortalURL: managementPortalURL,
PublishSettingsURL: publishSettingsURL,
ServiceManagementEndpoint: serviceManagementEndpoint,
ResourceManagerEndpoint: resourceManagerEndpoint,
ActiveDirectoryEndpoint: activeDirectoryEndpoint,
GalleryEndpoint: galleryEndpoint,
KeyVaultEndpoint: keyVaultEndpoint,
GraphEndpoint: graphEndpoint,
ServiceBusEndpoint: serviceBusEndpoint,
BatchManagementEndpoint: batchManagementEndpoint,
StorageEndpointSuffix: storageEndpointSuffix,
SQLDatabaseDNSSuffix: sqlDatabaseDNSSuffix,
TrafficManagerDNSSuffix: trafficManagerDNSSuffix,
KeyVaultDNSSuffix: keyVaultDNSSuffix,
ServiceBusEndpointSuffix: serviceBusEndpointSuffix,
ServiceManagementVMDNSSuffix: serviceManagementVMDNSSuffix,
ResourceManagerVMDNSSuffix: resourceManagerVMDNSSuffix,
ContainerRegistryDNSSuffix: containerRegistryDNSSuffix,
TokenAudience: tokenAudience,
},
},
},
}
vlabscs := ConvertContainerServiceToVLabs(cs)
if vlabscs.Properties.CustomCloudProfile.AuthenticationMethod != ClientSecretAuthMethod {
t.Errorf("incorrect AuthenticationMethod, expect: '%s', actual: '%s'", ClientSecretAuthMethod, vlabscs.Properties.CustomCloudProfile.AuthenticationMethod)
}
if vlabscs.Properties.CustomCloudProfile.IdentitySystem != AzureADIdentitySystem {
t.Errorf("incorrect IdentitySystem, expect: '%s', actual: '%s'", AzureADIdentitySystem, vlabscs.Properties.CustomCloudProfile.IdentitySystem)
}
if vlabscs.Properties.CustomCloudProfile.Environment.Name != name {
t.Errorf("incorrect Name, expect: '%s', actual: '%s'", name, vlabscs.Properties.CustomCloudProfile.Environment.Name)
}
if vlabscs.Properties.CustomCloudProfile.Environment.ManagementPortalURL != managementPortalURL {
t.Errorf("incorrect ManagementPortalURL, expect: '%s', actual: '%s'", managementPortalURL, vlabscs.Properties.CustomCloudProfile.Environment.ManagementPortalURL)
}
if vlabscs.Properties.CustomCloudProfile.Environment.PublishSettingsURL != publishSettingsURL {
t.Errorf("incorrect PublishSettingsURL, expect: '%s', actual: '%s'", publishSettingsURL, vlabscs.Properties.CustomCloudProfile.Environment.PublishSettingsURL)
}
if vlabscs.Properties.CustomCloudProfile.Environment.ServiceManagementEndpoint != serviceManagementEndpoint {
t.Errorf("incorrect ServiceManagementEndpoint, expect: '%s', actual: '%s'", serviceManagementEndpoint, vlabscs.Properties.CustomCloudProfile.Environment.ServiceManagementEndpoint)
}
if vlabscs.Properties.CustomCloudProfile.Environment.ResourceManagerEndpoint != resourceManagerEndpoint {
t.Errorf("incorrect ResourceManagerEndpoint, expect: '%s', actual: '%s'", resourceManagerEndpoint, vlabscs.Properties.CustomCloudProfile.Environment.ResourceManagerEndpoint)
}
if vlabscs.Properties.CustomCloudProfile.Environment.ActiveDirectoryEndpoint != activeDirectoryEndpoint {
t.Errorf("incorrect ActiveDirectoryEndpoint, expect: '%s', actual: '%s'", activeDirectoryEndpoint, vlabscs.Properties.CustomCloudProfile.Environment.ActiveDirectoryEndpoint)
}
if vlabscs.Properties.CustomCloudProfile.Environment.GalleryEndpoint != galleryEndpoint {
t.Errorf("incorrect GalleryEndpoint, expect: '%s', actual: '%s'", galleryEndpoint, vlabscs.Properties.CustomCloudProfile.Environment.GalleryEndpoint)
}
if vlabscs.Properties.CustomCloudProfile.Environment.KeyVaultEndpoint != keyVaultEndpoint {
t.Errorf("incorrect KeyVaultEndpoint, expect: '%s', actual: '%s'", keyVaultEndpoint, vlabscs.Properties.CustomCloudProfile.Environment.KeyVaultEndpoint)
}
if vlabscs.Properties.CustomCloudProfile.Environment.GraphEndpoint != graphEndpoint {
t.Errorf("incorrect GraphEndpoint, expect: '%s', actual: '%s'", graphEndpoint, vlabscs.Properties.CustomCloudProfile.Environment.GraphEndpoint)
}
if vlabscs.Properties.CustomCloudProfile.Environment.ServiceBusEndpoint != serviceBusEndpoint {
t.Errorf("incorrect ServiceBusEndpoint, expect: '%s', actual: '%s'", serviceBusEndpoint, vlabscs.Properties.CustomCloudProfile.Environment.ServiceBusEndpoint)
}
if vlabscs.Properties.CustomCloudProfile.Environment.BatchManagementEndpoint != batchManagementEndpoint {
t.Errorf("incorrect BatchManagementEndpoint, expect: '%s', actual: '%s'", batchManagementEndpoint, vlabscs.Properties.CustomCloudProfile.Environment.BatchManagementEndpoint)
}
if vlabscs.Properties.CustomCloudProfile.Environment.StorageEndpointSuffix != storageEndpointSuffix {
t.Errorf("incorrect StorageEndpointSuffix, expect: '%s', actual: '%s'", storageEndpointSuffix, vlabscs.Properties.CustomCloudProfile.Environment.StorageEndpointSuffix)
}
if vlabscs.Properties.CustomCloudProfile.Environment.SQLDatabaseDNSSuffix != sqlDatabaseDNSSuffix {
t.Errorf("incorrect SQLDatabaseDNSSuffix, expect: '%s', actual: '%s'", sqlDatabaseDNSSuffix, vlabscs.Properties.CustomCloudProfile.Environment.SQLDatabaseDNSSuffix)
}
if vlabscs.Properties.CustomCloudProfile.Environment.TrafficManagerDNSSuffix != trafficManagerDNSSuffix {
t.Errorf("incorrect TrafficManagerDNSSuffix, expect: '%s', actual: '%s'", trafficManagerDNSSuffix, vlabscs.Properties.CustomCloudProfile.Environment.TrafficManagerDNSSuffix)
}
if vlabscs.Properties.CustomCloudProfile.Environment.KeyVaultDNSSuffix != keyVaultDNSSuffix {
t.Errorf("incorrect KeyVaultDNSSuffix, expect: '%s', actual: '%s'", keyVaultDNSSuffix, vlabscs.Properties.CustomCloudProfile.Environment.KeyVaultDNSSuffix)
}
if vlabscs.Properties.CustomCloudProfile.Environment.ServiceBusEndpointSuffix != serviceBusEndpointSuffix {
t.Errorf("incorrect ServiceBusEndpointSuffix, expect: '%s', actual: '%s'", serviceBusEndpointSuffix, vlabscs.Properties.CustomCloudProfile.Environment.ServiceBusEndpointSuffix)
}
if vlabscs.Properties.CustomCloudProfile.Environment.ServiceManagementVMDNSSuffix != serviceManagementVMDNSSuffix {
t.Errorf("incorrect ServiceManagementVMDNSSuffix, expect: '%s', actual: '%s'", serviceManagementVMDNSSuffix, vlabscs.Properties.CustomCloudProfile.Environment.ServiceManagementVMDNSSuffix)
}
if vlabscs.Properties.CustomCloudProfile.Environment.ResourceManagerVMDNSSuffix != resourceManagerVMDNSSuffix {
t.Errorf("incorrect ResourceManagerVMDNSSuffix, expect: '%s', actual: '%s'", resourceManagerVMDNSSuffix, vlabscs.Properties.CustomCloudProfile.Environment.ResourceManagerVMDNSSuffix)
}
if vlabscs.Properties.CustomCloudProfile.Environment.ContainerRegistryDNSSuffix != containerRegistryDNSSuffix {
t.Errorf("incorrect ContainerRegistryDNSSuffix, expect: '%s', actual: '%s'", containerRegistryDNSSuffix, vlabscs.Properties.CustomCloudProfile.Environment.ContainerRegistryDNSSuffix)
}
if vlabscs.Properties.CustomCloudProfile.Environment.TokenAudience != tokenAudience {
t.Errorf("incorrect TokenAudience, expect: '%s', actual: '%s'", tokenAudience, vlabscs.Properties.CustomCloudProfile.Environment.TokenAudience)
}
}
func TestConvertAzureEnvironmentSpecConfigToVLabs(t *testing.T) {
//Mock AzureEnvironmentSpecConfig
cs := &ContainerService{
Properties: &Properties{
CustomCloudProfile: &CustomCloudProfile{
IdentitySystem: ADFSIdentitySystem,
AuthenticationMethod: ClientCertificateAuthMethod,
AzureEnvironmentSpecConfig: &AzureEnvironmentSpecConfig{
CloudName: "AzureStackCloud",
//DockerSpecConfig specifies the docker engine download repo
DockerSpecConfig: DockerSpecConfig{
DockerEngineRepo: "DockerEngineRepo",
DockerComposeDownloadURL: "DockerComposeDownloadURL",
},
//KubernetesSpecConfig - since the default container images from Google are blocked by the Chinese firewall, the Chinese local mirror is used instead
KubernetesSpecConfig: KubernetesSpecConfig{
AzureTelemetryPID: "AzureTelemetryPID",
KubernetesImageBase: "KubernetesImageBase",
TillerImageBase: "TillerImageBase",
ACIConnectorImageBase: "ACIConnectorImageBase",
NVIDIAImageBase: "NVIDIAImageBase",
AzureCNIImageBase: "AzureCNIImageBase",
CalicoImageBase: "CalicoImageBase",
EtcdDownloadURLBase: "EtcdDownloadURLBase",
KubeBinariesSASURLBase: "KubeBinariesSASURLBase",
WindowsTelemetryGUID: "WindowsTelemetryGUID",
CNIPluginsDownloadURL: "CNIPluginsDownloadURL",
VnetCNILinuxPluginsDownloadURL: "VnetCNILinuxPluginsDownloadURL",
VnetCNIWindowsPluginsDownloadURL: "VnetCNIWindowsPluginsDownloadURL",
ContainerdDownloadURLBase: "ContainerdDownloadURLBase",
},
DCOSSpecConfig: DCOSSpecConfig{
DCOS188BootstrapDownloadURL: "DCOS188BootstrapDownloadURL",
DCOS190BootstrapDownloadURL: "DCOS190BootstrapDownloadURL",
DCOS198BootstrapDownloadURL: "DCOS198BootstrapDownloadURL",
DCOS110BootstrapDownloadURL: "DCOS110BootstrapDownloadURL",
DCOS111BootstrapDownloadURL: "DCOS111BootstrapDownloadURL",
DCOSWindowsBootstrapDownloadURL: "DCOSWindowsBootstrapDownloadURL",
DcosRepositoryURL: "DcosRepositoryURL",
DcosClusterPackageListID: "DcosClusterPackageListID",
DcosProviderPackageID: "DcosProviderPackageID",
},
EndpointConfig: AzureEndpointConfig{
ResourceManagerVMDNSSuffix: "ResourceManagerVMDNSSuffix",
},
OSImageConfig: map[Distro]AzureOSImageConfig{
Distro("Test"): {
ImageOffer: "ImageOffer",
ImageSku: "ImageSku",
ImagePublisher: "ImagePublisher",
ImageVersion: "ImageVersion",
},
AKSUbuntu1604: AKSUbuntu1604OSImageConfig,
},
},
},
},
}
vlabscs := ConvertContainerServiceToVLabs(cs)
if vlabscs.Properties.CustomCloudProfile.AuthenticationMethod != ClientCertificateAuthMethod {
t.Errorf("incorrect AuthenticationMethod, expect: '%s', actual: '%s'", ClientCertificateAuthMethod, vlabscs.Properties.CustomCloudProfile.AuthenticationMethod)
}
if vlabscs.Properties.CustomCloudProfile.IdentitySystem != ADFSIdentitySystem {
t.Errorf("incorrect IdentitySystem, expect: '%s', actual: '%s'", ADFSIdentitySystem, vlabscs.Properties.CustomCloudProfile.IdentitySystem)
}
csSpec := cs.Properties.CustomCloudProfile.AzureEnvironmentSpecConfig
vlabscsSpec := vlabscs.Properties.CustomCloudProfile.AzureEnvironmentSpecConfig
if vlabscsSpec.CloudName != csSpec.CloudName {
t.Errorf("incorrect CloudName, expect: '%s', actual: '%s'", csSpec.CloudName, vlabscsSpec.CloudName)
}
//KubernetesSpecConfig
if vlabscsSpec.KubernetesSpecConfig.AzureTelemetryPID != csSpec.KubernetesSpecConfig.AzureTelemetryPID {
t.Errorf("incorrect AzureTelemetryPID, expect: '%s', actual: '%s'", csSpec.KubernetesSpecConfig.AzureTelemetryPID, vlabscsSpec.KubernetesSpecConfig.AzureTelemetryPID)
}
if vlabscsSpec.KubernetesSpecConfig.KubernetesImageBase != csSpec.KubernetesSpecConfig.KubernetesImageBase {
t.Errorf("incorrect KubernetesImageBase, expect: '%s', actual: '%s'", csSpec.KubernetesSpecConfig.KubernetesImageBase, vlabscsSpec.KubernetesSpecConfig.KubernetesImageBase)
}
if vlabscsSpec.KubernetesSpecConfig.TillerImageBase != csSpec.KubernetesSpecConfig.TillerImageBase {
t.Errorf("incorrect TillerImageBase, expect: '%s', actual: '%s'", csSpec.KubernetesSpecConfig.TillerImageBase, vlabscsSpec.KubernetesSpecConfig.TillerImageBase)
}
if vlabscsSpec.KubernetesSpecConfig.ACIConnectorImageBase != csSpec.KubernetesSpecConfig.ACIConnectorImageBase {
t.Errorf("incorrect ACIConnectorImageBase, expect: '%s', actual: '%s'", csSpec.KubernetesSpecConfig.ACIConnectorImageBase, vlabscsSpec.KubernetesSpecConfig.ACIConnectorImageBase)
}
if vlabscsSpec.KubernetesSpecConfig.NVIDIAImageBase != csSpec.KubernetesSpecConfig.NVIDIAImageBase {
t.Errorf("incorrect NVIDIAImageBase, expect: '%s', actual: '%s'", csSpec.KubernetesSpecConfig.NVIDIAImageBase, vlabscsSpec.KubernetesSpecConfig.NVIDIAImageBase)
}
if vlabscsSpec.KubernetesSpecConfig.AzureCNIImageBase != csSpec.KubernetesSpecConfig.AzureCNIImageBase {
t.Errorf("incorrect AzureCNIImageBase, expect: '%s', actual: '%s'", csSpec.KubernetesSpecConfig.AzureCNIImageBase, vlabscsSpec.KubernetesSpecConfig.AzureCNIImageBase)
}
if vlabscsSpec.KubernetesSpecConfig.EtcdDownloadURLBase != csSpec.KubernetesSpecConfig.EtcdDownloadURLBase {
t.Errorf("incorrect EtcdDownloadURLBase, expect: '%s', actual: '%s'", csSpec.KubernetesSpecConfig.EtcdDownloadURLBase, vlabscsSpec.KubernetesSpecConfig.EtcdDownloadURLBase)
}
if vlabscsSpec.KubernetesSpecConfig.KubeBinariesSASURLBase != csSpec.KubernetesSpecConfig.KubeBinariesSASURLBase {
t.Errorf("incorrect KubeBinariesSASURLBase, expect: '%s', actual: '%s'", csSpec.KubernetesSpecConfig.KubeBinariesSASURLBase, vlabscsSpec.KubernetesSpecConfig.KubeBinariesSASURLBase)
}
if vlabscsSpec.KubernetesSpecConfig.WindowsTelemetryGUID != csSpec.KubernetesSpecConfig.WindowsTelemetryGUID {
t.Errorf("incorrect WindowsTelemetryGUID, expect: '%s', actual: '%s'", csSpec.KubernetesSpecConfig.WindowsTelemetryGUID, vlabscsSpec.KubernetesSpecConfig.WindowsTelemetryGUID)
}
if vlabscsSpec.KubernetesSpecConfig.CNIPluginsDownloadURL != csSpec.KubernetesSpecConfig.CNIPluginsDownloadURL {
t.Errorf("incorrect CNIPluginsDownloadURL, expect: '%s', actual: '%s'", csSpec.KubernetesSpecConfig.CNIPluginsDownloadURL, vlabscsSpec.KubernetesSpecConfig.CNIPluginsDownloadURL)
}
if vlabscsSpec.KubernetesSpecConfig.VnetCNILinuxPluginsDownloadURL != csSpec.KubernetesSpecConfig.VnetCNILinuxPluginsDownloadURL {
t.Errorf("incorrect VnetCNILinuxPluginsDownloadURL, expect: '%s', actual: '%s'", csSpec.KubernetesSpecConfig.VnetCNILinuxPluginsDownloadURL, vlabscsSpec.KubernetesSpecConfig.VnetCNILinuxPluginsDownloadURL)
}
if vlabscsSpec.KubernetesSpecConfig.VnetCNIWindowsPluginsDownloadURL != csSpec.KubernetesSpecConfig.VnetCNIWindowsPluginsDownloadURL {
t.Errorf("incorrect VnetCNIWindowsPluginsDownloadURL, expect: '%s', actual: '%s'", csSpec.KubernetesSpecConfig.VnetCNIWindowsPluginsDownloadURL, vlabscsSpec.KubernetesSpecConfig.VnetCNIWindowsPluginsDownloadURL)
}
if vlabscsSpec.KubernetesSpecConfig.ContainerdDownloadURLBase != csSpec.KubernetesSpecConfig.ContainerdDownloadURLBase {
t.Errorf("incorrect ContainerdDownloadURLBase, expect: '%s', actual: '%s'", csSpec.KubernetesSpecConfig.ContainerdDownloadURLBase, vlabscsSpec.KubernetesSpecConfig.ContainerdDownloadURLBase)
}
//DockerSpecConfig
if vlabscsSpec.DockerSpecConfig.DockerComposeDownloadURL != csSpec.DockerSpecConfig.DockerComposeDownloadURL {
t.Errorf("incorrect DockerComposeDownloadURL, expect: '%s', actual: '%s'", csSpec.DockerSpecConfig.DockerComposeDownloadURL, vlabscsSpec.DockerSpecConfig.DockerComposeDownloadURL)
}
if vlabscsSpec.DockerSpecConfig.DockerEngineRepo != csSpec.DockerSpecConfig.DockerEngineRepo {
t.Errorf("incorrect DockerEngineRepo, expect: '%s', actual: '%s'", csSpec.DockerSpecConfig.DockerEngineRepo, vlabscsSpec.DockerSpecConfig.DockerEngineRepo)
}
//DCOSSpecConfig
if vlabscsSpec.DCOSSpecConfig.DCOS188BootstrapDownloadURL != csSpec.DCOSSpecConfig.DCOS188BootstrapDownloadURL {
t.Errorf("incorrect DCOS188BootstrapDownloadURL, expect: '%s', actual: '%s'", csSpec.DCOSSpecConfig.DCOS188BootstrapDownloadURL, vlabscsSpec.DCOSSpecConfig.DCOS188BootstrapDownloadURL)
}
if vlabscsSpec.DCOSSpecConfig.DCOS190BootstrapDownloadURL != csSpec.DCOSSpecConfig.DCOS190BootstrapDownloadURL {
t.Errorf("incorrect DCOS190BootstrapDownloadURL, expect: '%s', actual: '%s'", csSpec.DCOSSpecConfig.DCOS190BootstrapDownloadURL, vlabscsSpec.DCOSSpecConfig.DCOS190BootstrapDownloadURL)
}
if vlabscsSpec.DCOSSpecConfig.DCOS198BootstrapDownloadURL != csSpec.DCOSSpecConfig.DCOS198BootstrapDownloadURL {
t.Errorf("incorrect DCOS198BootstrapDownloadURL, expect: '%s', actual: '%s'", csSpec.DCOSSpecConfig.DCOS198BootstrapDownloadURL, vlabscsSpec.DCOSSpecConfig.DCOS198BootstrapDownloadURL)
}
if vlabscsSpec.DCOSSpecConfig.DCOS110BootstrapDownloadURL != csSpec.DCOSSpecConfig.DCOS110BootstrapDownloadURL {
t.Errorf("incorrect DCOS110BootstrapDownloadURL, expect: '%s', actual: '%s'", csSpec.DCOSSpecConfig.DCOS110BootstrapDownloadURL, vlabscsSpec.DCOSSpecConfig.DCOS110BootstrapDownloadURL)
}
if vlabscsSpec.DCOSSpecConfig.DCOS111BootstrapDownloadURL != csSpec.DCOSSpecConfig.DCOS111BootstrapDownloadURL {
t.Errorf("incorrect DCOS111BootstrapDownloadURL, expect: '%s', actual: '%s'", csSpec.DCOSSpecConfig.DCOS111BootstrapDownloadURL, vlabscsSpec.DCOSSpecConfig.DCOS111BootstrapDownloadURL)
}
if vlabscsSpec.DCOSSpecConfig.DCOSWindowsBootstrapDownloadURL != csSpec.DCOSSpecConfig.DCOSWindowsBootstrapDownloadURL {
t.Errorf("incorrect DCOSWindowsBootstrapDownloadURL, expect: '%s', actual: '%s'", csSpec.DCOSSpecConfig.DCOSWindowsBootstrapDownloadURL, vlabscsSpec.DCOSSpecConfig.DCOSWindowsBootstrapDownloadURL)
}
if vlabscsSpec.DCOSSpecConfig.DcosRepositoryURL != csSpec.DCOSSpecConfig.DcosRepositoryURL {
t.Errorf("incorrect DcosRepositoryURL, expect: '%s', actual: '%s'", csSpec.DCOSSpecConfig.DcosRepositoryURL, vlabscsSpec.DCOSSpecConfig.DcosRepositoryURL)
}
if vlabscsSpec.DCOSSpecConfig.DcosClusterPackageListID != csSpec.DCOSSpecConfig.DcosClusterPackageListID {
t.Errorf("incorrect DcosClusterPackageListID, expect: '%s', actual: '%s'", csSpec.DCOSSpecConfig.DcosClusterPackageListID, vlabscsSpec.DCOSSpecConfig.DcosClusterPackageListID)
}
if vlabscsSpec.DCOSSpecConfig.DcosProviderPackageID != csSpec.DCOSSpecConfig.DcosProviderPackageID {
t.Errorf("incorrect DcosProviderPackageID, expect: '%s', actual: '%s'", csSpec.DCOSSpecConfig.DcosProviderPackageID, vlabscsSpec.DCOSSpecConfig.DcosProviderPackageID)
}
//EndpointConfig
if vlabscsSpec.EndpointConfig.ResourceManagerVMDNSSuffix != csSpec.EndpointConfig.ResourceManagerVMDNSSuffix {
t.Errorf("incorrect ResourceManagerVMDNSSuffix, expect: '%s', actual: '%s'", csSpec.EndpointConfig.ResourceManagerVMDNSSuffix, vlabscsSpec.EndpointConfig.ResourceManagerVMDNSSuffix)
}
//OSImageConfig
for k, v := range csSpec.OSImageConfig {
if actualValue, ok := vlabscsSpec.OSImageConfig[vlabs.Distro(string(k))]; ok {
if v.ImageOffer != actualValue.ImageOffer {
t.Errorf("incorrect ImageOffer for '%s', expect: '%s', actual: '%s'", string(k), v.ImageOffer, actualValue.ImageOffer)
}
if v.ImagePublisher != actualValue.ImagePublisher {
t.Errorf("incorrect ImagePublisher for '%s', expect: '%s', actual: '%s'", string(k), v.ImagePublisher, actualValue.ImagePublisher)
}
if v.ImageSku != actualValue.ImageSku {
t.Errorf("incorrect ImageSku for '%s', expect: '%s', actual: '%s'", string(k), v.ImageSku, actualValue.ImageSku)
}
if v.ImageVersion != actualValue.ImageVersion {
t.Errorf("incorrect ImageVersion for '%s', expect: '%s', actual: '%s'", string(k), v.ImageVersion, actualValue.ImageVersion)
}
} else {
t.Errorf("incorrect OSImageConfig: '%s' is missing", string(k))
}
}
}
func TestConvertContainerServiceToVLabs(t *testing.T) {
cs := getDefaultContainerService()
vlabsCS := ConvertContainerServiceToVLabs(cs)
if vlabsCS == nil {
t.Errorf("expected the converted containerService struct to be non-nil")
}
}
func getDefaultContainerService() *ContainerService {
u, _ := url.Parse("http://foobar.com/search")
return &ContainerService{
ID: "sampleID",
Location: "westus2",
Name: "sampleCS",
Plan: &ResourcePurchasePlan{
Name: "sampleRPP",
Product: "sampleProduct",
PromotionCode: "sampleCode",
Publisher: "samplePublisher",
},
Tags: map[string]string{
"foo": "bar",
},
Type: "sampleType",
Properties: &Properties{
WindowsProfile: &WindowsProfile{
AdminUsername: "sampleAdminUsername",
AdminPassword: "sampleAdminPassword",
},
DiagnosticsProfile: &DiagnosticsProfile{
VMDiagnostics: &VMDiagnostics{
Enabled: true,
StorageURL: u,
},
},
LinuxProfile: &LinuxProfile{
AdminUsername: "azureuser",
SSH: struct {
PublicKeys []PublicKey `json:"publicKeys"`
}{
PublicKeys: []PublicKey{
{
KeyData: ValidSSHPublicKey,
},
},
},
Secrets: []KeyVaultSecrets{
{
SourceVault: &KeyVaultID{
ID: "sampleKeyVaultID",
},
VaultCertificates: []KeyVaultCertificate{
{
CertificateURL: "FooCertURL",
CertificateStore: "BarCertStore",
},
},
},
},
CustomNodesDNS: &CustomNodesDNS{
DNSServer: "SampleDNSServer",
},
CustomSearchDomain: &CustomSearchDomain{
Name: "FooCustomSearchDomain",
RealmUser: "sampleRealmUser",
RealmPassword: "sampleRealmPassword",
},
},
ServicePrincipalProfile: &ServicePrincipalProfile{
ClientID: "fooClientID",
Secret: "fooSecret",
ObjectID: "fooObjectID",
KeyvaultSecretRef: &KeyvaultSecretRef{
VaultID: "fooVaultID",
SecretName: "fooSecretName",
SecretVersion: "fooSecretVersion",
},
},
ExtensionProfiles: []*ExtensionProfile{
{
Name: "fooExtension",
Version: "fooVersion",
ExtensionParameters: "fooExtensionParameters",
ExtensionParametersKeyVaultRef: &KeyvaultSecretRef{
VaultID: "fooVaultID",
SecretName: "fooSecretName",
SecretVersion: "fooSecretVersion",
},
RootURL: "fooRootURL",
Script: "fooScript",
URLQuery: "fooURL",
},
},
JumpboxProfile: &JumpboxProfile{
OSType: "Linux",
DNSPrefix: "blueorange",
FQDN: "blueorange.westus2.com",
},
CertificateProfile: &CertificateProfile{
CaCertificate: "SampleCACert",
CaPrivateKey: "SampleCAPrivateKey",
APIServerCertificate: "SampleAPIServerCert",
APIServerPrivateKey: "SampleAPIServerPrivateKey",
ClientCertificate: "SampleClientCert",
ClientPrivateKey: "SampleClientPrivateKey",
KubeConfigCertificate: "SampleKubeConfigCert",
KubeConfigPrivateKey: "SampleKubeConfigPrivateKey",
EtcdClientCertificate: "SampleEtcdClientCert",
EtcdClientPrivateKey: "SampleEtcdClientPrivateKey",
EtcdServerCertificate: "SampleEtcdServerCert",
EtcdServerPrivateKey: "SampleEtcdServerPrivateKey",
},
FeatureFlags: &FeatureFlags{
EnableCSERunInBackground: true,
BlockOutboundInternet: false,
EnableTelemetry: false,
},
AADProfile: &AADProfile{
ClientAppID: "SampleClientAppID",
ServerAppID: "ServerAppID",
ServerAppSecret: "ServerAppSecret",
TenantID: "SampleTenantID",
AdminGroupID: "SampleAdminGroupID",
Authenticator: Webhook,
},
CustomProfile: &CustomProfile{
Orchestrator: "Kubernetes",
},
OrchestratorProfile: &OrchestratorProfile{
OrchestratorType: "Kubernetes",
OrchestratorVersion: "1.11.6",
DcosConfig: &DcosConfig{
DcosBootstrapURL: "SampleDcosBootstrapURL",
DcosWindowsBootstrapURL: "SampleWindowsDcosBootstrapURL",
Registry: "SampleRegistry",
RegistryPass: "SampleRegistryPass",
RegistryUser: "SampleRegistryUser",
DcosClusterPackageListID: "SampleDcosClusterPackageListID",
DcosProviderPackageID: "SampleDcosProviderPackageID",
BootstrapProfile: &BootstrapProfile{
VMSize: "Standard_Ds1_v1",
OSDiskSizeGB: 256,
OAuthEnabled: true,
StaticIP: "172.0.0.1",
Subnet: "255.255.255.0",
},
},
KubernetesConfig: &KubernetesConfig{},
},
MasterProfile: &MasterProfile{
Count: 1,
DNSPrefix: "blueorange",
SubjectAltNames: []string{
"fooSubjectAltName",
},
CustomFiles: &[]CustomFile{
{
Source: "sampleCustomFileSource",
Dest: "sampleCustomFileDest",
},
},
VMSize: "Standard_DS1_v1",
OSDiskSizeGB: 256,
VnetSubnetID: "sampleVnetSubnetID",
Subnet: "sampleSubnet",
VnetCidr: "10.240.0.0/8",
AgentVnetSubnetID: "sampleAgentVnetSubnetID",
FirstConsecutiveStaticIP: "10.240.0.0",
IPAddressCount: 5,
StorageProfile: StorageAccount,
HTTPSourceAddressPrefix: "fooHTTPSourceAddressPrefix",
OAuthEnabled: true,
PreprovisionExtension: &Extension{
Name: "sampleExtension",
SingleOrAll: "single",
Template: "{{foobar}}",
},
Extensions: []Extension{
{
Name: "sampleExtension",
SingleOrAll: "single",
Template: "{{foobar}}",
},
},
Distro: Ubuntu,
ImageRef: &ImageReference{
Name: "FooImageRef",
ResourceGroup: "FooImageRefResourceGroup",
},
KubernetesConfig: &KubernetesConfig{
KubernetesImageBase: "quay.io",
ClusterSubnet: "fooClusterSubnet",
NetworkPolicy: "calico",
NetworkPlugin: "azure-cni",
ContainerRuntime: "docker",
MaxPods: 3,
DockerBridgeSubnet: "sampleDockerSubnet",
DNSServiceIP: "172.0.0.1",
ServiceCIDR: "172.0.0.1/16",
UseManagedIdentity: true,
					UserAssignedID:       "fooUserAssignedID",
					UserAssignedClientID: "fooUserAssignedClientID",
MobyVersion: "3.0.0",
CustomHyperkubeImage: "",
ContainerdVersion: "1.2.4",
CustomCcmImage: "sampleCCMImage",
UseCloudControllerManager: to.BoolPtr(true),
CustomWindowsPackageURL: "https://deisartifacts.windows.net",
WindowsNodeBinariesURL: "https://deisartifacts.windows.net",
UseInstanceMetadata: to.BoolPtr(true),
LoadBalancerSku: BasicLoadBalancerSku,
ExcludeMasterFromStandardLB: to.BoolPtr(false),
EnableRbac: to.BoolPtr(true),
EnableSecureKubelet: to.BoolPtr(true),
EnableAggregatedAPIs: true,
EnableDataEncryptionAtRest: to.BoolPtr(true),
EnablePodSecurityPolicy: to.BoolPtr(true),
EnableEncryptionWithExternalKms: to.BoolPtr(true),
GCHighThreshold: 85,
GCLowThreshold: 80,
EtcdVersion: "3.0.0",
EtcdDiskSizeGB: "256",
					EtcdEncryptionKey: "sampleEncryptionKey",
AzureCNIVersion: "1.0.33",
AzureCNIURLLinux: "https://mirror.azk8s.cn/kubernetes/azure-container-networking/linux",
AzureCNIURLWindows: "https://mirror.azk8s.cn/kubernetes/azure-container-networking/windows",
KeyVaultSku: "Basic",
MaximumLoadBalancerRuleCount: 3,
ProxyMode: KubeProxyModeIPTables,
PrivateAzureRegistryServer: "sampleRegistryServerURL",
KubeletConfig: map[string]string{
"barKey": "bazValue",
},
Addons: []KubernetesAddon{
{
Name: "sampleAddon",
Enabled: to.BoolPtr(true),
Containers: []KubernetesContainerSpec{
{
Name: "sampleK8sContainer",
Image: "sampleK8sImage",
MemoryRequests: "20Mi",
CPURequests: "10m",
},
},
Config: map[string]string{
"sampleKey": "sampleVal",
},
},
},
APIServerConfig: map[string]string{
"sampleAPIServerKey": "sampleAPIServerVal",
},
ControllerManagerConfig: map[string]string{
"sampleCMKey": "sampleCMVal",
},
CloudControllerManagerConfig: map[string]string{
"sampleCCMKey": "sampleCCMVal",
},
SchedulerConfig: map[string]string{
"sampleSchedulerKey": "sampleSchedulerVal",
},
PrivateCluster: &PrivateCluster{
Enabled: to.BoolPtr(true),
JumpboxProfile: &PrivateJumpboxProfile{
Name: "sampleJumpboxProfile",
VMSize: "Standard_DS1_v2",
OSDiskSizeGB: 512,
Username: "userName",
PublicKey: ValidSSHPublicKey,
StorageProfile: StorageAccount,
},
},
PodSecurityPolicyConfig: map[string]string{
"samplePSPConfigKey": "samplePSPConfigVal",
},
},
},
AgentPoolProfiles: []*AgentPoolProfile{
{
Name: "sampleAgent",
Count: 2,
VMSize: "sampleVM",
DNSPrefix: "blueorange",
FQDN: "blueorange.westus2.com",
OSType: "Linux",
Subnet: "sampleSubnet",
},
{
Name: "sampleAgent-public",
Count: 2,
VMSize: "sampleVM",
DNSPrefix: "blueorange",
FQDN: "blueorange.westus2.com",
OSType: "Linux",
Subnet: "sampleSubnet",
ImageRef: &ImageReference{
Name: "testImage",
ResourceGroup: "testRg",
SubscriptionID: "testSub",
Gallery: "testGallery",
Version: "0.0.1",
},
},
},
},
}
}
func TestConvertOrchestratorVersionProfileToVLabs(t *testing.T) {
ovpK8s := &OrchestratorVersionProfile{
OrchestratorProfile: OrchestratorProfile{
OrchestratorType: Kubernetes,
OrchestratorVersion: "1.9.11",
},
Upgrades: []*OrchestratorProfile{
{
OrchestratorType: Kubernetes,
OrchestratorVersion: "1.10.13",
},
{
OrchestratorType: Kubernetes,
OrchestratorVersion: "1.11.6",
},
},
}
vlabsOvp := ConvertOrchestratorVersionProfileToVLabs(ovpK8s)
if vlabsOvp == nil {
t.Errorf("expected the converted orchestratorVersionProfileToVLabs struct to be non-nil")
}
}
func TestVMSSDiskEncryptionEnabledToVLabs(t *testing.T) {
cs := getDefaultContainerService()
cs.Properties.AgentPoolProfiles[0].EnableVMSSDiskEncryption = to.BoolPtr(true)
vlabsCS := ConvertContainerServiceToVLabs(cs)
if vlabsCS == nil {
t.Errorf("expected the converted containerService struct to be non-nil")
}
if !(*vlabsCS.Properties.AgentPoolProfiles[0].EnableVMSSDiskEncryption) {
t.Errorf("expected the EnableVMSSDiskEncryption flag to be true")
}
}
func TestVMSSDiskEncryptionEnabledDefaultToVLabs(t *testing.T) {
cs := getDefaultContainerService()
vlabsCS := ConvertContainerServiceToVLabs(cs)
if vlabsCS == nil {
t.Errorf("expected the converted containerService struct to be non-nil")
}
if *vlabsCS.Properties.AgentPoolProfiles[0].EnableVMSSDiskEncryption {
t.Errorf("expected the EnableVMSSDiskEncryption flag to be false")
}
}
func TestTelemetryEnabledToVLabs(t *testing.T) {
	cs := getDefaultContainerService()
	cs.Properties.FeatureFlags.EnableTelemetry = true
	vlabsCS := ConvertContainerServiceToVLabs(cs)
	if vlabsCS == nil {
		t.Errorf("expected the converted containerService struct to be non-nil")
	}
	if !vlabsCS.Properties.FeatureFlags.EnableTelemetry {
		t.Errorf("expected the EnableTelemetry feature flag to be true")
	}
}
func TestTelemetryDefaultToVLabs(t *testing.T) {
cs := getDefaultContainerService()
vlabsCS := ConvertContainerServiceToVLabs(cs)
if vlabsCS == nil {
t.Errorf("expected the converted containerService struct to be non-nil")
}
if vlabsCS.Properties.FeatureFlags.EnableTelemetry {
t.Errorf("expected the EnableTelemetry feature flag to be false")
}
}
func TestPlatformFaultDomainCountToVLabs(t *testing.T) {
cs := getDefaultContainerService()
cs.Properties.MasterProfile.PlatformFaultDomainCount = to.IntPtr(3)
cs.Properties.AgentPoolProfiles[0].PlatformFaultDomainCount = to.IntPtr(5)
vlabsCS := ConvertContainerServiceToVLabs(cs)
if vlabsCS == nil {
t.Errorf("expected the converted containerService struct to be non-nil")
}
if *vlabsCS.Properties.MasterProfile.PlatformFaultDomainCount != 3 {
t.Errorf("expected the master profile platform FD to be 3")
}
if *vlabsCS.Properties.AgentPoolProfiles[0].PlatformFaultDomainCount != 5 {
t.Errorf("expected the agent pool profile platform FD to be 5")
}
}
func TestPlatformUpdateDomainCountToVLabs(t *testing.T) {
cs := getDefaultContainerService()
cs.Properties.MasterProfile.PlatformUpdateDomainCount = to.IntPtr(3)
cs.Properties.AgentPoolProfiles[0].PlatformUpdateDomainCount = to.IntPtr(3)
vlabsCS := ConvertContainerServiceToVLabs(cs)
if vlabsCS == nil {
t.Errorf("expected the converted containerService struct to be non-nil")
}
if *vlabsCS.Properties.MasterProfile.PlatformUpdateDomainCount != 3 {
t.Errorf("expected the master profile platform FD to be 3")
}
if *vlabsCS.Properties.AgentPoolProfiles[0].PlatformUpdateDomainCount != 3 {
t.Errorf("expected the agent pool profile platform FD to be 3")
}
}
func TestConvertTelemetryProfileToVLabs(t *testing.T) {
cs := getDefaultContainerService()
cs.Properties.TelemetryProfile = &TelemetryProfile{
ApplicationInsightsKey: "app_insights_key",
}
vlabsCS := ConvertContainerServiceToVLabs(cs)
if vlabsCS.Properties.TelemetryProfile == nil {
t.Error("expected ConvertContainerServiceToVLabs to set TelemtryProfile")
}
if vlabsCS.Properties.TelemetryProfile.ApplicationInsightsKey != "app_insights_key" {
t.Error("TelemetryProfile.APplicationInsightsKey not converted")
}
}
func TestConvertWindowsProfileToVlabs(t *testing.T) {
falseVar := false
cases := []struct {
name string
w WindowsProfile
expected vlabs.WindowsProfile
}{
{
name: "empty profile",
w: WindowsProfile{},
expected: vlabs.WindowsProfile{
Secrets: []vlabs.KeyVaultSecrets{},
},
},
{
name: "misc fields",
w: WindowsProfile{
AdminUsername: "user",
AdminPassword: "password",
EnableAutomaticUpdates: &falseVar,
ImageVersion: "17763.615.1907121548",
SSHEnabled: false,
WindowsPublisher: "MicrosoftWindowsServer",
WindowsOffer: "WindowsServer",
WindowsSku: "2019-Datacenter-Core-smalldisk",
WindowsDockerVersion: "18.09",
},
expected: vlabs.WindowsProfile{
AdminUsername: "user",
AdminPassword: "password",
EnableAutomaticUpdates: &falseVar,
ImageVersion: "17763.615.1907121548",
SSHEnabled: false,
WindowsPublisher: "MicrosoftWindowsServer",
WindowsOffer: "WindowsServer",
WindowsSku: "2019-Datacenter-Core-smalldisk",
WindowsDockerVersion: "18.09",
Secrets: []vlabs.KeyVaultSecrets{},
},
},
{
name: "image reference",
w: WindowsProfile{
ImageRef: &ImageReference{
Gallery: "gallery",
Name: "name",
ResourceGroup: "rg",
SubscriptionID: "dc6bd10c-110c-4134-88c5-4d5a039129c4",
Version: "1.25.6",
},
},
expected: vlabs.WindowsProfile{
ImageRef: &vlabs.ImageReference{
Gallery: "gallery",
Name: "name",
ResourceGroup: "rg",
SubscriptionID: "dc6bd10c-110c-4134-88c5-4d5a039129c4",
Version: "1.25.6",
},
Secrets: []vlabs.KeyVaultSecrets{},
},
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
t.Parallel()
actual := vlabs.WindowsProfile{}
convertWindowsProfileToVLabs(&c.w, &actual)
diff := cmp.Diff(actual, c.expected)
if diff != "" {
t.Errorf("unexpected diff testing convertWindowsProfileToVLabs: %s", diff)
}
})
}
}
func TestConvertComponentsToVlabs(t *testing.T) {
k := &KubernetesConfig{
Components: []KubernetesComponent{
{
Name: "component-0",
Enabled: to.BoolPtr(true),
Containers: []KubernetesContainerSpec{
{
Name: "component-0-container-0",
Image: "baz",
CPURequests: "1",
MemoryRequests: "200m",
CPULimits: "2",
MemoryLimits: "400m",
},
{
Name: "component-0-container-1",
Image: "baz-1",
CPURequests: "1-1",
MemoryRequests: "200m-1",
CPULimits: "2-1",
MemoryLimits: "400m-1",
},
},
Config: map[string]string{
"foo": "bar",
"command": "my-command",
},
Data: "my-data",
},
{
Name: "component-1",
Enabled: to.BoolPtr(false),
Containers: []KubernetesContainerSpec{
{
Name: "component-1-container-0",
Image: "baz",
CPURequests: "1",
MemoryRequests: "200m",
CPULimits: "2",
MemoryLimits: "400m",
},
{
Name: "component-1-container-1",
Image: "baz-1",
CPURequests: "1-1",
MemoryRequests: "200m-1",
CPULimits: "2-1",
MemoryLimits: "400m-1",
},
},
Config: map[string]string{
"foo": "bar",
"command": "my-command",
},
Data: "my-data",
},
},
}
vk := &vlabs.KubernetesConfig{}
convertComponentsToVlabs(k, vk)
for i, component := range k.Components {
if vk.Components[i].Name != component.Name {
t.Errorf("unexpected Component.Name property %s after convertComponentsToVlabs conversion, expected %s", vk.Components[i].Name, component.Name)
}
if to.Bool(vk.Components[i].Enabled) != to.Bool(component.Enabled) {
t.Errorf("unexpected Component.Enabled property %t after convertComponentsToVlabs conversion, expected %t", to.Bool(vk.Components[i].Enabled), to.Bool(component.Enabled))
}
if vk.Components[i].Data != component.Data {
t.Errorf("unexpected Component.Data property %s after convertComponentsToVlabs conversion, expected %s", vk.Components[i].Data, component.Data)
}
for j, container := range component.Containers {
if vk.Components[i].Containers[j].Name != container.Name {
t.Errorf("unexpected Container.Name property %s after convertComponentsToVlabs conversion, expected %s", vk.Components[i].Containers[j].Name, container.Name)
}
if vk.Components[i].Containers[j].Image != container.Image {
t.Errorf("unexpected Container.Image property %s after convertComponentsToVlabs conversion, expected %s", vk.Components[i].Containers[j].Image, container.Image)
}
if vk.Components[i].Containers[j].CPURequests != container.CPURequests {
t.Errorf("unexpected Container.CPURequests property %s after convertComponentsToVlabs conversion, expected %s", vk.Components[i].Containers[j].CPURequests, container.CPURequests)
}
if vk.Components[i].Containers[j].MemoryRequests != container.MemoryRequests {
t.Errorf("unexpected Container.MemoryRequests property %s after convertComponentsToVlabs conversion, expected %s", vk.Components[i].Containers[j].MemoryRequests, container.MemoryRequests)
}
if vk.Components[i].Containers[j].CPULimits != container.CPULimits {
t.Errorf("unexpected Container.CPULimits property %s after convertComponentsToVlabs conversion, expected %s", vk.Components[i].Containers[j].CPULimits, container.CPULimits)
}
if vk.Components[i].Containers[j].MemoryLimits != container.MemoryLimits {
t.Errorf("unexpected Container.MemoryLimits property %s after convertComponentsToVlabs conversion, expected %s", vk.Components[i].Containers[j].MemoryLimits, container.MemoryLimits)
}
}
for key, val := range component.Config {
if vk.Components[i].Config[key] != val {
t.Errorf("unexpected Component.Config %s=%s after convertComponentsToVlabs conversion, expected %s=%s", key, vk.Components[i].Config[key], key, val)
}
}
for key, val := range vk.Components[i].Config {
if component.Config[key] != val {
t.Errorf("unexpected Component.Config %s=%s after convertComponentsToVlabs conversion, expected %s=%s", key, component.Config[key], key, val)
}
}
}
}
errors.rs
use std::fmt;
use url::ParseError as UrlParseError;
pub type Result<T> = std::result::Result<T, LeftError>;
#[derive(Debug)]
pub struct LeftError {
pub inner: LeftErrorKind,
}
#[must_use]
pub fn friendly_message(msg: &str) -> LeftError {
LeftError {
inner: LeftErrorKind::UserFriendlyError(msg.to_string()),
}
}
#[derive(Debug)]
pub enum LeftErrorKind {
SerdeParse(serde_json::error::Error),
IoError(std::io::Error),
XdgBaseDirError(xdg::BaseDirectoriesError),
TomlParse(toml::de::Error),
TomlSerialize(toml::ser::Error),
ReqwestError(reqwest::Error),
StreamError(),
NoneError(),
UserFriendlyError(String),
GitError(git2::Error),
Generic(String),
ParseIntError(core::num::ParseIntError),
SemVerError(semver::Error),
UrlParseError(url::ParseError),
}
impl fmt::Display for LeftError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.inner)
    }
}
impl fmt::Display for LeftErrorKind {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            LeftErrorKind::SerdeParse(ref err) => write!(f, "{}", err),
            LeftErrorKind::UserFriendlyError(ref err) | LeftErrorKind::Generic(ref err) => {
                write!(f, "{}", err)
            }
            LeftErrorKind::IoError(ref err) => write!(f, "{}", err),
            LeftErrorKind::XdgBaseDirError(ref err) => write!(f, "{}", err),
            LeftErrorKind::TomlParse(ref err) => write!(f, "{}", err),
            LeftErrorKind::TomlSerialize(ref err) => write!(f, "{}", err),
            LeftErrorKind::StreamError() => write!(f, "Stream Error"),
            LeftErrorKind::NoneError() => write!(f, "None Error"),
            LeftErrorKind::ReqwestError(ref err) => write!(f, "Request Error: {}", err),
            LeftErrorKind::GitError(ref err) => write!(f, "{}", err),
            LeftErrorKind::ParseIntError(ref err) => write!(f, "{}", err),
            LeftErrorKind::SemVerError(ref err) => write!(f, "{}", err),
            LeftErrorKind::UrlParseError(ref err) => write!(f, "{}", err),
        }
    }
}
impl From<LeftErrorKind> for LeftError {
fn from(inner: LeftErrorKind) -> LeftError {
LeftError { inner }
}
}
impl From<serde_json::error::Error> for LeftError {
fn from(inner: serde_json::error::Error) -> LeftError {
LeftErrorKind::SerdeParse(inner).into()
}
}
impl From<std::io::Error> for LeftError {
fn from(inner: std::io::Error) -> LeftError {
LeftErrorKind::IoError(inner).into()
}
}
impl From<xdg::BaseDirectoriesError> for LeftError {
fn from(inner: xdg::BaseDirectoriesError) -> LeftError {
LeftErrorKind::XdgBaseDirError(inner).into()
}
}
impl From<toml::de::Error> for LeftError {
fn from(inner: toml::de::Error) -> LeftError {
LeftErrorKind::TomlParse(inner).into()
}
}
impl From<toml::ser::Error> for LeftError {
fn from(inner: toml::ser::Error) -> LeftError {
LeftErrorKind::TomlSerialize(inner).into()
}
}
impl From<reqwest::Error> for LeftError {
fn from(inner: reqwest::Error) -> LeftError {
LeftErrorKind::ReqwestError(inner).into()
}
}
impl From<&str> for LeftError {
fn from(_s: &str) -> LeftError {
LeftErrorKind::NoneError().into()
}
}
impl From<git2::Error> for LeftError {
fn from(inner: git2::Error) -> LeftError {
LeftErrorKind::GitError(inner).into()
}
}
impl From<core::num::ParseIntError> for LeftError {
fn from(inner: core::num::ParseIntError) -> LeftError {
LeftErrorKind::ParseIntError(inner).into()
}
}
impl From<semver::Error> for LeftError {
fn from(inner: semver::Error) -> LeftError {
LeftErrorKind::SemVerError(inner).into()
}
}
impl From<UrlParseError> for LeftError {
    fn from(inner: UrlParseError) -> LeftError {
LeftErrorKind::UrlParseError(inner).into()
}
}
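// A minimal usage sketch (hypothetical; `example_load_toml` is not part of
// the original crate): with the `From` impls above, the `?` operator lifts
// library errors into `LeftError` automatically.
#[allow(dead_code)]
fn example_load_toml(path: &str) -> Result<toml::Value> {
    let raw = std::fs::read_to_string(path)?; // std::io::Error -> LeftError
    let value: toml::Value = toml::from_str(&raw)?; // toml::de::Error -> LeftError
    Ok(value)
}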
users.py
from flask import Blueprint, jsonify
from flask import render_template
from flask import request
from sqlalchemy import exc
from project.api.utils import authenticate
from project import db
from project.api.models import User
users_blueprint = Blueprint('users', __name__, template_folder='./templates')
@users_blueprint.route('/users/ping', methods=['GET'])
def ping_pong():
return jsonify({
'status': 'success',
'message': 'pong!'
})
@users_blueprint.route('/users', methods=['POST'])
@authenticate
def add_user(resp):
post_data = request.get_json()
response_object = {
'status': 'fail',
'message': 'Invalid payload.'
}
if not post_data:
return jsonify(response_object), 400
username = post_data.get('username')
email = post_data.get('email')
password = post_data.get('password')
try:
user = User.query.filter_by(email=email).first()
if not user:
db.session.add(User(username=username,
email=email,
password=password))
db.session.commit()
response_object['status'] = 'success'
response_object['message'] = f'{email} was added!'
return jsonify(response_object), 201
else:
response_object['message'] = 'Sorry. That email already exists.'
return jsonify(response_object), 400
    except (exc.IntegrityError, ValueError):
db.session.rollback()
return jsonify(response_object), 400
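# Hypothetical usage sketch (not part of this module): exercising POST /users
# with Flask's test client. Assumes an app factory and a valid token for the
# @authenticate wrapper; all names below are illustrative only.
#
#   client = app.test_client()
#   resp = client.post(
#       '/users',
#       json={'username': 'alice', 'email': 'alice@example.com', 'password': 's3cret'},
#       headers={'Authorization': 'Bearer <token>'},
#   )
#   assert resp.status_code == 201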
@users_blueprint.route('/users/<user_id>', methods=['GET'])
def get_single_user(user_id):
"""获取单个用户详情"""
response_object = {
'status': 'fail',
'message': 'User does not exist'
}
try:
user = User.query.filter_by(id=int(user_id)).first()
if not user:
return jsonify(response_object), 404
else:
response_object = {
'status': 'success',
'data': {
'id': user_id,
'username': user.username,
'email': user.email,
'active': user.active
}
}
return jsonify(response_object), 200
except ValueError:
return jsonify(response_object), 404
@users_blueprint.route('/users', methods=['GET'])
def get_all_users():
"""获取所有用户"""
response_object = {
'status': 'success',
'data': {
'users': [user.to_json() for user in User.query.all()]
}
}
return jsonify(response_object), 200
@users_blueprint.route('/', methods=['GET', 'POST'])
def index():
if request.method == 'POST':
username = request.form['username']
email = request.form['email']
password = request.form['password']
db.session.add(User(username=username, email=email, password=password))
db.session.commit()
users = User.query.all()
return render_template('index.html', users=users)
contributors_table_test.rs
// Copyright 2020-2021 The Datafuse Authors.
//
// SPDX-License-Identifier: Apache-2.0.
use common_planners::*;
use common_runtime::tokio;
use futures::TryStreamExt;
use crate::datasources::system::*;
use crate::datasources::*;
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn test_contributors_table() -> anyhow::Result<()> {
let ctx = crate::tests::try_create_context()?;
let table = ContributorsTable::create();
table.read_plan(
ctx.clone(),
&ScanPlan::empty(),
ctx.get_max_threads()? as usize,
)?;
let stream = table.read(ctx).await?;
let result = stream.try_collect::<Vec<_>>().await?;
let block = &result[0];
assert_eq!(block.num_columns(), 1);
Ok(())
}
mod.rs
pub mod callbacks;
pub mod config;
pub mod errors;
pub mod events;
mod resumption;
mod ser;
#[cfg(test)]
mod test_managed_group;
use crate::{
credentials::{Credential, CredentialBundle},
error::ErrorString,
framing::*,
group::*,
key_packages::{KeyPackage, KeyPackageBundle},
messages::{proposals::*, Welcome},
schedule::ResumptionSecret,
tree::{index::LeafIndex, node::Node},
};
use std::collections::HashMap;
use std::io::{Error, Read, Write};
pub use callbacks::*;
pub use config::*;
pub use errors::{
EmptyInputError, InvalidMessageError, ManagedGroupError, PendingProposalsError,
UseAfterEviction,
};
pub use events::*;
pub(crate) use resumption::ResumptionSecretStore;
use ser::*;
/// A `ManagedGroup` represents an [MlsGroup] with
/// an easier, high-level API designed to be used in production. The API exposes
/// high level functions to manage a group by adding/removing members, get the
/// current member list, etc.
///
/// The API is modeled such that it can serve as a direct interface to the
/// Delivery Service. Functions that modify the public state of the group will
/// return a `Vec<MLSMessage>` that can be sent to the Delivery
/// Service directly. Conversely, incoming messages from the Delivery Service
/// can be fed into [process_messages()](`ManagedGroup::process_messages()`).
///
/// A `ManagedGroup` has an internal queue of pending proposals that builds up
/// as new messages are processed. When creating proposals, those messages are
/// not automatically appended to this queue, instead they have to be processed
/// again through [process_messages()](`ManagedGroup::process_messages()`). This
/// allows the Delivery Service to reject them (e.g. if they reference the wrong
/// epoch).
///
/// If incoming messages or applied operations are semantically or syntactically
/// incorrect, an error event will be returned with a corresponding error
/// message and the state of the group will remain unchanged.
///
/// The application policy for the group can be enforced by implementing the
/// validator callback functions and selectively allowing/disallowing each
/// operation (see [`ManagedGroupCallbacks`]).
///
/// Changes to the group state are dispatched as events through callback
/// functions (see [`ManagedGroupCallbacks`]).
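///
/// A minimal flow as a sketch (assuming `credential_bundle`, `config`,
/// `group_id`, `key_package_bundle` and `new_member_key_package` are already
/// set up; not a verbatim test from this crate):
///
/// ```ignore
/// let mut group = ManagedGroup::new(&credential_bundle, &config, group_id, key_package_bundle)?;
/// // Handshake messages are returned so they can be fanned out via the Delivery Service.
/// let (mls_messages, welcome) = group.add_members(&[new_member_key_package])?;
/// // Incoming traffic from the Delivery Service is processed into events.
/// let events = group.process_messages(incoming_messages)?;
/// ```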
#[derive(Debug)]
#[cfg_attr(test, derive(PartialEq))]
pub struct ManagedGroup<'a> {
// CredentialBundle used to sign messages
credential_bundle: &'a CredentialBundle,
    // The group configuration. See `ManagedGroupConfig` for more information.
managed_group_config: ManagedGroupConfig,
    // The internal `MlsGroup` used for lower-level operations. See `MlsGroup` for more
    // information.
group: MlsGroup,
// A queue of incoming proposals from the DS for a given epoch. New proposals are added to the
// queue through `process_messages()`. The queue is emptied after every epoch change.
pending_proposals: Vec<MLSPlaintext>,
// Own `KeyPackageBundle`s that were created for update proposals or commits. The vector is
// emptied after every epoch change.
own_kpbs: Vec<KeyPackageBundle>,
// The AAD that is used for all outgoing handshake messages. The AAD can be set through
// `set_aad()`.
aad: Vec<u8>,
// Resumption secret store. This is where the resumption secrets are kept in a rollover list.
resumption_secret_store: ResumptionSecretStore,
    // A flag that indicates if the current client is still a member of a group. The value is set
    // to `true` upon group creation and is set to `false` when the client gets evicted from the
    // group.
active: bool,
}
impl<'a> ManagedGroup<'a> {
// === Group creation ===
/// Creates a new group from scratch with only the creator as a member.
pub fn new(
credential_bundle: &'a CredentialBundle,
managed_group_config: &ManagedGroupConfig,
group_id: GroupId,
key_package_bundle: KeyPackageBundle,
) -> Result<Self, ManagedGroupError> {
// TODO #141
let group = MlsGroup::new(
&group_id.as_slice(),
key_package_bundle.key_package().ciphersuite_name(),
key_package_bundle,
GroupConfig::default(),
None, /* Initial PSK */
)?;
let resumption_secret_store =
ResumptionSecretStore::new(managed_group_config.number_of_resumption_secrets);
let managed_group = ManagedGroup {
credential_bundle,
managed_group_config: managed_group_config.clone(),
group,
pending_proposals: vec![],
own_kpbs: vec![],
aad: vec![],
resumption_secret_store,
active: true,
};
// Since the state of the group was changed, call the auto-save function
managed_group.auto_save();
Ok(managed_group)
}
/// Creates a new group from a `Welcome` message
pub fn new_from_welcome(
credential_bundle: &'a CredentialBundle,
managed_group_config: &ManagedGroupConfig,
welcome: Welcome,
ratchet_tree: Option<Vec<Option<Node>>>,
key_package_bundle: KeyPackageBundle,
) -> Result<Self, GroupError> {
// TODO #141
let group = MlsGroup::new_from_welcome(welcome, ratchet_tree, key_package_bundle, None)?;
let resumption_secret_store =
ResumptionSecretStore::new(managed_group_config.number_of_resumption_secrets);
let managed_group = ManagedGroup {
credential_bundle,
managed_group_config: managed_group_config.clone(),
group,
pending_proposals: vec![],
own_kpbs: vec![],
aad: vec![],
resumption_secret_store,
active: true,
};
// Since the state of the group was changed, call the auto-save function
managed_group.auto_save();
Ok(managed_group)
}
// === Membership management ===
/// Adds members to the group
///
/// New members are added by providing a `KeyPackage` for each member.
///
/// If successful, it returns a `Vec` of
/// [`MLSMessage`](crate::prelude::MLSMessage) and a
/// [`Welcome`](crate::prelude::Welcome) message.
pub fn add_members(
&mut self,
key_packages: &[KeyPackage],
) -> Result<(Vec<MLSMessage>, Welcome), ManagedGroupError> {
if !self.active {
return Err(ManagedGroupError::UseAfterEviction(UseAfterEviction::Error));
}
if key_packages.is_empty() {
return Err(ManagedGroupError::EmptyInput(EmptyInputError::AddMembers));
}
// Create add proposals by value from key packages
let proposals = key_packages
.iter()
.map(|key_package| {
Proposal::Add(AddProposal {
key_package: key_package.clone(),
})
})
.collect::<Vec<Proposal>>();
let proposals_by_value = &proposals.iter().collect::<Vec<&Proposal>>();
// Include pending proposals
let proposals_by_reference = &self
.pending_proposals
.iter()
.collect::<Vec<&MLSPlaintext>>();
// Create Commit over all proposals
// TODO #141
let (commit, welcome_option, kpb_option) = self.group.create_commit(
&self.aad,
&self.credential_bundle,
proposals_by_reference,
proposals_by_value,
false,
None,
)?;
let welcome = match welcome_option {
Some(welcome) => welcome,
None => {
return Err(ManagedGroupError::LibraryError(
"No secrets to generate commit message.".into(),
))
}
};
// If it was a full Commit, we have to save the KeyPackageBundle for later
if let Some(kpb) = kpb_option {
self.own_kpbs.push(kpb);
}
// Convert MLSPlaintext messages to MLSMessage and encrypt them if required by
// the configuration
let mls_messages = self.plaintext_to_mls_messages(vec![commit])?;
// Since the state of the group was changed, call the auto-save function
self.auto_save();
Ok((mls_messages, welcome))
}
/// Removes members from the group
///
/// Members are removed by providing the index of their leaf in the tree.
///
/// If successful, it returns a `Vec` of
/// [`MLSMessage`](crate::prelude::MLSMessage) and an optional
/// [`Welcome`](crate::prelude::Welcome) message if there were add proposals
/// in the queue of pending proposals.
pub fn remove_members(
&mut self,
members: &[usize],
) -> Result<(Vec<MLSMessage>, Option<Welcome>), ManagedGroupError> {
if !self.active {
return Err(ManagedGroupError::UseAfterEviction(UseAfterEviction::Error));
}
if members.is_empty() {
return Err(ManagedGroupError::EmptyInput(
EmptyInputError::RemoveMembers,
));
}
        // Create remove proposals by value
let proposals = members
.iter()
.map(|member| {
Proposal::Remove(RemoveProposal {
removed: *member as u32,
})
})
.collect::<Vec<Proposal>>();
let proposals_by_value = &proposals.iter().collect::<Vec<&Proposal>>();
// Include pending proposals
let proposals_by_reference = &self
.pending_proposals
.iter()
.collect::<Vec<&MLSPlaintext>>();
// Create Commit over all proposals
// TODO #141
let (commit, welcome_option, kpb_option) = self.group.create_commit(
&self.aad,
&self.credential_bundle,
proposals_by_reference,
proposals_by_value,
false,
None,
)?;
// It has to be a full Commit and we have to save the KeyPackageBundle for later
if let Some(kpb) = kpb_option {
self.own_kpbs.push(kpb);
} else {
return Err(ManagedGroupError::LibraryError(
"We didn't get a key package for a full commit.".into(),
));
}
// Convert MLSPlaintext messages to MLSMessage and encrypt them if required by
// the configuration
let mls_messages = self.plaintext_to_mls_messages(vec![commit])?;
// Since the state of the group was changed, call the auto-save function
self.auto_save();
Ok((mls_messages, welcome_option))
}
/// Creates proposals to add members to the group
pub fn propose_add_members(
&mut self,
key_packages: &[KeyPackage],
) -> Result<Vec<MLSMessage>, ManagedGroupError> {
if !self.active {
return Err(ManagedGroupError::UseAfterEviction(UseAfterEviction::Error));
}
let plaintext_messages: Vec<MLSPlaintext> = {
let mut messages = vec![];
for key_package in key_packages.iter() {
let add_proposal = self.group.create_add_proposal(
&self.aad,
&self.credential_bundle,
key_package.clone(),
)?;
messages.push(add_proposal);
}
messages
};
let mls_messages = self.plaintext_to_mls_messages(plaintext_messages)?;
// Since the state of the group was changed, call the auto-save function
self.auto_save();
Ok(mls_messages)
}
/// Creates proposals to remove members from the group
pub fn propose_remove_members(
&mut self,
members: &[usize],
) -> Result<Vec<MLSMessage>, ManagedGroupError> {
if !self.active {
return Err(ManagedGroupError::UseAfterEviction(UseAfterEviction::Error));
}
let plaintext_messages: Vec<MLSPlaintext> = {
let mut messages = vec![];
for member in members.iter() {
let remove_proposal = self.group.create_remove_proposal(
&self.aad,
&self.credential_bundle,
LeafIndex::from(*member),
)?;
messages.push(remove_proposal);
}
messages
};
let mls_messages = self.plaintext_to_mls_messages(plaintext_messages)?;
// Since the state of the group was changed, call the auto-save function
self.auto_save();
Ok(mls_messages)
}
/// Leave the group
pub fn leave_group(&mut self) -> Result<Vec<MLSMessage>, ManagedGroupError> {
if !self.active {
return Err(ManagedGroupError::UseAfterEviction(UseAfterEviction::Error));
}
let remove_proposal = self.group.create_remove_proposal(
&self.aad,
&self.credential_bundle,
self.group.tree().own_node_index(),
)?;
self.plaintext_to_mls_messages(vec![remove_proposal])
}
/// Gets the current list of members
pub fn members(&self) -> Vec<Credential> {
let mut members: Vec<Credential> = vec![];
let tree = self.group.tree();
let leaf_count = self.group.tree().leaf_count();
for index in 0..leaf_count.as_usize() {
let leaf = &tree.nodes[LeafIndex::from(index)];
if let Some(leaf_node) = leaf.key_package() {
members.push(leaf_node.credential().clone());
}
}
members
}
// === Process messages ===
/// Processes any incoming messages from the DS (MLSPlaintext &
/// MLSCiphertext) and triggers the corresponding callback functions.
/// Return a list of `GroupEvent` that contain the individual events that
/// occurred while processing messages.
pub fn process_messages(
&mut self,
messages: Vec<MLSMessage>,
) -> Result<Vec<GroupEvent>, ManagedGroupError> {
if !self.active {
return Err(ManagedGroupError::UseAfterEviction(UseAfterEviction::Error));
}
let mut events = Vec::new();
// Iterate over all incoming messages
for message in messages {
// Check the type of message we received
let (plaintext, aad_option) = match message {
// If it is a ciphertext we decrypt it and return the plaintext message
MLSMessage::Ciphertext(ciphertext) => {
let aad = ciphertext.authenticated_data.clone();
match self.group.decrypt(&ciphertext) {
Ok(plaintext) => (plaintext, Some(aad)),
Err(_) => {
events.push(GroupEvent::InvalidMessage(InvalidMessageEvent::new(
InvalidMessageError::InvalidCiphertext(aad.into()),
)));
                            // Since we cannot decrypt the MLSCiphertext to an MLSPlaintext, we move
// to the next message
continue;
}
}
}
// If it is a plaintext message we just return it
MLSMessage::Plaintext(plaintext) => {
// Verify signature & membership tag
// TODO #106: Support external senders
if plaintext.is_proposal()
&& plaintext.sender.is_member()
&& self.group.verify_membership_tag(&plaintext).is_err()
{
events.push(GroupEvent::InvalidMessage(InvalidMessageEvent::new(
InvalidMessageError::MembershipTagMismatch,
)));
// Since the membership tag verification failed, we skip the message
// and go to the next one
continue;
}
(plaintext, None)
}
};
            // Save the current member list for validation and events
let indexed_members = self.indexed_members();
// See what kind of message it is
match plaintext.content {
MLSPlaintextContentType::Proposal(_) => {
// Incoming proposals are validated against the application validation
// policy and then appended to the internal `pending_proposal` list.
// TODO #133: Semantic validation of proposals
if self.validate_proposal(
plaintext.content.to_proposal(),
&plaintext.sender.sender,
&indexed_members,
) {
self.pending_proposals.push(plaintext);
} else {
// The proposal was invalid
events.push(GroupEvent::InvalidMessage(InvalidMessageEvent::new(
InvalidMessageError::CommitWithInvalidProposals(
"Invalid proposal".into(),
),
)));
}
}
MLSPlaintextContentType::Commit(ref commit) => {
// Validate inline proposals
if !self.validate_inline_proposals(
&commit.proposals,
&plaintext.sender.sender,
&indexed_members,
) {
// If not all proposals are valid we issue an error event
events.push(GroupEvent::InvalidMessage(InvalidMessageEvent::new(
InvalidMessageError::CommitWithInvalidProposals(
"Not all proposals are valid".into(),
),
)));
// And move on to the next message
continue;
}
// If all proposals were valid, we continue with applying the Commit
// message
let proposals = &self
.pending_proposals
.iter()
.collect::<Vec<&MLSPlaintext>>();
// TODO #141
match self
.group
.apply_commit(&plaintext, proposals, &self.own_kpbs, None)
{
Ok(()) => {
// Since the Commit was applied without errors, we can collect
// all proposals from the Commit and generate events
events.append(&mut self.prepare_events(
self.ciphersuite(),
&commit.proposals,
plaintext.sender.sender,
&indexed_members,
));
                            // If a Commit has an update path, it is additionally treated
                            // like a committed UpdateProposal.
if commit.has_path() {
events.push(GroupEvent::MemberUpdated(MemberUpdatedEvent::new(
aad_option.unwrap_or_default(),
indexed_members[&plaintext.sender.sender].clone(),
)));
}
// Extract and store the resumption secret for the current epoch
let resumption_secret = self.group.epoch_secrets().resumption_secret();
self.resumption_secret_store
.add(self.group.context().epoch(), resumption_secret.clone());
// We don't need the pending proposals and key package bundles any
// longer
self.pending_proposals.clear();
self.own_kpbs.clear();
}
Err(apply_commit_error) => match apply_commit_error {
GroupError::ApplyCommitError(ApplyCommitError::SelfRemoved) => {
// Prepare events
events.append(&mut self.prepare_events(
self.ciphersuite(),
&commit.proposals,
plaintext.sender.sender,
&indexed_members,
));
// The group is no longer active
self.active = false;
}
GroupError::ApplyCommitError(e) => {
events.push(GroupEvent::InvalidMessage(InvalidMessageEvent::new(
InvalidMessageError::CommitError(e),
)));
}
_ => {
let error_string =
"apply_commit() did not return an ApplyCommitError."
.to_string();
events.push(GroupEvent::Error(ErrorEvent::new(
ManagedGroupError::LibraryError(ErrorString::from(
error_string,
)),
)));
}
},
}
}
MLSPlaintextContentType::Application(ref app_message) => {
// Save the application message as an event
events.push(GroupEvent::ApplicationMessage(
ApplicationMessageEvent::new(
aad_option.unwrap(),
indexed_members[&plaintext.sender()].clone(),
app_message.to_vec(),
),
));
}
}
}
// Since the state of the group was changed, call the auto-save function
self.auto_save();
Ok(events)
}
// === Application messages ===
/// Creates an application message.
/// Returns `ManagedGroupError::UseAfterEviction(UseAfterEviction::Error)`
/// if the member is no longer part of the group.
/// Returns `ManagedGroupError::PendingProposalsExist` if pending proposals
/// exist. In that case `.process_pending_proposals()` must be called first
/// and incoming messages from the DS must be processed afterwards.
pub fn create_message(&mut self, message: &[u8]) -> Result<MLSMessage, ManagedGroupError> {
if !self.active {
return Err(ManagedGroupError::UseAfterEviction(UseAfterEviction::Error));
}
if !self.pending_proposals.is_empty() {
return Err(ManagedGroupError::PendingProposalsExist(
PendingProposalsError::Exists,
));
}
let ciphertext = self.group.create_application_message(
&self.aad,
message,
&self.credential_bundle,
self.configuration().padding_size(),
)?;
// Since the state of the group was changed, call the auto-save function
self.auto_save();
Ok(MLSMessage::Ciphertext(ciphertext))
}
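    // Usage sketch (hypothetical, not part of the original file): callers are
    // expected to commit pending proposals before sending application data.
    //
    //   if let Err(ManagedGroupError::PendingProposalsExist(_)) = group.create_message(b"hi") {
    //       let (msgs, _welcome) = group.process_pending_proposals()?;
    //       // fan out `msgs` via the Delivery Service, then retry create_message()
    //   }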
/// Process pending proposals
pub fn process_pending_proposals(
&mut self,
) -> Result<(Vec<MLSMessage>, Option<Welcome>), ManagedGroupError> {
if !self.active {
return Err(ManagedGroupError::UseAfterEviction(UseAfterEviction::Error));
}
// Include pending proposals into Commit
let messages_to_commit: Vec<&MLSPlaintext> = self.pending_proposals.iter().collect();
// Create Commit over all pending proposals
// TODO #141
let (commit, welcome_option, kpb_option) = self.group.create_commit(
&self.aad,
&self.credential_bundle,
&messages_to_commit,
&[],
true,
None,
)?;
// Add the Commit message to the other pending messages
let plaintext_messages = vec![commit];
// If it was a full Commit, we have to save the KeyPackageBundle for later
if let Some(kpb) = kpb_option {
self.own_kpbs.push(kpb);
}
// Convert MLSPlaintext messages to MLSMessage and encrypt them if required by
// the configuration
let mls_messages = self.plaintext_to_mls_messages(plaintext_messages)?;
// Since the state of the group was changed, call the auto-save function
self.auto_save();
Ok((mls_messages, welcome_option))
}
// === Export secrets ===
/// Exports a secret from the current epoch
pub fn export_secret(
&self,
label: &str,
context: &[u8],
key_length: usize,
) -> Result<Vec<u8>, ManagedGroupError> {
if self.active {
Ok(self.group.export_secret(label, context, key_length)?)
} else {
Err(ManagedGroupError::UseAfterEviction(UseAfterEviction::Error))
}
}
/// Returns the authentication secret
    pub fn authentication_secret(&self) -> Vec<u8> {
self.group.authentication_secret()
}
/// Returns a resumption secret for a given epoch. If no resumption secret
/// is available `None` is returned.
pub fn get_resumption_secret(&self, epoch: GroupEpoch) -> Option<&ResumptionSecret> {
self.resumption_secret_store.get(epoch)
}
// === Configuration ===
/// Gets the configuration
pub fn configuration(&self) -> &ManagedGroupConfig {
&self.managed_group_config
}
/// Sets the configuration
pub fn set_configuration(&mut self, managed_group_config: &ManagedGroupConfig) {
self.managed_group_config = managed_group_config.clone();
// Since the state of the group was changed, call the auto-save function
self.auto_save();
}
/// Gets the AAD used in the framing
pub fn aad(&self) -> &[u8] {
&self.aad
}
/// Sets the AAD used in the framing
pub fn set_aad(&mut self, aad: &[u8]) {
self.aad = aad.to_vec();
// Since the state of the group was changed, call the auto-save function
self.auto_save();
}
// === Advanced functions ===
/// Returns the group's ciphersuite
pub fn ciphersuite(&self) -> &Ciphersuite {
self.group.ciphersuite()
}
/// Returns whether the own client is still a member of the group or if it
/// was already evicted
pub fn is_active(&self) -> bool {
self.active
}
/// Sets a different `CredentialBundle`
pub fn set_credential_bundle(&mut self, credential_bundle: &'a CredentialBundle) {
self.credential_bundle = credential_bundle;
}
/// Returns own credential
pub fn credential(&self) -> &Credential {
&self.credential_bundle.credential()
}
/// Get group ID
pub fn group_id(&self) -> &GroupId {
self.group.group_id()
}
/// Updates the own leaf node
///
/// A [`KeyPackageBundle`](crate::prelude::KeyPackageBundle) can optionally
/// be provided. If not, a new one will be created on the fly.
///
/// If successful, it returns a `Vec` of
/// [`MLSMessage`](crate::prelude::MLSMessage) and an optional
/// [`Welcome`](crate::prelude::Welcome) message if there were add proposals
/// in the queue of pending proposals.
pub fn self_update(
&mut self,
key_package_bundle_option: Option<KeyPackageBundle>,
) -> Result<(Vec<MLSMessage>, Option<Welcome>), ManagedGroupError> {
if !self.active {
return Err(ManagedGroupError::UseAfterEviction(UseAfterEviction::Error));
}
// If a KeyPackageBundle was provided, create an UpdateProposal
let mut plaintext_messages = if let Some(key_package_bundle) = key_package_bundle_option {
let update_proposal = self.group.create_update_proposal(
&self.aad,
&self.credential_bundle,
key_package_bundle.key_package().clone(),
)?;
self.own_kpbs.push(key_package_bundle);
vec![update_proposal]
} else {
vec![]
};
// Include pending proposals into Commit
let messages_to_commit: Vec<&MLSPlaintext> = self
.pending_proposals
.iter()
.chain(plaintext_messages.iter())
.collect();
// Create Commit over all proposals
// TODO #141
let (commit, welcome_option, kpb_option) = self.group.create_commit(
&self.aad,
&self.credential_bundle,
&messages_to_commit,
&[],
true, /* force_self_update */
None,
)?;
// Add the Commit message to the other pending messages
plaintext_messages.push(commit);
// Take the new KeyPackageBundle and save it for later
let kpb = match kpb_option {
Some(kpb) => kpb,
None => {
return Err(ManagedGroupError::LibraryError(
"We didn't get a key package for a full commit on self update.".into(),
))
}
};
self.own_kpbs.push(kpb);
// Convert MLSPlaintext messages to MLSMessage and encrypt them if required by
// the configuration
let mls_messages = self.plaintext_to_mls_messages(plaintext_messages)?;
// Since the state of the group was changed, call the auto-save function
self.auto_save();
Ok((mls_messages, welcome_option))
}
/// Creates a proposal to update the own leaf node
pub fn propose_self_update(
&mut self,
key_package_bundle_option: Option<KeyPackageBundle>,
) -> Result<Vec<MLSMessage>, ManagedGroupError> {
if !self.active {
return Err(ManagedGroupError::UseAfterEviction(UseAfterEviction::Error));
}
let tree = self.group.tree();
let existing_key_package = tree.own_key_package();
let key_package_bundle = match key_package_bundle_option {
Some(kpb) => kpb,
None => {
let mut key_package_bundle =
KeyPackageBundle::from_rekeyed_key_package(existing_key_package);
key_package_bundle.sign(self.credential_bundle);
key_package_bundle
}
};
let plaintext_messages = vec![self.group.create_update_proposal(
&self.aad,
&self.credential_bundle,
key_package_bundle.key_package().clone(),
)?];
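        // Explicitly release the borrow of the group's tree before `self` is
        // mutated again below (pushing the new bundle to `own_kpbs`).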
drop(tree);
self.own_kpbs.push(key_package_bundle);
let mls_messages = self.plaintext_to_mls_messages(plaintext_messages)?;
// Since the state of the group was changed, call the auto-save function
self.auto_save();
Ok(mls_messages)
}
    /// Returns the list of pending proposals
pub fn pending_proposals(&self) -> &[MLSPlaintext] {
&self.pending_proposals
}
// === Load & save ===
/// Loads the state from persisted state
pub fn load<R: Read>(
reader: R,
credential_bundle: &'a CredentialBundle,
callbacks: &ManagedGroupCallbacks,
) -> Result<ManagedGroup<'a>, Error> {
let serialized_managed_group: SerializedManagedGroup = serde_json::from_reader(reader)?;
Ok(serialized_managed_group.into_managed_group(credential_bundle, callbacks))
}
/// Persists the state
pub fn save<W: Write>(&self, writer: &mut W) -> Result<(), Error> {
let serialized_managed_group = serde_json::to_string_pretty(self)?;
writer.write_all(&serialized_managed_group.into_bytes())
}
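    // Round-trip sketch (hypothetical, not part of the original file): `&[u8]`
    // implements `Read`, so a group saved to a buffer can be restored with the
    // same `CredentialBundle` and `ManagedGroupCallbacks`.
    //
    //   let mut buf = Vec::new();
    //   group.save(&mut buf)?;
    //   let restored = ManagedGroup::load(&buf[..], &credential_bundle, &callbacks)?;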
// === Extensions ===
/// Export the Ratchet Tree
pub fn export_ratchet_tree(&self) -> Vec<Option<Node>> {
self.group.tree().public_key_tree_copy()
}
}
// Private methods of ManagedGroup
impl<'a> ManagedGroup<'a> {
/// Converts MLSPlaintext to MLSMessage. Depending on whether handshake
/// message should be encrypted, MLSPlaintext messages are encrypted to
/// MLSCiphertext first.
fn plaintext_to_mls_messages(
&mut self,
mut plaintext_messages: Vec<MLSPlaintext>,
) -> Result<Vec<MLSMessage>, ManagedGroupError> {
let mut out = Vec::with_capacity(plaintext_messages.len());
for plaintext in plaintext_messages.drain(..) {
let msg = match self.configuration().handshake_message_format {
HandshakeMessageFormat::Plaintext => MLSMessage::Plaintext(plaintext),
HandshakeMessageFormat::Ciphertext => {
let ciphertext = self
.group
.encrypt(plaintext, self.configuration().padding_size())?;
MLSMessage::Ciphertext(ciphertext)
}
};
out.push(msg);
}
Ok(out)
}
/// Validate all pending proposals. The function returns `true` only if all
/// proposals are valid.
fn validate_proposal(
&self,
proposal: &Proposal,
sender: &LeafIndex,
indexed_members: &HashMap<LeafIndex, Credential>,
) -> bool {
let sender = &indexed_members[sender];
match proposal {
// Validate add proposals
Proposal::Add(add_proposal) => {
if let Some(validate_add) = self.managed_group_config.callbacks.validate_add {
if !validate_add(&self, sender, add_proposal.key_package.credential()) {
return false;
}
}
}
// Validate remove proposals
Proposal::Remove(remove_proposal) => {
if let Some(validate_remove) = self.managed_group_config.callbacks.validate_remove {
if !validate_remove(
&self,
sender,
&indexed_members[&LeafIndex::from(remove_proposal.removed)],
) {
return false;
}
}
}
// Update proposals don't have validators
Proposal::Update(_) => {}
Proposal::PreSharedKey(_) => {}
Proposal::ReInit(_) => {}
}
true
}
/// Validates the inline proposals from a Commit message
fn validate_inline_proposals(
&self,
proposals: &[ProposalOrRef],
sender: &LeafIndex,
indexed_members: &HashMap<LeafIndex, Credential>,
) -> bool {
for proposal_or_ref in proposals {
match proposal_or_ref {
ProposalOrRef::Proposal(proposal) => {
if !self.validate_proposal(proposal, sender, indexed_members) {
return false;
}
}
ProposalOrRef::Reference(_) => {}
}
}
true
}
/// Prepare the corresponding events for the proposals covered by the
/// Commit
fn prepare_events(
&self,
ciphersuite: &Ciphersuite,
proposals: &[ProposalOrRef],
sender: LeafIndex,
indexed_members: &HashMap<LeafIndex, Credential>,
) -> Vec<GroupEvent> {
let mut events = Vec::new();
// We want to collect the events in the order specified by the committer.
// We convert the pending proposals to a list of references
let pending_proposals_list = self
.pending_proposals
.iter()
.collect::<Vec<&MLSPlaintext>>();
// Build a proposal queue for easier searching
let pending_proposals_queue =
ProposalQueue::from_proposals_by_reference(ciphersuite, &pending_proposals_list);
for proposal_or_ref in proposals {
match proposal_or_ref {
ProposalOrRef::Proposal(proposal) => {
events.push(self.prepare_proposal_event(proposal, sender, indexed_members));
}
ProposalOrRef::Reference(proposal_reference) => {
if let Some(queued_proposal) = pending_proposals_queue.get(proposal_reference) {
events.push(self.prepare_proposal_event(
queued_proposal.proposal(),
queued_proposal.sender().to_leaf_index(),
indexed_members,
));
}
}
}
}
events
}
/// Prepare the corresponding events for the pending proposal list.
fn prepare_proposal_event(
&self,
proposal: &Proposal,
sender: LeafIndex,
indexed_members: &HashMap<LeafIndex, Credential>,
) -> GroupEvent {
let sender_credential = &indexed_members[&sender];
match proposal {
// Add proposals
Proposal::Add(add_proposal) => GroupEvent::MemberAdded(MemberAddedEvent::new(
self.aad.to_vec(),
sender_credential.clone(),
add_proposal.key_package.credential().clone(),
)),
// Update proposals
Proposal::Update(update_proposal) => {
GroupEvent::MemberUpdated(MemberUpdatedEvent::new(
self.aad.to_vec(),
update_proposal.key_package.credential().clone(),
))
}
// Remove proposals
Proposal::Remove(remove_proposal) => {
let removal = Removal::new(
self.credential_bundle.credential().clone(),
sender_credential.clone(),
indexed_members[&LeafIndex::from(remove_proposal.removed)].clone(),
);
GroupEvent::MemberRemoved(MemberRemovedEvent::new(self.aad.to_vec(), removal))
}
// PSK proposals
Proposal::PreSharedKey(psk_proposal) => {
let psk_id = psk_proposal.psk.clone();
GroupEvent::PskReceived(PskReceivedEvent::new(self.aad.to_vec(), psk_id))
}
// ReInit proposals
Proposal::ReInit(reinit_proposal) => {
GroupEvent::ReInit(ReInitEvent::new(self.aad.to_vec(), reinit_proposal.clone()))
}
}
}
/// Auto-save function
fn auto_save(&self) {
if let Some(auto_save) = self.managed_group_config.callbacks.auto_save {
auto_save(&self);
}
}
    /// Returns a map of (LeafIndex, Credential) pairs for the current members
fn indexed_members(&self) -> HashMap<LeafIndex, Credential> {
let mut indexed_members = HashMap::new();
let tree = self.group.tree();
let leaf_count = self.group.tree().leaf_count();
for index in 0..leaf_count.as_usize() {
let leaf_index = LeafIndex::from(index);
let leaf = &tree.nodes[leaf_index];
if let Some(leaf_node) = leaf.key_package() {
indexed_members.insert(leaf_index, leaf_node.credential().clone());
}
}
indexed_members
}
}
/// Unified message type
#[derive(PartialEq, Debug, Clone)]
pub enum MLSMessage {
/// An OpenMLS `MLSPlaintext`.
Plaintext(MLSPlaintext),
/// An OpenMLS `MLSCiphertext`.
Ciphertext(MLSCiphertext),
}
impl From<MLSPlaintext> for MLSMessage {
fn from(mls_plaintext: MLSPlaintext) -> Self {
MLSMessage::Plaintext(mls_plaintext)
}
}
impl From<MLSCiphertext> for MLSMessage {
fn from(mls_ciphertext: MLSCiphertext) -> Self {
MLSMessage::Ciphertext(mls_ciphertext)
}
}
impl MLSMessage {
/// Get the group ID as plain byte vector.
pub fn group_id(&self) -> Vec<u8> {
match self {
MLSMessage::Ciphertext(m) => m.group_id.as_slice(),
MLSMessage::Plaintext(m) => m.group_id().as_slice(),
}
}
/// Get the epoch as plain u64.
pub fn epoch(&self) -> u64 {
match self {
MLSMessage::Ciphertext(m) => m.epoch.0,
MLSMessage::Plaintext(m) => m.epoch().0,
}
}
/// Returns `true` if this is a handshake message and `false` otherwise.
pub fn is_handshake_message(&self) -> bool {
match self {
MLSMessage::Ciphertext(m) => m.is_handshake_message(),
MLSMessage::Plaintext(m) => m.is_handshake_message(),
}
}
}
layerList.js
import React from "react"
import { makeStyles } from "@material-ui/core/styles"
import List from "@material-ui/core/List"
import ListItem from "@material-ui/core/ListItem"
import ListItemIcon from "@material-ui/core/ListItemIcon"
import ListItemSecondaryAction from "@material-ui/core/ListItemSecondaryAction"
import ListItemText from "@material-ui/core/ListItemText"
import Switch from "@material-ui/core/Switch"
import CircularProgress from "@material-ui/core/CircularProgress"
import FlightIcon from "@material-ui/icons/Flight"
import Card from "@material-ui/core/Card"
import { FaSatellite } from "react-icons/fa"
import { GrSatellite } from "react-icons/gr"
import { BsLayers } from "react-icons/bs"
import { MdDateRange } from "react-icons/md"
import { useSelector, useDispatch } from "react-redux"
import Box from "@material-ui/core/Box"
import Accordion from "@material-ui/core/Accordion"
import AccordionSummary from "@material-ui/core/AccordionSummary"
import AccordionDetails from "@material-ui/core/AccordionDetails"
import ExpandMoreIcon from "@material-ui/icons/ExpandMore"
import moment from "moment"
import allActions from "../state/actions"
const useStyles = makeStyles((theme) => ({
root: {
width: "100%",
maxWidth: 360,
backgroundColor: theme.palette.background.paper,
},
}))
export default function LayerList({ campaign }) {
const classes = useStyles()
const state = useSelector((state) => state)
  const dispatch = useDispatch()
let dates = []
for (const [itemIndex, itemValue] of campaign.layers.entries()) {
const layerItems = itemValue
const layers = []
for (const [layerIndex, layerValue] of layerItems.items.entries()) {
let icon = <BsLayers />
let legendImage
let legendUrl = ""
if (layerValue.type === "track") {
legendUrl = campaign.legends["track"].url
} else {
if (campaign.legends[layerValue.shortName]) {
legendUrl = campaign.legends[layerValue.shortName].url
}
}
legendImage = (
<div>
<img className="legend" src={legendUrl} alt="legend" />
</div>
)
if (layerValue.platform === "satellite") {
icon = <FaSatellite />
} else if (layerValue.platform === "air") {
icon = <FlightIcon />
} else if (layerValue.platform === "ground") {
icon = <GrSatellite />
}
if (state.layerStatus.inProgress.indexOf(layerValue.layerId) !== -1) {
icon = <CircularProgress />
}
if (state.layerStatus.loaded.indexOf(layerValue.layerId) !== -1) {
icon = <div style={{ color: "green" }}>{icon}</div>
}
let layerStartTime = moment.utc(layerValue.start).format("HH:mm:ss")
let layerEndTime = moment.utc(layerValue.end).format("HH:mm:ss")
let layerVariable
if (layerValue.variableName) {
layerVariable = (
<span>
{" "}
<i>{"Displaying: "}</i> {layerValue.variableName + (layerValue.unit ? " (" + layerValue.unit + ")" : "")}
</span>
)
}
let layerAvailability
if (layerValue.start && layerValue.end) {
layerAvailability = (
<span>
{" "}
<i>{"Availability: "}</i> {layerStartTime + " - " + layerEndTime}
</span>
)
}
let layerVariableAvailability
if (layerVariable) {
layerVariableAvailability = (
<span style={{ fontSize: 12 }}>
{layerVariable}
{layerVariable && <br />}
{layerAvailability}
</span>
)
} else {
layerVariableAvailability = <span style={{ fontSize: 12 }}>{layerAvailability}</span>
}
layers.push(
<Card key={"primary-card-" + layerIndex} variant="outlined">
<ListItem key={"primary-item-" + layerIndex}>
<ListItemIcon>{icon}</ListItemIcon>
<ListItemText id={`primary-list-label-${layerValue.layerId}`} primary={layerValue.displayName} />
<ListItemSecondaryAction>
<Switch
edge="end"
onChange={() => dispatch(allActions.listActions.handleToggle(layerValue.layerId))}
checked={state.selectedLayers.indexOf(layerValue.layerId) !== -1}
inputProps={{
"aria-labelledby": `switch-list-label-${layerValue.layerId}`,
}}
/>
</ListItemSecondaryAction>
</ListItem>
{state.selectedLayers.indexOf(layerValue.layerId) !== -1 && layerVariableAvailability && (
<ListItem key={"secondary-item-variable" + layerIndex}>
<ListItemText id={`secondary-list-label-${layerValue.layerId}`} primary={layerVariableAvailability}></ListItemText>
</ListItem>
)}
{state.selectedLayers.indexOf(layerValue.layerId) !== -1 && legendImage && (
<ListItem key={"secondary-item-legend" + layerIndex}>
<ListItemText id={`secondary-list-label-${layerValue.layerId}`}>{legendImage}</ListItemText>
</ListItem>
)}
</Card>
)
}
let expanded = false
if (itemIndex === 0) {
expanded = true
}
dates.push(
<Accordion key={"panel" + itemIndex} defaultExpanded={expanded}>
<AccordionSummary expandIcon={<ExpandMoreIcon />} aria-controls="panel1a-content" key={"summary-panel" + itemIndex}>
<div style={{ width: "100%" }}>
<Box display="flex" justifyContent="center" m={1} p={1}>
<Box p={1}>
<MdDateRange /> {layerItems.date}
</Box>
</Box>
</div>
</AccordionSummary>
<AccordionDetails key={"details-panel" + itemIndex}>
<List key={itemIndex} className={classes.root}>
{layers}
</List>
</AccordionDetails>
</Accordion>
)
}
return dates
}
NAS_PDUSESSIONRELEASEREQUESTMessageIdentity_test.go
// Copyright 2019 free5GC.org
//
// SPDX-License-Identifier: Apache-2.0
//
package nasType_test
import (
"testing"
"github.com/omec-project/nas/nasType"
"github.com/stretchr/testify/assert"
)
func TestNasTypeNewPDUSESSIONRELEASEREQUESTMessageIdentity(t *testing.T)
|
type nasTypePDUSESSIONRELEASEREQUESTMessageIdentity struct {
in uint8
out uint8
}
var nasTypePDUSESSIONRELEASEREQUESTMessageIdentityTable = []nasTypePDUSESSIONRELEASEREQUESTMessageIdentity{
{0x03, 0x03},
}
func TestNasTypePDUSESSIONRELEASEREQUESTMessageIdentityGetSetMessageType(t *testing.T) {
a := nasType.NewPDUSESSIONRELEASEREQUESTMessageIdentity()
for _, table := range nasTypePDUSESSIONRELEASEREQUESTMessageIdentityTable {
a.SetMessageType(table.in)
assert.Equal(t, table.out, a.GetMessageType())
}
}
type PDUSESSIONRELEASEREQUESTMessageIdentityTestDataTemplate struct {
in nasType.PDUSESSIONRELEASEREQUESTMessageIdentity
out nasType.PDUSESSIONRELEASEREQUESTMessageIdentity
}
var PDUSESSIONRELEASEREQUESTMessageIdentityTestData = []nasType.PDUSESSIONRELEASEREQUESTMessageIdentity{
{0x03},
}
var PDUSESSIONRELEASEREQUESTMessageIdentityExpectedTestData = []nasType.PDUSESSIONRELEASEREQUESTMessageIdentity{
{0x03},
}
var PDUSESSIONRELEASEREQUESTMessageIdentityTable = []PDUSESSIONRELEASEREQUESTMessageIdentityTestDataTemplate{
{PDUSESSIONRELEASEREQUESTMessageIdentityTestData[0], PDUSESSIONRELEASEREQUESTMessageIdentityExpectedTestData[0]},
}
func TestNasTypePDUSESSIONRELEASEREQUESTMessageIdentity(t *testing.T) {
for _, table := range PDUSESSIONRELEASEREQUESTMessageIdentityTable {
a := nasType.NewPDUSESSIONRELEASEREQUESTMessageIdentity()
a.SetMessageType(table.in.GetMessageType())
assert.Equal(t, table.out.GetMessageType(), a.GetMessageType())
}
}
|
{
a := nasType.NewPDUSESSIONRELEASEREQUESTMessageIdentity()
assert.NotNil(t, a)
}
|
route.js
|
import homeRouter from './home-router'
const routes = [
{
path: '/',
name: 'Home',
redirect: '/main',
component: () => import('@/view/home/home'),
children: [...homeRouter],
},
{
path: '/login',
name: 'login',
component: () => import('@/view/login/login'),
},
{
|
path: '*',
},
]
export default routes
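// Minimal wiring sketch (assumes Vue 2 with vue-router 3, matching the lazy
// `() => import(...)` style used above; names are illustrative):
//
//   import Vue from 'vue'
//   import VueRouter from 'vue-router'
//   import routes from './route'
//
//   Vue.use(VueRouter)
//   export const router = new VueRouter({ mode: 'history', routes })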
|
redirect: '/404',
|
constants.rs
|
use crate::constants::CapabilityFlag::{
CapabilityClientConnAttr, CapabilityClientConnectWithDB, CapabilityClientDeprecateEOF,
CapabilityClientLongFlag, CapabilityClientLongPassword, CapabilityClientMultiResults,
CapabilityClientMultiStatements, CapabilityClientPluginAuth,
CapabilityClientPluginAuthLenencClientData, CapabilityClientProtocol41,
CapabilityClientSecureConnection, CapabilityClientTransactions,
};
// MAX_PACKET_SIZE is the maximum payload length of a packet the server supports.
pub const MAX_PACKET_SIZE: usize = (1 << 24) - 1;
// PROTOCOL_VERSION is the current version of the protocol.
// Always 10.
pub const PROTOCOL_VERSION: u8 = 10;
// MYSQL_NATIVE_PASSWORD uses a salt and transmits a hash on the wire.
pub const MYSQL_NATIVE_PASSWORD: &'static str = "mysql_native_password";
// MYSQL_CLEAR_PASSWORD transmits the password in the clear.
pub const MYSQL_CLEAR_PASSWORD: &'static str = "mysql_clear_password";
// MYSQL_DIALOG uses the dialog plugin on the client side. It transmits data in the clear.
pub const MYSQL_DIALOG: &'static str = "dialog";
// See http://dev.mysql.com/doc/internals/en/character-set.html#packet-Protocol::CharacterSet
pub const CHARACTER_SET_UTF8: u8 = 33;
pub const CHARACTER_SET_BINARY: i32 = 63;
// See http://dev.mysql.com/doc/internals/en/status-flags.html
pub const SERVER_STATUS_AUTOCOMMIT: u16 = 0x0002;
// Packet
pub const OK_PACKET: u8 = 0x00;
pub const ERR_PACKET: u8 = 0xff;
pub const EOF_PACKET: u8 = 0xfe;
// Flags
pub const SERVER_MORE_RESULTS_EXISTS: u16 = 0x0008;
// Originally found in include/mysql/mysql_com.h
#[allow(dead_code)]
pub enum CapabilityFlag {
// CapabilityClientLongPassword is CLIENT_LONG_PASSWORD.
// New more secure passwords. Assumed to be set since 4.1.1.
// We do not check this anywhere.
CapabilityClientLongPassword = 1,
// CapabilityClientFoundRows is CLIENT_FOUND_ROWS.
CapabilityClientFoundRows = 1 << 1,
// CapabilityClientLongFlag is CLIENT_LONG_FLAG.
// Longer flags in Protocol::ColumnDefinition320.
// Set it everywhere, not used, as we use Protocol::ColumnDefinition41.
CapabilityClientLongFlag = 1 << 2,
// CapabilityClientConnectWithDB is CLIENT_CONNECT_WITH_DB.
// One can specify db on connect.
CapabilityClientConnectWithDB = 1 << 3,
// CLIENT_NO_SCHEMA 1 << 4
// Do not permit database.table.column. We do permit it.
// CLIENT_COMPRESS 1 << 5
// We do not support compression. CPU is usually our bottleneck.
// CLIENT_ODBC 1 << 6
// No special behavior since 3.22.
// CLIENT_LOCAL_FILES 1 << 7
// Client can use LOCAL INFILE request of LOAD DATA|XML.
// We do not set it.
// CLIENT_IGNORE_SPACE 1 << 8
// Parser can ignore spaces before '('.
// We ignore this.
// CapabilityClientProtocol41 is CLIENT_PROTOCOL_41.
// New 4.1 protocol. Enforced everywhere.
CapabilityClientProtocol41 = 1 << 9,
// CLIENT_INTERACTIVE 1 << 10
// Not specified, ignored.
// CapabilityClientSSL is CLIENT_SSL.
// Switch to SSL after handshake.
CapabilityClientSSL = 1 << 11,
// CLIENT_IGNORE_SIGPIPE 1 << 12
// Do not issue SIGPIPE if network failures occur (libmysqlclient only).
// CapabilityClientTransactions is CLIENT_TRANSACTIONS.
// Can send status flags in EOF_Packet.
// This flag is optional in 3.23, but always set by the server since 4.0.
// We just do it all the time.
CapabilityClientTransactions = 1 << 13,
// CLIENT_RESERVED 1 << 14
// CapabilityClientSecureConnection is CLIENT_SECURE_CONNECTION.
// New 4.1 authentication. Always set, expected, never checked.
CapabilityClientSecureConnection = 1 << 15,
// CapabilityClientMultiStatements is CLIENT_MULTI_STATEMENTS
// Can handle multiple statements per ComQuery and ComStmtPrepare.
CapabilityClientMultiStatements = 1 << 16,
// CapabilityClientMultiResults is CLIENT_MULTI_RESULTS
// Can send multiple resultsets for ComQuery.
CapabilityClientMultiResults = 1 << 17,
// CapabilityClientPluginAuth is CLIENT_PLUGIN_AUTH.
// Client supports plugin authentication.
CapabilityClientPluginAuth = 1 << 19,
// CapabilityClientConnAttr is CLIENT_CONNECT_ATTRS
// Permits connection attributes in Protocol::HandshakeResponse41.
CapabilityClientConnAttr = 1 << 20,
// CapabilityClientPluginAuthLenencClientData is CLIENT_PLUGIN_AUTH_LENENC_CLIENT_DATA
CapabilityClientPluginAuthLenencClientData = 1 << 21,
// CLIENT_CAN_HANDLE_EXPIRED_PASSWORDS 1 << 22
// Announces support for expired password extension.
// Not yet supported.
// CLIENT_SESSION_TRACK 1 << 23
// Can set SERVER_SESSION_STATE_CHANGED in the Status Flags
// and send session-state change data after a OK packet.
// Not yet supported.
// CapabilityClientDeprecateEOF is CLIENT_DEPRECATE_EOF
// Expects an OK (instead of EOF) after the resultset rows of a Text Resultset.
CapabilityClientDeprecateEOF = 1 << 24,
}
// See https://dev.mysql.com/doc/internals/en/command-phase.html
#[derive(Copy, Clone)]
pub enum PacketType {
ComSleep,
ComQuit,
ComInitDB,
ComQuery,
ComFieldList,
ComCreateDb,
ComDropDb,
ComRefresh,
ComShutdown,
ComStatistics,
ComProcessInfo,
ComConnect,
ComProcessKill,
ComDebug,
ComPing,
ComTime,
ComDelayedInsert,
ComChangeUser,
ComBinlogDump,
ComTableDump,
ComConnectOut,
ComRegisterSlave,
ComStmtPrepare,
ComStmtExecute,
ComStmtSendLongData,
ComStmtClose,
ComStmtReset,
ComSetOption,
ComStmtFetch,
ComDaemon,
ComBinlogDumpGtid,
ComResetConnection,
}
impl Into<&'static str> for PacketType {
fn into(self) -> &'static str {
return match self {
PacketType::ComSleep => "COM_SLEEP",
PacketType::ComQuit => "COM_QUIT",
PacketType::ComInitDB => "COM_INIT_DB",
PacketType::ComQuery => "COM_QUERY",
PacketType::ComFieldList => "COM_FIELD_LIST",
PacketType::ComCreateDb => "COM_CREATE_DB",
PacketType::ComDropDb => "COM_DROP_DB",
PacketType::ComRefresh => "COM_REFRESH",
PacketType::ComShutdown => "COM_SHUTDOWN",
PacketType::ComStatistics => "COM_STATISTICS",
PacketType::ComProcessInfo => "COM_PROCESS_INFO",
PacketType::ComConnect => "COM_CONNECT",
PacketType::ComProcessKill => "COM_PROCESS_KILL",
PacketType::ComDebug => "COM_DEBUG",
PacketType::ComPing => "COM_PING",
PacketType::ComTime => "COM_TIME",
PacketType::ComDelayedInsert => "COM_DELAYED_INSERT",
PacketType::ComChangeUser => "COM_CHANGE_USER",
PacketType::ComBinlogDump => "COM_BINLOG_DUMP",
PacketType::ComTableDump => "COM_TABLE_DUMP",
PacketType::ComConnectOut => "COM_CONNECT_OUT",
PacketType::ComRegisterSlave => "COM_REGISTER_SLAVE",
PacketType::ComStmtPrepare => "COM_STMT_PREPARE",
PacketType::ComStmtExecute => "COM_STMT_EXECUTE",
PacketType::ComStmtSendLongData => "COM_STMT_SEND_LONG_DATA",
PacketType::ComStmtClose => "COM_STMT_CLOSE",
PacketType::ComStmtReset => "COM_STMT_RESET",
PacketType::ComSetOption => "COM_SET_OPTION",
PacketType::ComStmtFetch => "COM_STMT_FETCH",
PacketType::ComDaemon => "COM_DAEMON",
PacketType::ComBinlogDumpGtid => "COM_BINLOG_DUMP_GTID",
PacketType::ComResetConnection => "COM_RESET_CONNECTION",
};
}
}
impl ToString for PacketType {
fn to_string(&self) -> String {
let c: &'static str = (*self).into();
c.to_string()
}
}
impl Into<u16> for PacketType {
fn into(self) -> u16 {
return match self {
PacketType::ComSleep => 0x00,
PacketType::ComQuit => 0x01,
PacketType::ComInitDB => 0x02,
PacketType::ComQuery => 0x03,
PacketType::ComFieldList => 0x04,
PacketType::ComCreateDb => 0x05,
PacketType::ComDropDb => 0x06,
PacketType::ComRefresh => 0x07,
PacketType::ComShutdown => 0x08,
PacketType::ComStatistics => 0x09,
PacketType::ComProcessInfo => 0x0a,
PacketType::ComConnect => 0x0b,
PacketType::ComProcessKill => 0x0c,
PacketType::ComDebug => 0x0d,
PacketType::ComPing => 0x0e,
PacketType::ComTime => 0x0f,
PacketType::ComDelayedInsert => 0x10,
PacketType::ComChangeUser => 0x11,
PacketType::ComBinlogDump => 0x12,
PacketType::ComTableDump => 0x13,
PacketType::ComConnectOut => 0x14,
PacketType::ComRegisterSlave => 0x15,
PacketType::ComStmtPrepare => 0x16,
PacketType::ComStmtExecute => 0x17,
PacketType::ComStmtSendLongData => 0x18,
PacketType::ComStmtClose => 0x19,
PacketType::ComStmtReset => 0x1a,
PacketType::ComSetOption => 0x1b,
PacketType::ComStmtFetch => 0x1c,
PacketType::ComDaemon => 0x1d,
PacketType::ComBinlogDumpGtid => 0x1e,
PacketType::ComResetConnection => 0x1f,
};
}
}
impl From<u64> for PacketType {
fn from(integer: u64) -> Self {
return match integer {
0x00 => PacketType::ComSleep,
0x01 => PacketType::ComQuit,
0x02 => PacketType::ComInitDB,
0x03 => PacketType::ComQuery,
0x04 => PacketType::ComFieldList,
0x05 => PacketType::ComCreateDb,
0x06 => PacketType::ComDropDb,
0x07 => PacketType::ComRefresh,
0x08 => PacketType::ComShutdown,
0x09 => PacketType::ComStatistics,
0x0a => PacketType::ComProcessInfo,
0x0b => PacketType::ComConnect,
0x0c => PacketType::ComProcessKill,
0x0d => PacketType::ComDebug,
0x0e => PacketType::ComPing,
0x0f => PacketType::ComTime,
0x10 => PacketType::ComDelayedInsert,
0x11 => PacketType::ComChangeUser,
0x12 => PacketType::ComBinlogDump,
0x13 => PacketType::ComTableDump,
0x14 => PacketType::ComConnectOut,
0x15 => PacketType::ComRegisterSlave,
0x16 => PacketType::ComStmtPrepare,
0x17 => PacketType::ComStmtExecute,
0x18 => PacketType::ComStmtSendLongData,
0x19 => PacketType::ComStmtClose,
0x1a => PacketType::ComStmtReset,
0x1b => PacketType::ComSetOption,
0x1c => PacketType::ComStmtFetch,
0x1d => PacketType::ComDaemon,
0x1e => PacketType::ComBinlogDumpGtid,
0x1f => PacketType::ComResetConnection,
            _ => {
                panic!("Unknown packet type: {:#04x}", integer);
            }
};
}
}
macro_rules! impl_from {
($t:ty) => {
impl From<$t> for PacketType {
fn from(v: $t) -> Self {
(v as u64).into()
}
}
};
}
impl_from!(u8);
impl_from!(u16);
impl_from!(u32);
impl_from!(usize);
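// A small illustrative round-trip check (sketch; assumes this file is built
// as part of a crate where `cargo test` runs): the From/Into impls above
// translate between wire command bytes and PacketType values.
#[cfg(test)]
mod packet_type_tests {
    use super::*;

    #[test]
    fn com_query_round_trip() {
        // 0x03 is the wire byte for COM_QUERY.
        let pt = PacketType::from(0x03u8);
        let byte: u16 = pt.into();
        assert_eq!(byte, 0x03);
        assert_eq!(pt.to_string(), "COM_QUERY");
    }
}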
// Error codes for client-side errors.
// Originally found in include/mysql/errmsg.h and
// https://dev.mysql.com/doc/refman/5.7/en/error-messages-client.html
#[allow(dead_code)]
enum ClientError {
// CRUnknownError is CR_UNKNOWN_ERROR
CRUnknownError = 2000,
// CRConnectionError is CR_CONNECTION_ERROR
// This is returned if a connection via a Unix socket fails.
CRConnectionError = 2002,
// CRConnHostError is CR_CONN_HOST_ERROR
// This is returned if a connection via a TCP socket fails.
CRConnHostError = 2003,
// CRServerGone is CR_SERVER_GONE_ERROR.
// This is returned if the client tries to send a command but it fails.
CRServerGone = 2006,
// CRVersionError is CR_VERSION_ERROR
// This is returned if the server versions don't match what we support.
CRVersionError = 2007,
// CRServerHandshakeErr is CR_SERVER_HANDSHAKE_ERR
CRServerHandshakeErr = 2012,
// CRServerLost is CR_SERVER_LOST.
// Used when:
// - the client cannot write an initial auth packet.
// - the client cannot read an initial auth packet.
// - the client cannot read a response from the server.
CRServerLost = 2013,
// CRCommandsOutOfSync is CR_COMMANDS_OUT_OF_SYNC
// Sent when the streaming calls are not done in the right order.
CRCommandsOutOfSync = 2014,
// CRNamedPipeStateError is CR_NAMEDPIPESETSTATE_ERROR.
// This is the highest possible number for a connection error.
CRNamedPipeStateError = 2018,
// CRCantReadCharset is CR_CANT_READ_CHARSET
CRCantReadCharset = 2019,
// CRSSLConnectionError is CR_SSL_CONNECTION_ERROR
CRSSLConnectionError = 2026,
// CRMalformedPacket is CR_MALFORMED_PACKET
CRMalformedPacket = 2027,
}
// Error codes for server-side errors.
// Originally found in include/mysql/mysqld_error.h and
// https://dev.mysql.com/doc/refman/5.7/en/error-messages-server.html
// The below are in sorted order by value, grouped by vterror code they should be bucketed into.
// See above reference for more information on each code.
#[allow(dead_code)]
pub enum ServerError {
// unknown
ERUnknownError = 1105,
// unimplemented
ERNotSupportedYet = 1235,
// resource exhausted
ERDiskFull = 1021,
EROutOfMemory = 1037,
EROutOfSortMemory = 1038,
ERConCount = 1040,
EROutOfResources = 1041,
ERRecordFileFull = 1114,
ERHostIsBlocked = 1129,
ERCantCreateThread = 1135,
ERTooManyDelayedThreads = 1151,
ERNetPacketTooLarge = 1153,
ERTooManyUserConnections = 1203,
ERLockTableFull = 1206,
ERUserLimitReached = 1226,
// deadline exceeded
ERLockWaitTimeout = 1205,
// unavailable
ERServerShutdown = 1053,
// not found
ERFormNotFound = 1029,
ERKeyNotFound = 1032,
ERBadFieldError = 1054,
ERNoSuchThread = 1094,
ERUnknownTable = 1109,
ERCantFindUDF = 1122,
ERNonExistingGrant = 1141,
ERNoSuchTable = 1146,
ERNonExistingTableGrant = 1147,
ERKeyDoesNotExist = 1176,
// permissions
ERDBAccessDenied = 1044,
ERAccessDeniedError = 1045,
ERKillDenied = 1095,
ERNoPermissionToCreateUsers = 1211,
ERSpecifiedAccessDenied = 1227,
// failed precondition
ERNoDb = 1046,
ERNoSuchIndex = 1082,
ERCantDropFieldOrKey = 1091,
ERTableNotLockedForWrite = 1099,
ERTableNotLocked = 1100,
ERTooBigSelect = 1104,
ERNotAllowedCommand = 1148,
ERTooLongString = 1162,
ERDelayedInsertTableLocked = 1165,
ERDupUnique = 1169,
ERRequiresPrimaryKey = 1173,
ERCantDoThisDuringAnTransaction = 1179,
ERReadOnlyTransaction = 1207,
ERCannotAddForeign = 1215,
ERNoReferencedRow = 1216,
ERRowIsReferenced = 1217,
ERCantUpdateWithReadLock = 1223,
ERNoDefault = 1230,
EROperandColumns = 1241,
ERSubqueryNo1Row = 1242,
ERNonUpdateableTable = 1288,
ERFeatureDisabled = 1289,
EROptionPreventsStatement = 1290,
ERDuplicatedValueInType = 1291,
ERRowIsReferenced2 = 1451,
ErNoReferencedRow2 = 1452,
// already exists
ERTableExists = 1050,
ERDupEntry = 1062,
ERFileExists = 1086,
ERUDFExists = 1125,
// aborted
ERGotSignal = 1078,
ERForcingClose = 1080,
ERAbortingConnection = 1152,
ERLockDeadlock = 1213,
// invalid arg
ERUnknownComError = 1047,
ERBadNullError = 1048,
ERBadDb = 1049,
ERBadTable = 1051,
ERNonUniq = 1052,
ERWrongFieldWithGroup = 1055,
ERWrongGroupField = 1056,
ERWrongSumSelect = 1057,
ERWrongValueCount = 1058,
ERTooLongIdent = 1059,
ERDupFieldName = 1060,
ERDupKeyName = 1061,
ERWrongFieldSpec = 1063,
ERParseError = 1064,
EREmptyQuery = 1065,
ERNonUniqTable = 1066,
ERInvalidDefault = 1067,
ERMultiplePriKey = 1068,
ERTooManyKeys = 1069,
ERTooManyKeyParts = 1070,
ERTooLongKey = 1071,
ERKeyColumnDoesNotExist = 1072,
ERBlobUsedAsKey = 1073,
ERTooBigFieldLength = 1074,
ERWrongAutoKey = 1075,
ERWrongFieldTerminators = 1083,
ERBlobsAndNoTerminated = 1084,
ERTextFileNotReadable = 1085,
ERWrongSubKey = 1089,
ERCantRemoveAllFields = 1090,
ERUpdateTableUsed = 1093,
ERNoTablesUsed = 1096,
ERTooBigSet = 1097,
ERBlobCantHaveDefault = 1101,
ERWrongDbName = 1102,
ERWrongTableName = 1103,
ERUnknownProcedure = 1106,
ERWrongParamCountToProcedure = 1107,
ERWrongParametersToProcedure = 1108,
ERFieldSpecifiedTwice = 1110,
ERInvalidGroupFuncUse = 1111,
ERTableMustHaveColumns = 1113,
ERUnknownCharacterSet = 1115,
ERTooManyTables = 1116,
ERTooManyFields = 1117,
ERTooBigRowSize = 1118,
ERWrongOuterJoin = 1120,
ERNullColumnInIndex = 1121,
ERFunctionNotDefined = 1128,
ERWrongValueCountOnRow = 1136,
ERInvalidUseOfNull = 1138,
ERRegexpError = 1139,
ERMixOfGroupFuncAndFields = 1140,
ERIllegalGrantForTable = 1144,
ERSyntaxError = 1149,
ERWrongColumnName = 1166,
ERWrongKeyColumn = 1167,
ERBlobKeyWithoutLength = 1170,
ERPrimaryCantHaveNull = 1171,
ERTooManyRows = 1172,
ERUnknownSystemVariable = 1193,
ERSetConstantsOnly = 1204,
ERWrongArguments = 1210,
ERWrongUsage = 1221,
ERWrongNumberOfColumnsInSelect = 1222,
ERDupArgument = 1225,
ERLocalVariable = 1228,
ERGlobalVariable = 1229,
ERWrongValueForVar = 1231,
ERWrongTypeForVar = 1232,
ERVarCantBeRead = 1233,
ERCantUseOptionHere = 1234,
ERIncorrectGlobalLocalVar = 1238,
ERWrongFKDef = 1239,
ERKeyRefDoNotMatchTableRef = 1240,
ERCyclicReference = 1245,
ERCollationCharsetMismatch = 1253,
ERCantAggregate2Collations = 1267,
ERCantAggregate3Collations = 1270,
ERCantAggregateNCollations = 1271,
ERVariableIsNotStruct = 1272,
ERUnknownCollation = 1273,
ERWrongNameForIndex = 1280,
ERWrongNameForCatalog = 1281,
ERBadFTColumn = 1283,
ERTruncatedWrongValue = 1292,
ERTooMuchAutoTimestampCols = 1293,
ERInvalidOnUpdate = 1294,
ERUnknownTimeZone = 1298,
ERInvalidCharacterString = 1300,
ERIllegalReference = 1247,
ERDerivedMustHaveAlias = 1248,
ERTableNameNotAllowedHere = 1250,
ERQueryInterrupted = 1317,
ERTruncatedWrongValueForField = 1366,
ERDataTooLong = 1406,
ERDataOutOfRange = 1690,
}
// Sql states for errors.
// Originally found in include/mysql/sql_state.h
#[allow(dead_code)]
pub enum StateError {
// SSUnknownSqlstate is ER_SIGNAL_EXCEPTION in
// include/mysql/sql_state.h, but:
// const char *unknown_sqlstate= "HY000"
// in client.c. So using that one.
SSUnknownSQLState,
// SSUnknownComError is ER_UNKNOWN_COM_ERROR
SSUnknownComError,
// SSHandshakeError is ER_HANDSHAKE_ERROR
SSHandshakeError,
// SSServerShutdown is ER_SERVER_SHUTDOWN
SSServerShutdown,
// SSDataTooLong is ER_DATA_TOO_LONG
SSDataTooLong,
// SSDataOutOfRange is ER_DATA_OUT_OF_RANGE
SSDataOutOfRange,
// SSBadNullError is ER_BAD_NULL_ERROR
SSBadNullError,
// SSBadFieldError is ER_BAD_FIELD_ERROR
SSBadFieldError,
// SSDupKey is ER_DUP_KEY
SSDupKey,
// SSCantDoThisDuringAnTransaction is
// ER_CANT_DO_THIS_DURING_AN_TRANSACTION
SSCantDoThisDuringAnTransaction,
// SSAccessDeniedError is ER_ACCESS_DENIED_ERROR
SSAccessDeniedError,
// SSLockDeadlock is ER_LOCK_DEADLOCK
SSLockDeadlock,
}
impl Into<&'static str> for StateError {
fn into(self) -> &'static str
|
}
impl Into<String> for StateError {
fn into(self) -> String {
let s: &'static str = self.into();
s.into()
}
}
// CharacterSetMap maps the charset name (used in ConnParams) to the
// integer value. Interesting ones have their own constant above.
fn convert_character_value(c: &str) -> i32 {
return match c {
"big5" => 1,
"dec8" => 3,
"cp850" => 4,
"hp8" => 6,
"koi8r" => 7,
"latin1" => 8,
"latin2" => 9,
"swe7" => 10,
"ascii" => 11,
"ujis" => 12,
"sjis" => 13,
"hebrew" => 16,
"tis620" => 18,
"euckr" => 19,
"koi8u" => 22,
"gb2312" => 24,
"greek" => 25,
"cp1250" => 26,
"gbk" => 28,
"latin5" => 30,
"armscii8" => 32,
"utf8" => CHARACTER_SET_UTF8 as i32,
"ucs2" => 35,
"cp866" => 36,
"keybcs2" => 37,
"macce" => 38,
"macroman" => 39,
"cp852" => 40,
"latin7" => 41,
"utf8mb4" => 45,
"cp1251" => 51,
"utf16" => 54,
"utf16le" => 56,
"cp1256" => 57,
"cp1257" => 59,
"utf32" => 60,
"binary" => CHARACTER_SET_BINARY,
"geostd8" => 92,
"cp932" => 95,
"eucjpms" => 97,
        _ => {
            panic!("Unknown character set: {}", c);
        }
};
}
fn is_conn_err(num: i32) -> bool {
(num >= ClientError::CRUnknownError as i32 && num <= ClientError::CRNamedPipeStateError as i32)
|| num == ServerError::ERQueryInterrupted as i32
}
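// Spot checks for the helpers above (illustrative test sketch, using only
// items defined in this file):
#[cfg(test)]
mod helper_tests {
    use super::*;

    #[test]
    fn charset_and_connection_errors() {
        assert_eq!(convert_character_value("utf8"), CHARACTER_SET_UTF8 as i32);
        assert_eq!(convert_character_value("binary"), CHARACTER_SET_BINARY);
        // 2013 (CR_SERVER_LOST) lies inside the client connection-error range.
        assert!(is_conn_err(ClientError::CRServerLost as i32));
        // A server-side error code such as ER_PARSE_ERROR is not one.
        assert!(!is_conn_err(ServerError::ERParseError as i32));
    }
}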
pub const DEFAULT_CLIENT_CAPABILITY: u32 = CapabilityClientLongPassword as u32
| CapabilityClientLongFlag as u32
| CapabilityClientProtocol41 as u32
| CapabilityClientTransactions as u32
| CapabilityClientMultiStatements as u32
| CapabilityClientPluginAuth as u32
| CapabilityClientDeprecateEOF as u32
| CapabilityClientSecureConnection as u32;
pub const DEFAULT_SERVER_CAPABILITY: u32 = CapabilityClientLongPassword as u32
| CapabilityClientLongFlag as u32
| CapabilityClientConnectWithDB as u32
| CapabilityClientProtocol41 as u32
| CapabilityClientTransactions as u32
| CapabilityClientSecureConnection as u32
| CapabilityClientMultiStatements as u32
| CapabilityClientMultiResults as u32
| CapabilityClientPluginAuth as u32
| CapabilityClientPluginAuthLenencClientData as u32
| CapabilityClientDeprecateEOF as u32
| CapabilityClientConnAttr as u32;
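// Capability sets are plain bitmasks, so membership tests are a bitwise AND.
// A quick illustrative check (sketch only):
#[cfg(test)]
mod capability_tests {
    use super::*;

    #[test]
    fn default_client_capability_contents() {
        // Protocol 4.1 support was OR-ed into the default client set...
        assert_ne!(
            DEFAULT_CLIENT_CAPABILITY & (CapabilityFlag::CapabilityClientProtocol41 as u32),
            0
        );
        // ...while SSL was not.
        assert_eq!(
            DEFAULT_CLIENT_CAPABILITY & (CapabilityFlag::CapabilityClientSSL as u32),
            0
        );
    }
}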
pub const DEFAULT_SALT: &'static [u8; 20] = &[
0x77, 0x63, 0x6a, 0x6d, 0x61, 0x22, 0x23, 0x27, // first part
0x38, 0x26, 0x55, 0x58, 0x3b, 0x5d, 0x44, 0x78, 0x53, 0x73, 0x6b, 0x41,
];
pub enum TLSVersion {
VersionTLS10 = 0x0301,
VersionTLS11 = 0x0302,
VersionTLS12 = 0x0303,
VersionTLS13 = 0x0304,
VersionSSL30 = 0x0300,
}
impl From<u64> for TLSVersion {
fn from(ver: u64) -> Self {
match ver {
0x0301 => TLSVersion::VersionTLS10,
0x0302 => TLSVersion::VersionTLS11,
0x0303 => TLSVersion::VersionTLS12,
0x0304 => TLSVersion::VersionTLS13,
0x0300 => TLSVersion::VersionSSL30,
            _ => panic!("Unexpected TLS version: {:#06x}", ver),
}
}
}
macro_rules! impl_from_d {
($t:ty,$s:ty) => {
impl From<$t> for $s {
fn from(v: $t) -> Self {
(v as u64).into()
}
}
};
}
impl_from_d!(u8, TLSVersion);
impl_from_d!(u16, TLSVersion);
impl_from_d!(u32, TLSVersion);
impl_from_d!(usize, TLSVersion);
|
{
return match self {
StateError::SSUnknownSQLState => "HY000",
StateError::SSUnknownComError => "08S01",
StateError::SSHandshakeError => "08S01",
StateError::SSServerShutdown => "08S01",
StateError::SSDataTooLong => "22001",
StateError::SSDataOutOfRange => "22003",
StateError::SSBadNullError => "23000",
StateError::SSBadFieldError => "42S22",
StateError::SSDupKey => "23000",
StateError::SSCantDoThisDuringAnTransaction => "25000",
StateError::SSAccessDeniedError => "28000",
StateError::SSLockDeadlock => "40001",
};
}
|
upset.py
|
import itertools
import pandas as pd
from typing import Dict, Set, Hashable
def upset_from_dict_of_sets(inputs: Dict[Hashable, Set[Hashable]]):
    ''' Given a dictionary of sets, produce input ready for the `upsetplot` python package.
    We produce this input by computing the size of the intersection of every
    combination of the input sets, keeping only non-empty intersections.
Example:
```python
import upsetplot
from maayanlab_bioinformatics.plotting import upset_from_dict_of_sets
upsetplot.plot(upset_from_dict_of_sets({
'A': {'a', 'b', 'c'},
'B': {'b', 'c', 'd'},
'C': {'d', 'e', 'f'},
}))
|
:return: (pd.DataFrame) in a form ready for `upsetplot.plot`
'''
sets = []
for n in range(1, len(inputs)+1):
if n == 1:
it = [[k] for k in inputs.keys()]
else:
it = map(list, itertools.combinations(inputs.keys(), n))
for V in it:
size = len(inputs[V[0]] if n == 1 else set.intersection(*[inputs[v] for v in V]))
if size > 0:
sets.append(dict({vv: vv in V for vv in inputs.keys()}, size=size))
return pd.DataFrame(sets).groupby(list(inputs.keys()))['size'].sum()
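# A quick self-check sketch for the docstring example above (names mirror the
# docstring's example inputs; run this module directly to verify):
if __name__ == '__main__':
    df = upset_from_dict_of_sets({
        'A': {'a', 'b', 'c'},
        'B': {'b', 'c', 'd'},
        'C': {'d', 'e', 'f'},
    })
    # The index is a boolean MultiIndex over (A, B, C) membership flags;
    # A & B share {'b', 'c'}, so that row has size 2.
    assert df.loc[(True, True, False)] == 2
    print(df)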
|
```
:param inputs: (Dict[Hashable, Set[Hashable]]) Several named sets
|
receivers.py
|
from __future__ import absolute_import
from sentry.incidents.models import IncidentSuspectCommit
from sentry.signals import release_commits_updated
@release_commits_updated.connect(weak=False)
def handle_release_commits_updated(removed_commit_ids, added_commit_ids, **kwargs):
|
IncidentSuspectCommit.objects.filter(commit_id__in=removed_commit_ids | added_commit_ids)
.values_list("incident_id", flat=True)
.distinct()
)
for incident_id in incident_ids:
calculate_incident_suspects.apply_async(kwargs={"incident_id": incident_id})
|
from sentry.incidents.tasks import calculate_incident_suspects
incident_ids = (
|
repo_test.go
|
package base
import (
"context"
"testing"
)
func
|
(t *testing.T) {
r := NewRepo("https://github.com/gojet/layout-api.git")
if err := r.Clone(context.Background()); err != nil {
t.Fatal(err)
}
if err := r.CopyTo(context.Background(), "/tmp/test_gojet_repo", "github.com/gojet/layout-api", nil); err != nil {
t.Fatal(err)
}
}
|
TestRepo
|
template_builder.go
|
// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
// or more contributor license agreements. Licensed under the Elastic License;
// you may not use this file except in compliance with the Elastic License.
package gcp
import (
"errors"
"fmt"
"path/filepath"
cloudfunctions "google.golang.org/api/cloudfunctions/v1"
yaml "gopkg.in/yaml.v2"
"github.com/elastic/beats/libbeat/common"
"github.com/elastic/beats/libbeat/logp"
"github.com/elastic/beats/x-pack/functionbeat/function/provider"
"github.com/elastic/beats/x-pack/functionbeat/manager/core"
"github.com/elastic/beats/x-pack/functionbeat/manager/core/bundle"
fngcp "github.com/elastic/beats/x-pack/functionbeat/provider/gcp/gcp"
)
const (
runtime = "go111" // Golang 1.11
archiveURL = "gs://%s/%s" // path to the function archive
locationTemplate = "projects/%s/locations/%s" // full name of the location
	functionName     = locationTemplate + "/functions/%s" // full name of the function
// Package size limits for GCP provider
// Ref: https://cloud.google.com/functions/quotas
packageCompressedLimit = 100 * 1000 * 1000 // 100MB
packageUncompressedLimit = 500 * 1000 * 1000 // 500MB
)
// defaultTemplateBuilder builds request object when deploying Functionbeat using
// the command deploy.
type defaultTemplateBuilder struct {
provider provider.Provider
log *logp.Logger
gcpConfig *Config
}
type functionData struct {
raw []byte
function *cloudfunctions.CloudFunction
}
// NewTemplateBuilder returns the requested template builder
func NewTemplateBuilder(log *logp.Logger, cfg *common.Config, p provider.Provider) (provider.TemplateBuilder, error) {
gcpCfg := &Config{}
err := cfg.Unpack(gcpCfg)
if err != nil {
return &defaultTemplateBuilder{}, err
}
return &defaultTemplateBuilder{log: log, gcpConfig: gcpCfg, provider: p}, nil
}
func (d *defaultTemplateBuilder) execute(name string) (*functionData, error) {
d.log.Debug("Compressing all assets into an artifact")
fn, err := findFunction(d.provider, name)
if err != nil {
return nil, err
}
resources := zipResourcesOfFunc(fn.Name())
raw, err := core.MakeZip(packageUncompressedLimit, packageCompressedLimit, resources)
if err != nil {
return nil, err
}
d.log.Debugf("Compression is successful (zip size: %d bytes)", len(raw))
return &functionData{
raw: raw,
function: d.cloudFunction(name, fn.Config()),
}, nil
}
func findFunction(p provider.Provider, name string) (installer, error) {
fn, err := p.FindFunctionByName(name)
if err != nil {
return nil, err
}
function, ok := fn.(installer)
if !ok {
		return nil, errors.New("incompatible type received, expecting: 'installer'")
}
return function, nil
}
func (d *defaultTemplateBuilder) cloudFunction(name string, config *fngcp.FunctionConfig) *cloudfunctions.CloudFunction {
fnName := fmt.Sprintf(functionName, d.gcpConfig.ProjectID, d.gcpConfig.Location, name)
sourceArchiveURL := fmt.Sprintf(archiveURL, d.gcpConfig.FunctionStorage, name)
return &cloudfunctions.CloudFunction{
Name: fnName,
Description: config.Description,
EntryPoint: config.EntryPoint(),
EnvironmentVariables: map[string]string{
"ENABLED_FUNCTIONS": name,
"BEAT_STRICT_PERMS": "false",
},
EventTrigger: &cloudfunctions.EventTrigger{
EventType: config.Trigger.EventType,
Resource: config.Trigger.Resource,
Service: config.Trigger.Service,
},
Labels: config.Labels,
MaxInstances: int64(config.MaxInstances),
Runtime: runtime,
ServiceAccountEmail: config.ServiceAccountEmail,
|
}
// RawTemplate returns the JSON to POST to the endpoint.
func (d *defaultTemplateBuilder) RawTemplate(name string) (string, error) {
fn, err := findFunction(d.provider, name)
if err != nil {
return "", err
}
config := fn.Config()
properties := common.MapStr{
"codeLocation": "pkg/" + fn.Name(),
"codeBucket": d.gcpConfig.FunctionStorage,
"codeBucketObject": "functionbeat.zip",
"location": d.gcpConfig.Location,
"runtime": runtime,
"entryPoint": config.EntryPoint(),
"eventTrigger": config.Trigger,
"environmentVariables": common.MapStr{
"ENABLED_FUNCTIONS": name,
"BEAT_STRICT_PERMS": false,
},
}
if config.Timeout != "" {
properties["timeout"] = config.Timeout
}
if config.MemorySize != "" {
properties["availableMemoryMb"] = config.MemorySize
}
if len(config.ServiceAccountEmail) > 0 {
properties["serviceAccountEmail"] = config.ServiceAccountEmail
}
if len(config.Labels) > 0 {
properties["labels"] = config.Labels
}
if config.MaxInstances > 0 {
properties["maxInstances"] = config.MaxInstances
}
if len(config.VPCConnector) > 0 {
properties["vpcConnector"] = config.VPCConnector
}
output := common.MapStr{
"resources": []common.MapStr{
common.MapStr{
"name": fmt.Sprintf(functionName, d.gcpConfig.ProjectID, d.gcpConfig.Location, name),
"type": "google.cloud.functions.v1.CloudFunction",
"properties": properties,
},
},
}
yamlBytes, err := yaml.Marshal(output)
return string(yamlBytes), err
}
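// For orientation, RawTemplate above produces a manifest of roughly this
// shape (illustrative sketch; concrete values come from the function config):
//
//	resources:
//	- name: projects/<project>/locations/<location>/functions/<name>
//	  type: google.cloud.functions.v1.CloudFunction
//	  properties:
//	    codeLocation: pkg/<name>
//	    codeBucket: <function-storage-bucket>
//	    runtime: go111
//	    entryPoint: <entry-point>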
func zipResources() map[string][]bundle.Resource {
functions, err := provider.ListFunctions("gcp")
if err != nil {
fmt.Println(err)
return nil
}
resources := make(map[string][]bundle.Resource)
for _, f := range functions {
resources["gcp-"+f] = zipResourcesOfFunc(f)
}
return resources
}
func zipResourcesOfFunc(typeName string) []bundle.Resource {
root := filepath.Join("pkg", typeName)
vendor := bundle.Folder(filepath.Join("pkg", typeName, "vendor"), filepath.Join("pkg", typeName), 0644)
return append(vendor, &bundle.LocalFile{Path: filepath.Join(root, typeName+".go"), FileMode: 0755})
}
|
SourceArchiveUrl: sourceArchiveURL,
Timeout: config.Timeout,
VpcConnector: config.VPCConnector,
}
|
active_learning.py
|
# Python standard libraries
import argparse
import glob
import json
import logging
import logging.config
import os
import sys
# Non-standard includes
import numpy as np
import tensorflow as tf
# Maybe import tqdm
show_progress = False
try:
import tqdm
show_progress = True
except ImportError:
pass
tkinter_available = True
try:
    import tkinter
    import tkinter.filedialog
    tkinter.Tk().withdraw()
except ImportError:
    # Defer the failure: tkinter is only required when an unlabelled
    # directory is supplied; this is checked after argument parsing below.
    tkinter_available = False
except Exception as e:
    print(e)
    tkinter_available = False
# User includes
import models
import datasets
import tensortools as tt
# Lowest representable float32
EPSILON = np.finfo(np.float32).tiny
def main(args, logger):
# Retrieve training parameters for convenience
params = args.params # All parameters
    hparams = params["hyperparams"] # Hyperparameters
alparams = params["active_learning"] # Active learning parameters
state = None # State dict
# Define state and config filenames
state_filename = os.path.join(args.log_dir, "state.json")
config_filename = os.path.join(args.log_dir, "config.json")
if not os.path.exists(args.log_dir):
os.makedirs(args.log_dir)
# Dump parameter config
with open(config_filename, "w+") as f:
json.dump(params, f, indent=4)
# Retrieve dataset specific object
if args.dataset == "cityscapes":
dataset = datasets.Cityscapes(coarse=args.coarse)
test_examples_glob = os.path.join(args.data_dir, "val", "*.tfrecord")
elif args.dataset == "freiburg":
dataset = datasets.Freiburg()
test_examples_glob = os.path.join(args.data_dir, "test", "*.tfrecord")
elif args.dataset == "vistas":
dataset = datasets.Vistas()
test_examples_glob = os.path.join(args.data_dir, "val", "*.tfrecord")
else:
raise NotImplementedError("Dataset \"%s\" not supported" % args.dataset)
# Prepare dataset example file paths.
train_examples_glob = os.path.join(args.data_dir, "train", "*.tfrecord")
if not os.path.exists(state_filename):
# Initialize state
# Resolve example filenames
train_val_examples = np.sort(np.array(glob.glob(train_examples_glob)))
# Pick examples from training set to use for validation
val_examples = train_val_examples[:alparams["num_validation"]]
# Use the rest as training examples
train_examples = train_val_examples[alparams["num_validation"]:]
# Use annotated test set, NOTE: cityscapes validation set
test_examples = np.array(glob.glob(test_examples_glob))
# Draw random train examples and mark as annotated
train_indices = np.arange(len(train_examples), dtype=np.int32)
np.random.shuffle(train_indices)
initially_labelled = alparams["num_initially_labelled"]
if initially_labelled < 0:
# Use rest of labelled examples
initially_labelled = len(train_examples)
# Possibly add actually unlabelled examples
no_label_indices = np.empty(0, dtype=str)
if args.unlabelled is not None:
no_label_glob = os.path.join(args.unlabelled, "*.tfrecord")
no_label_examples = glob.glob(no_label_glob)
no_label_indices = np.arange(
len(train_indices), len(train_indices)+len(no_label_examples)
)
            train_examples = np.concatenate((train_examples,
                                             no_label_examples))
train_indices = np.concatenate((train_indices, no_label_indices))
labelled = train_indices[:initially_labelled]
unlabelled = train_indices[initially_labelled:]
del train_indices
# Setup initial state
state = {
"checkpoint" : None, # Keep track of latest checkpoint.
"iteration" : 0,
"dataset" : {
"train" : {
"filenames" : list(train_examples),
"labelled" : labelled.tolist(),
"unlabelled" : unlabelled.tolist(),
"no_label" : no_label_indices.tolist()
},
"val" : {
"filenames" : list(val_examples)
},
"test" : {
"filenames" : list(test_examples)
}
}
}
with open(state_filename, "w+") as f:
json.dump(state, f, indent=2)
else:
# Load state
with open(state_filename, "r") as f:
state = json.load(f)
# Extract filename properties
train_examples = np.array(state["dataset"]["train"]["filenames"])
val_examples = np.array(state["dataset"]["val"]["filenames"])
test_examples = np.array(state["dataset"]["test"]["filenames"])
labelled = np.array(state["dataset"]["train"]["labelled"])
unlabelled = np.array(state["dataset"]["train"]["unlabelled"])
no_label_indices = np.array(state["dataset"]["train"]["no_label"])
train_input_labelled = np.full_like(train_examples, False, dtype=bool)
train_input_labelled[labelled] = True
train_input_indices = np.arange(len(train_examples))
with tf.device("/device:CPU:0"):
with tf.name_scope("Datasets"):
# Create input placeholders
train_input = tt.input.NumpyCapsule()
train_input.filenames = train_examples
train_input.labelled = train_input_labelled
train_input.indices = train_input_indices
val_input = tt.input.NumpyCapsule()
val_input.filenames = val_examples
test_input = tt.input.NumpyCapsule()
test_input.filenames = test_examples
# Setup input pipelines
train_input_stage = tt.input.InputStage(
input_shape=[params["network"]["input"]["height"],
params["network"]["input"]["width"]])
# Validation AND Test input stage
val_input_stage = tt.input.InputStage(
input_shape=[params["network"]["input"]["height"],
params["network"]["input"]["width"]])
# Add datasets
train_input_stage.add_dataset_from_placeholders(
"train", train_input.filenames,
train_input.labelled, train_input.indices,
batch_size=params["batch_size"],
augment=True)
# Validation set
val_input_stage.add_dataset_from_placeholders(
"val", val_input.filenames,
batch_size=params["batch_size"])
# Test set
val_input_stage.add_dataset_from_placeholders(
"test", test_input.filenames,
batch_size=params["batch_size"])
        # Calculate number of batches in each iterator
        # NOTE: the labelled training set grows between active-learning
        # iterations; the full example list is used as an upper bound here.
        train_batches = (len(train_examples) - 1)//params["batch_size"] + 1
        val_batches = (len(val_examples) - 1)//params["batch_size"] + 1
        test_batches = (len(test_examples) - 1)//params["batch_size"] + 1
# Get iterator outputs
train_image_raw, train_image, train_label, train_mask, \
train_labelled, train_index = train_input_stage.get_output()
val_image, val_label, val_mask = val_input_stage.get_output()
# Create step variables
with tf.variable_scope("StepCounters"):
global_step = tf.Variable(0, dtype=tf.int64,
trainable=False, name="GlobalStep")
local_step = tf.Variable(0, dtype=tf.int64,
trainable=False, name="LocalStep")
global_step_op = tf.assign_add(global_step, local_step)
epoch_step = tf.Variable(0, trainable=False, name="EpochStep")
epoch_step_inc = tf.assign_add(epoch_step, 1)
# Build training- and validation network
regularization = {"drop_rates": hparams["dropout_rates"]}
if hparams["weight_reg"]["L2"] > 0.0 \
or hparams["weight_reg"]["L1"] > 0.0:
regularization = {
"weight_regularization" : tf.keras.regularizers.l1_l2(
l1=hparams["weight_reg"]["L1"],
l2=hparams["weight_reg"]["L2"]),
"regularization_scaling" : hparams["weight_reg"]["glorot_scaling"],
}
# Initialize networks
train_net = models.ENet(
dataset.num_classes,
**regularization
)
val_net = models.ENet(dataset.num_classes)
with tf.device("/device:GPU:0"):
# Build graph for training
train_logits = train_net(train_image, training=True)
# Compute predictions: use @train_pred for metrics and
# @pseudo_label for pseudo_annotation process.
train_pred = tf.math.argmax(train_logits, axis=-1,
name="TrainPredictions")
with tf.name_scope("PseudoAnnotation"):
# Build ops one more time without dropout.
pseudo_logits = train_net(train_image_raw, training=False)
# Just make sure not to propagate gradients a second time.
pseudo_logits = tf.stop_gradient(pseudo_logits)
pseudo_label = tf.math.argmax(pseudo_logits, axis=-1,
name="TrainPredictions")
pseudo_label = tf.cast(pseudo_label, tf.uint8)
# Configure on-line high confidence pseudo labeling.
pseudo_prob = tf.nn.softmax(pseudo_logits, axis=-1, name="TrainProb")
if alparams["measure"] == "entropy":
# Reduce entropy over last dimension.
# Compute prediction entropy
entropy = - pseudo_prob * tf.math.log(pseudo_prob+EPSILON)
entropy = tf.math.reduce_sum(entropy, axis=-1)
# Convert logarithm base to units of number of classes
# NOTE this will make the metric independent of number of
# classes as well the range in [0,1]
log_base = tf.math.log(np.float32(dataset.num_classes))
entropy = entropy / log_base
# Convert entropy to confidence
pseudo_confidence = 1.0 - entropy
elif alparams["measure"] == "margin":
# Difference between the two largest entries in last dimension.
values, indices = tf.math.top_k(pseudo_prob, k=2)
pseudo_confidence = values[:,:,:,0] - values[:,:,:,1]
elif alparams["measure"] == "confidence":
# Reduce max over last dimension.
pseudo_confidence = tf.math.reduce_max(pseudo_prob, axis=-1)
else:
raise NotImplementedError("Uncertainty function not implemented.")
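            # Worked example (illustrative numbers, not computed by this
            # graph): for a 4-class pixel with softmax probs [0.7, 0.1, 0.1, 0.1]
            #   entropy  = -sum(p*log p)/log(4) ~= 0.678 -> confidence ~= 0.322
            #   margin   = 0.7 - 0.1 = 0.6
            #   max-prob = 0.7
            # i.e. each measure maps the same distribution to its own scale.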
pseudo_mean_confidence = tf.reduce_mean(
tf.cast(pseudo_confidence, tf.float64),
axis=(1,2))
# Pseudo annotate high-confidence unlabeled example pixels
pseudo_mask = tf.where(tf.math.less(pseudo_confidence, alparams["threshold"]),
tf.zeros_like(pseudo_label,
dtype=train_label.dtype),
tf.ones_like(pseudo_label,
dtype=train_label.dtype))
# Pseudo annotation logic (think of it as @tf.cond maped
# over batch dimension)
train_label = tf.where(train_labelled, train_label,
pseudo_label, name="MaybeGenLabel")
train_mask = tf.where(train_labelled, train_mask,
pseudo_mask, name="MaybeGenMask")
with tf.device("/device:GPU:1"):
# Build validation network.
val_logits = val_net(val_image, training=False)
val_pred = tf.math.argmax(val_logits, axis=-1,
name="ValidationPredictions")
# Build cost function
with tf.name_scope("Cost"):
with tf.device("/device:GPU:0"):
# Establish loss function
if hparams["softmax"]["multiscale"]:
loss, loss_weights = \
tt.losses.multiscale_masked_softmax_cross_entropy(
train_label,
train_net.endpoint_outputs[0],
train_mask, dataset.num_classes,
weight=hparams["softmax"]["loginverse_scaling"],
label_smoothing=hparams["softmax"]["label_smoothing"],
scope="XEntropy")
# NOTE: this will make @loss_weights checkpointed
train_net.loss_scale_weights = loss_weights
else:
loss = tt.losses.masked_softmax_cross_entropy(
train_label,
train_logits,
train_mask, dataset.num_classes,
weight=hparams["softmax"]["loginverse_scaling"],
label_smoothing=hparams["softmax"]["label_smoothing"],
scope="XEntropy")
cost = loss
# Add regularization to cost function
if len(train_net.losses) > 0:
regularization_loss = tf.math.add_n(train_net.losses, name="Regularization")
cost += tf.cast(regularization_loss, dtype=tf.float64)
# Setup learning rate
learning_rate = hparams["learning_rate"]
if hparams["learning_rate_decay"] > 0.0:
# Inverse time learning_rate if lr_decay specified
learning_rate = tf.train.inverse_time_decay(
learning_rate, local_step,
decay_steps=train_batches,
decay_rate=hparams["learning_rate_decay"])
# Create optimization procedure
optimizer = tf.train.AdamOptimizer(learning_rate, **hparams["optimizer"]["kwargs"])
# Create training op
train_op = optimizer.minimize(cost, global_step=local_step,
name="TrainOp")
# END tf.device("/device:GPU:0")
# END tf.name_scope("Cost")
# Create summary operations for training and validation network
with tf.name_scope("Summary"):
# Create colormap for image summaries
colormap = tf.constant(dataset.colormap, dtype=tf.uint8,
name="Colormap")
# Create metric evaluation and summaries
with tf.device("/device:GPU:0"):
with tf.name_scope("TrainMetrics"):
# Create metrics object for training network.
train_metrics = tt.metrics.Metrics(train_pred, train_label,
dataset.num_classes, train_mask)
# Get Tensorflow update op.
metric_update_op = train_metrics.get_update_op()
# Get Tensorflow summary operations.
metric_summaries = train_metrics.get_summaries()
train_summary_iter = tf.summary.merge(
[
# Summaries run at each iteration.
tf.summary.scalar("CrossEntropyLoss", loss,
family="Losses"),
tf.summary.scalar("TotalCost", cost,
family="Losses"),
tf.summary.scalar("LearningRate", learning_rate,
family="Losses")
], name="IterationSummaries"
)
with tf.control_dependencies([metric_update_op]):
train_summary_epoch = tf.summary.merge(
[
# Summaries run at epoch boundaries.
metric_summaries["Metrics"],
metric_summaries["ConfusionMat"]
], name="EpochSummaries"
)
train_image_summary = tf.summary.merge(
[
tf.summary.image(
"PseudoLabel/input",
train_image_raw,
family="PseudoLabel"
),
tf.summary.image(
"PseudoLabel/confidence",
tf.expand_dims(pseudo_confidence, axis=-1),
family="PseudoLabel"
),
tf.summary.image(
"PseudoLabel",
tf.gather(dataset.colormap,
tf.cast(pseudo_label*pseudo_mask \
+ (1 - pseudo_mask)*255,
tf.int32)),
family="PseudoLabel"
)
]
)
# Create metric evaluation and summaries
with tf.device("/device:GPU:1"):
with tf.name_scope("ValidationTestMetrics"):
# Create metrics object
val_metrics = tt.metrics.Metrics(val_pred, val_label,
dataset.num_classes, val_mask)
# Get update tensorflow ops
val_metric_update_op = val_metrics.get_update_op()
                # Get metric summaries
val_metric_summaries = val_metrics.get_summaries()
with tf.control_dependencies([val_metric_update_op]):
val_metric_summary = tf.summary.merge(
[
# "Expensive" summaries run at epoch boundaries.
val_metric_summaries["Metrics"],
val_metric_summaries["ClassMetrics"],
val_metric_summaries["ConfusionMat"]
], name="EpochSummaries"
)
val_image_summary = tf.summary.merge(
[
tf.summary.image("Input", val_image),
tf.summary.image("Label", tf.gather(
colormap, tf.cast(val_label + 255*(1-val_mask),
tf.int32))),
tf.summary.image("Predictions", tf.gather(
colormap, tf.cast(val_pred, tf.int32)))
]
)
val_summary_epoch = val_metric_summary
test_summary_epoch = tf.summary.merge([
val_metric_summary,
val_image_summary
]
)
conf_summary_ph = tf.placeholder(tf.float64, shape=[None])
conf_summary = tf.summary.histogram("ConfidenceDistribution",
conf_summary_ph)
# END name_scope("Summary")
# Create session with soft device placement
    # - some ops need to run on the CPU
sess_config = tf.ConfigProto(allow_soft_placement=True)
sess_config.gpu_options.allow_growth = True
with tf.Session(config=sess_config) as sess:
logger.debug("Initializing variables...")
sess.run(tf.global_variables_initializer())
# Create checkpoint object
with tf.name_scope("Checkpoint"):
checkpoint = tf.train.Checkpoint(model=train_net,
epoch=epoch_step,
step=global_step,
optimizer=optimizer)
checkpoint_name = os.path.join(args.log_dir, "model")
if args.checkpoint is not None:
# CMDline checkpoint given
ckpt = args.checkpoint
if os.path.isdir(ckpt):
ckpt = tf.train.latest_checkpoint(ckpt)
if ckpt is None:
                logger.error("Checkpoint path \"%s\" is invalid." % args.checkpoint)
return 1
logger.info("Resuming from checkpoint \"%s\"" % ckpt)
status = checkpoint.restore(ckpt)
if tf.__version__ < "1.14.0":
status.assert_existing_objects_matched()
else:
status.expect_partial()
status.initialize_or_restore(sess)
if args.reinitialize_output:
sess.run(train_net.Final.kernel.initializer)
        elif state["checkpoint"] is not None:
# Try to restore from checkpoint in logdir
ckpt = state["checkpoint"]
logger.info("Resuming from checkpoint \"%s\"" % ckpt)
status = checkpoint.restore(ckpt)
if tf.__version__ < "1.14.0":
status.assert_existing_objects_matched()
else:
status.expect_partial()
status.initialize_or_restore(sess)
with tf.name_scope("UpdateValidationWeights"):
update_val_op = []
for i in range(len(val_net.layers)):
for j in range(len(val_net.layers[i].variables)):
update_val_op.append(
tf.assign(val_net.layers[i].variables[j],
train_net.layers[i].variables[j]))
update_val_op = tf.group(update_val_op)
ckpt_manager = tt.checkpoint_manager.CheckpointManager(checkpoint,
args.log_dir)
# END scope Checkpoint
# Prepare global fetches dict
fetches = {
"train" : {
"iteration" : {
"step" : global_step_op,
"summary" : train_summary_iter,
"train_op" : train_op,
"update" : metric_update_op,
"updates" : train_net.updates
},
"epoch" : {
"step" : epoch_step,
"summary" : train_summary_epoch,
"summary/image" : train_image_summary
}
},
"val" : { # Validation and test fetches
"iteration" : {
"update" : val_metric_update_op
},
"epoch" : {
"step" : epoch_step,
"MeanIoU" : val_metrics.metrics["MeanIoU"],
"summary" : val_summary_epoch,
# Also add image summary, however only added to
# writer every N epochs.
"summary/image" : val_image_summary
}
},
"test" : {
"iteration" : {"update" : val_metric_update_op},
"epoch" : {"summary" : test_summary_epoch}
}
}
# Train loop (until convergence) -> Pick unlabeled examples -> test_loop
def train_loop(summary_writer):
"""
Train loop closure.
Runs training loop untill no improvement is seen in
@params["epochs"] epochs before returning.
"""
# How many epoch until counting @no_improvement
_initial_grace_period = alparams["epochs/warm_up"]
best_ckpt = state["checkpoint"]
best_mean_iou = 0.0
log_subdir = summary_writer.get_logdir()
run_name = os.path.basename(log_subdir)
checkpoint_prefix = os.path.join(log_subdir, "model")
num_iter_per_epoch = np.maximum(train_input.size,
val_input.size)
no_improvement_count = 0
while no_improvement_count < params["epochs"] \
or _initial_grace_period >= 0:
_initial_grace_period -= 1
# Increment in-graph epoch counter.
epoch = sess.run(epoch_step_inc)
# Prepare inner loop iterator
_iter = range(0, num_iter_per_epoch, params["batch_size"])
if show_progress:
_iter = tqdm.tqdm(_iter, desc="%s[%d]" % (run_name, epoch),
dynamic_ncols=True,
ascii=True,
postfix={"NIC": no_improvement_count})
# Initialize iterators
train_input_stage.init_iterator(
"train", sess, train_input.feed_dict)
val_input_stage.init_iterator(
"val", sess, val_input.feed_dict)
# Reset confusion matrices
train_metrics.reset_metrics(sess)
val_metrics.reset_metrics(sess)
# Prepare iteration fetches
_fetches = {
"train" : {"iteration" : fetches["train"]["iteration"]},
"val" : {"iteration" : fetches["val"]["iteration"]}
}
# Update validation network weights
sess.run(update_val_op)
try:
for i in _iter:
if train_input.size-params["batch_size"] <= i < train_input.size:
# Fetches for last training iteration.
_fetches["train"]["epoch"] = fetches["train"]["epoch"]
if val_input.size-params["batch_size"] <= i < val_input.size:
_fetches["val"]["epoch"] = fetches["val"]["epoch"]
# Run fetches
results = sess.run(_fetches)
if "train" in results.keys():
# Add iteration summary
summary_writer.add_summary(
results["train"]["iteration"]["summary"],
results["train"]["iteration"]["step"])
# Maybe add epoch summary
if "epoch" in results["train"].keys():
summary_writer.add_summary(
results["train"]["epoch"]["summary"],
results["train"]["epoch"]["step"]
)
# Pop fetches to prohibit OutOfRangeError due to
# asymmetric train-/val- input size.
if results["train"]["epoch"]["step"] % 100 == 0:
summary_writer.add_summary(
results["train"]["epoch"]["summary/image"],
results["train"]["epoch"]["step"]
)
_fetches.pop("train")
if "val" in results.keys() and \
"epoch" in results["val"].keys():
# Add summaries to event log.
summary_writer.add_summary(
results["val"]["epoch"]["summary"],
results["val"]["epoch"]["step"]
)
if results["val"]["epoch"]["step"] % 100 == 0:
# Only report image summary every 100th epoch.
summary_writer.add_summary(
results["val"]["epoch"]["summary/image"],
results["val"]["epoch"]["step"]
)
# Check if MeanIoU improved and
# update counter and best
if results["val"]["epoch"]["MeanIoU"] > best_mean_iou:
best_mean_iou = results["val"]["epoch"]["MeanIoU"]
# Update checkpoint file used for
# @tf.train.latest_checkpoint to point at
# current best.
_ckpt_name = ckpt_manager.commit(
checkpoint_prefix, sess)
if _ckpt_name != "":
best_ckpt = _ckpt_name
# Reset counter
no_improvement_count = 0
else:
# Result has not improved, increment counter.
no_improvement_count += 1
if no_improvement_count >= params["epochs"] and \
_initial_grace_period < 0:
_iter.close()
break
if show_progress:
_iter.set_postfix(NIC=no_improvement_count)
# Pop fetches to prohibit OutOfRangeError due to
# asymmetric train-/val- input size.
_fetches.pop("val")
# END "maybe add epoch summary"
except tf.errors.OutOfRangeError:
logger.error("Out of range error. Attempting to continue.")
pass
summary_writer.flush()
ckpt_manager.cache(sess)
# END while no_improvement_count < params["epochs"]
return best_ckpt
|
"""
_step = len(labelled)
# Initialize validation input stage with test set
val_input_stage.init_iterator("test", sess, test_input.feed_dict)
_iter = range(0, test_input.size, params["batch_size"])
if show_progress:
_iter = tqdm.tqdm(_iter, desc="test[%d]" % (_step),
ascii=True,
dynamic_ncols=True)
summary_proto = None
val_metrics.reset_metrics(sess)
try:
for i in _iter:
# Accumulate confusion matrix
if i < test_input.size - params["batch_size"]:
sess.run(fetches["test"]["iteration"]["update"])
else:
# Run summary operation last iteration
_, summary_proto = sess.run([fetches["test"]["iteration"]["update"],
fetches["test"]["epoch"]["summary"]])
except tf.errors.OutOfRangeError:
pass
# Add summary with number of labelled examples as step.
# NOTE this only runs on each major iteration.
summary_writer.add_summary(
summary_proto, _step
)
def rank_confidence():
# Allocate array to store all confidence scores
num_examples = len(state["dataset"]["train"]["filenames"])
confidence = np.zeros(num_examples, dtype=np.float32)
# Initialize input stage
train_input_stage.init_iterator("train", sess,
train_input.feed_dict)
_iter = range(0, train_input.size, params["batch_size"])
if show_progress:
_iter = tqdm.tqdm(_iter, desc="ranking[%d]" % len(labelled),
ascii=True,
dynamic_ncols=True)
try:
for i in _iter:
# Loop over all examples and compute confidence
batch_confidence, batch_indices = sess.run(
[pseudo_mean_confidence, train_index])
# Add to list of confidence
confidence[batch_indices] = batch_confidence
except tf.errors.OutOfRangeError:
pass
# Filter out labelled examples
unlabelled_confidence = confidence[unlabelled]
selection_size = np.minimum(len(unlabelled),
alparams["selection_size"])
# Get the lowest confidence indices of unlabelled subset
example_indices = np.argpartition(unlabelled_confidence,
selection_size)
example_indices = example_indices[:selection_size]
# Convert to indices into all filenames list
low_conf_examples = unlabelled[example_indices]
return low_conf_examples, unlabelled_confidence
checkpoint_path = state["checkpoint"]
# Only add graph to first event file
        _graph = sess.graph if checkpoint_path is None else None
with tf.summary.FileWriter(args.log_dir, graph=_graph) as test_writer:
iterations = alparams["iterations"]
if iterations < 0:
                # Iterate until all data is consumed
iterations = np.ceil(len(unlabelled)
/ float(alparams["selection_size"]))
logger.info("Iteration count: %d" % iterations)
while state["iteration"] < iterations:
# Step 1: train_loop
train_input.set_indices(labelled)
if state["iteration"] == 0:
# Pretrain
log_subdir = os.path.join(args.log_dir, "pretrain")
# Only use labelled subset
else:
# Any other iteration
log_subdir = os.path.join(args.log_dir, "iter-%d" %
state["iteration"])
# Sample from the unlabelled set
p = alparams["pseudo_labelling_proportion"]
sample_size = int(len(labelled)*p/(1-p))
sample_size = np.minimum(sample_size, len(unlabelled))
train_input.set_sample_size(sample_size)
# Create subdir if it doesn't exist
if not os.path.exists(log_subdir):
os.mkdir(log_subdir)
# Change checkpoint manager directory
ckpt_manager.chdir(log_subdir)
with tf.summary.FileWriter(log_subdir) as train_val_writer:
# Enter train loop
try:
checkpoint_path = train_loop(train_val_writer)
except KeyboardInterrupt as exception:
# Quickly store state
if ckpt_manager.latest_checkpoint != "":
state["checkpoint"] = ckpt_manager.latest_checkpoint
with open(state_filename, "w") as f:
json.dump(state, f, indent=2)
f.truncate()
raise exception
# Reload best checkpoint
status = checkpoint.restore(checkpoint_path)
status.run_restore_ops(sess)
sess.run(update_val_op)
# Step 2: test_loop
if test_input.size > 0:
# This step may be omitted on deployment
test_loop(test_writer)
# Step 3: Find low confidence examples
# Reset train_input to use all examples for ranking
train_input.set_indices()
if alparams["selection_size"] > 0:
low_conf_examples, unlabelled_conf = rank_confidence()
_hist_summary = sess.run(conf_summary,
{conf_summary_ph:
unlabelled_conf})
test_writer.add_summary(_hist_summary, state["iteration"])
                else:
                    # Draw examples randomly: a negative selection size
                    # means "sample |selection_size| examples at random".
                    selection_size = np.minimum(np.abs(alparams["selection_size"]),
                                                len(unlabelled))
                    if selection_size != 0:
                        low_conf_examples = np.random.choice(
                            unlabelled, selection_size, replace=False)
                    else:
                        low_conf_examples = []
# (maybe) Pause for user to annotate
to_annotate_indices = no_label_indices[np.isin(
no_label_indices, low_conf_examples)]
while len(to_annotate_indices) > 0:
                    to_annotate = train_examples[to_annotate_indices]
                    to_annotate_basename = np.array(
                        [os.path.basename(f) for f in to_annotate])
                    # Poll user for filenames of annotated examples
                    logger.info("Please annotate the following examples:\n%s" %
                                "\n".join(to_annotate_basename.tolist()))
filenames = tkinter.filedialog.askopenfilename(
multiple=1,
filetypes=(("TFRecord", "*.tfrecord"),))
hit = [] # List of matching filename indices
for filename in filenames:
basename = os.path.basename(filename)
idx = -1
for i in range(len(to_annotate)):
if to_annotate[i].endswith(basename):
idx = i
break
if idx != -1:
# Update state filenames
train_examples[to_annotate_indices[idx]] = filename
hit.append(idx)
else:
logger.info("Unrecognized filepath: %s" % filename)
# Remove matched paths
to_annotate_indices = np.delete(to_annotate_indices, hit)
# Remove annotated examples from unlabelled set
no_label_indices = no_label_indices[np.isin(no_label_indices,
low_conf_examples,
invert=True)]
logger.info(
"Moving following examples to labelled set:\n%s" %
"\n".join(train_examples[low_conf_examples].tolist())
)
                # First make the update to the input stage before
                # committing the state change
train_input_labelled[low_conf_examples] = True
train_input.labelled = train_input_labelled
# Step 4: Update state information
labelled = np.append(labelled, low_conf_examples)
unlabelled = unlabelled[np.isin(unlabelled, low_conf_examples,
assume_unique=True, invert=True)]
state["dataset"]["train"]["filenames"] = train_examples.tolist()
state["dataset"]["train"]["labelled"] = labelled.tolist()
state["dataset"]["train"]["unlabelled"] = unlabelled.tolist()
state["iteration"] += 1
state["checkpoint"] = checkpoint_path
# Dump updated state
with open(state_filename, "w") as f:
json.dump(state, f, indent=2)
f.truncate()
return 0
class HelpfullParser(argparse.ArgumentParser):
# Prints help instead of usage string on error
def error(self, message):
self.print_help()
self.exit(2, "error: %s\n" % message)
def parse_arguments():
"""
    Handles parsing of commandline arguments
:returns: The parsed commandline options
:rtype: argparse.Namespace
"""
# Required arguments
req_parser = argparse.ArgumentParser(add_help=False)
req_group = req_parser.add_argument_group(title="Required arguments")
req_group.add_argument(
"-d", "--data-dir",
required=True,
type=str,
dest="data_dir",
help="Path to dataset root directory"
)
req_group.add_argument(
"-l", "--log-dir",
required=True,
type=str,
dest="log_dir",
metavar="LOG_DIR",
help="Logdirectory for the session."
)
req_group.add_argument(
"-p", "--parameters",
required=True,
type=str,
dest="params",
metavar="PARAM_FILE",
help="Path to parameter configuration file, see conf subdirectory."
)
# Optional arguments
opt_parser = argparse.ArgumentParser(add_help=False)
opt_parser.add_argument(
"-c", "--checkpoint",
type=str,
dest="checkpoint", required=False,
metavar="CHECKPOINT",
help="Path to pretrained checkpoint directory or model."
)
opt_parser.add_argument(
"-r", "--reinitialize-output-layer",
action="store_true",
dest="reinitialize_output", required=False,
help="Reinitialize last layer of model (if checkpoint specified)."
)
opt_parser.add_argument(
"-u", "--unlabelled-dir",
type=str,
default=None,
dest="unlabelled",
metavar="UNLABELLED_GLOB",
help="Path to directory containing only feature data."
)
# Create parser hierarchy
# Top parser
top_parser = argparse.ArgumentParser(
usage="%s {cityscapes,freiburg,vistas} [-h/--help]"
% sys.argv[0])
# Dataset specific parsers inherits required arguments.
data_parsers = top_parser.add_subparsers(parser_class=HelpfulParser)
# Cityscapes dataset
cityscapes = data_parsers.add_parser(
"cityscapes",
usage="%s {cityscapes,freiburg} -d DATA_DIR -l LOG_DIR [options]"
% sys.argv[0],
parents=[req_parser,opt_parser],
conflict_handler="resolve",
help="The Cityscapes dataset.")
cityscapes.set_defaults(dataset="cityscapes")
cityscapes.add_argument("--use-coarse",
action="store_true",
required=False,
dest="coarse")
# Mapillary Vistas dataset
vistas = data_parsers.add_parser(
"vistas",
usage="%s {cityscapes,freiburg,vistas} -d DATA_DIR -l LOG_DIR [options]"
% sys.argv[0],
parents=[req_parser, opt_parser],
conflict_handler="resolve",
help="The Mapillary Vistas dataset.")
vistas.set_defaults(dataset="vistas")
# Freiburg Forest dataset
freiburg = data_parsers.add_parser(
"freiburg",
usage="%s {cityscapes,freiburg} -d DATA_DIR -l LOG_DIR [options]"
% sys.argv[0],
parents=[req_parser,opt_parser],
conflict_handler="resolve",
help="The Freiburg Forest dataset.")
freiburg.set_defaults(dataset="freiburg")
freiburg.add_argument("-m", "--modalities",
type=str,
nargs="+",
required=False,
default=[],
help="Path to Freiburg Forest root directory.")
if not "freiburg" in sys.argv and \
not "cityscapes" in sys.argv and \
not "vistas" in sys.argv:
top_parser.print_help()
sys.exit(0)
args = top_parser.parse_args()
return args
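# Example invocations (illustrative sketch; "train.py" and all paths below are
# hypothetical placeholders for this script, your data, and your config):
#   python train.py cityscapes -d /data/cityscapes -l logs/run1 -p conf/cityscapes.json
#   python train.py freiburg -d /data/freiburg -l logs/run2 -p conf/freiburg.json -m rgb depth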
if __name__ == "__main__":
# Get and configure logger
logger = logging.getLogger(__name__)
with open("util/logging.json") as conf:
conf_dict = json.load(conf)
logging.config.dictConfig(conf_dict)
del conf_dict
args = parse_arguments()
# Load parameters
parameters = None
with open(args.params, "r") as f:
parameters = json.load(f)
# Overwrite with parameter dict
args.params = parameters
sys.exit(main(args, logger))
|
def test_loop(summary_writer):
"""
Test loop closure.
|
wrapper.rs
|
use mpegts::packet::Packet;
use mpegts::program_association::*;
use mpegts::program_map::*;
use mpegts::program_descriptor::*;
use mpegts::stream_id::StreamId;
#[derive(Debug)]
pub struct
|
{
pub programs: Vec<Program>
}
impl Wrapper {
pub fn append_data(self, _data: Vec<u8>) -> Vec<Packet> {
let program_map_pid = 256;
let program_number = 1;
let video_pid = 257;
let pat = ProgramAssociation{
transport_stream_id: 0,
table: vec![Association{
program_number: program_number,
program_map_pid: program_map_pid
}],
};
let pmt = ProgramMap{
program_number: program_number,
pcr_pid: video_pid,
programs: vec![Program{
stream_id: StreamId::Itu_T_H265_Video,
elementary_pid: video_pid,
es_info: EsInfo{
descriptor: ProgramDescriptor::Reserved,
hevc: None,
data: vec![]
}
}],
};
let pat_packet = Packet::new_pat(pat);
let pmt_packet = Packet::new_pmt(program_map_pid, pmt);
let mut result = vec![];
result.push(pat_packet);
result.push(pmt_packet);
// Pad the stream with five null packets
for _ in 0..5 {
result.push(Packet::new_null());
}
result
}
}
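// Sketch of intended use (illustrative; the empty program list and payload are
// placeholders):
//
//     let wrapper = Wrapper { programs: vec![] };
//     let packets = wrapper.append_data(vec![]);
//     assert_eq!(packets.len(), 7); // PAT + PMT + five null packets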
|
Wrapper
|
markdown.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::default::Default;
use std::fs::File;
use std::io::prelude::*;
use std::io;
use std::path::{PathBuf, Path};
use getopts;
use testing;
use rustc::session::search_paths::SearchPaths;
use rustc::session::config::Externs;
use syntax::codemap::DUMMY_SP;
use externalfiles::{ExternalHtml, LoadStringError, load_string};
use html::render::reset_ids;
use html::escape::Escape;
use html::markdown;
use html::markdown::{Markdown, MarkdownWithToc, find_testable_code, old_find_testable_code};
use test::{TestOptions, Collector};
/// Separate any lines at the start of the file that begin with `# ` or `%`.
fn extract_leading_metadata<'a>(s: &'a str) -> (Vec<&'a str>, &'a str) {
let mut metadata = Vec::new();
let mut count = 0;
for line in s.lines() {
if line.starts_with("# ") || line.starts_with("%") {
// trim the whitespace after the symbol
metadata.push(line[1..].trim_left());
count += line.len() + 1;
} else {
return (metadata, &s[count..]);
}
}
// if we're here, then all lines were metadata `# ` or `%` lines.
|
(metadata, "")
}
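// For example (illustrative): extract_leading_metadata("% Title\n# Sub\nBody")
// returns (vec!["Title", "Sub"], "Body"); `count` advances past each metadata
// line plus its trailing newline, so the remainder starts exactly at "Body".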
/// Render `input` (e.g. "foo.md") into an HTML file in `output`
/// (e.g. output = "bar" => "bar/foo.html").
pub fn render(input: &str, mut output: PathBuf, matches: &getopts::Matches,
external_html: &ExternalHtml, include_toc: bool) -> isize {
let input_p = Path::new(input);
output.push(input_p.file_stem().unwrap());
output.set_extension("html");
let mut css = String::new();
for name in &matches.opt_strs("markdown-css") {
let s = format!("<link rel=\"stylesheet\" type=\"text/css\" href=\"{}\">\n", name);
css.push_str(&s)
}
let input_str = match load_string(input) {
Ok(s) => s,
Err(LoadStringError::ReadFail) => return 1,
Err(LoadStringError::BadUtf8) => return 2,
};
if let Some(playground) = matches.opt_str("markdown-playground-url").or(
matches.opt_str("playground-url")) {
markdown::PLAYGROUND.with(|s| { *s.borrow_mut() = Some((None, playground)); });
}
let mut out = match File::create(&output) {
Err(e) => {
let _ = writeln!(&mut io::stderr(),
"rustdoc: {}: {}",
output.display(), e);
return 4;
}
Ok(f) => f
};
let (metadata, text) = extract_leading_metadata(&input_str);
if metadata.is_empty() {
let _ = writeln!(
&mut io::stderr(),
"rustdoc: invalid markdown file: no initial lines starting with `# ` or `%`"
);
return 5;
}
let title = metadata[0];
reset_ids(false);
let rendered = if include_toc {
format!("{}", MarkdownWithToc(text))
} else {
format!("{}", Markdown(text))
};
let err = write!(
&mut out,
r#"<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<meta name="generator" content="rustdoc">
<title>{title}</title>
{css}
{in_header}
</head>
<body class="rustdoc">
<!--[if lte IE 8]>
<div class="warning">
This old browser is unsupported and will most likely display funky
things.
</div>
<![endif]-->
{before_content}
<h1 class="title">{title}</h1>
{text}
{after_content}
</body>
</html>"#,
title = Escape(title),
css = css,
in_header = external_html.in_header,
before_content = external_html.before_content,
text = rendered,
after_content = external_html.after_content,
);
match err {
Err(e) => {
let _ = writeln!(&mut io::stderr(),
"rustdoc: cannot write to `{}`: {}",
output.display(), e);
6
}
Ok(_) => 0
}
}
/// Run any tests/code examples in the markdown file `input`.
pub fn test(input: &str, cfgs: Vec<String>, libs: SearchPaths, externs: Externs,
mut test_args: Vec<String>, maybe_sysroot: Option<PathBuf>) -> isize {
let input_str = match load_string(input) {
Ok(s) => s,
Err(LoadStringError::ReadFail) => return 1,
Err(LoadStringError::BadUtf8) => return 2,
};
let mut opts = TestOptions::default();
opts.no_crate_inject = true;
let mut collector = Collector::new(input.to_string(), cfgs, libs, externs,
true, opts, maybe_sysroot, None,
Some(input.to_owned()));
old_find_testable_code(&input_str, &mut collector, DUMMY_SP);
find_testable_code(&input_str, &mut collector, DUMMY_SP);
test_args.insert(0, "rustdoctest".to_string());
testing::test_main(&test_args, collector.tests);
0
}
| |
retry_suite_test.go
|
package retry_test
import (
"testing"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
func TestRetry(t *testing.T) {
RegisterFailHandler(Fail)
|
RunSpecs(t, "Retry Suite")
}
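// Runs under the standard toolchain, e.g. `go test ./...`; RunSpecs hands the
// testing.T over to the Ginkgo runner, which executes every registered spec.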
|
|
constants.rs
|
///////////////////////////////////////////////////////////////////////////////
//
// Copyright 2018-2020 Airalab <[email protected]>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////////////
//! A set of constant values used in substrate runtime.
/// Money matters.
pub mod currency {
#[cfg(feature = "std")]
use hex_literal::hex;
#[cfg(feature = "std")]
use node_primitives::AccountId;
use node_primitives::Balance;
pub const COASE: Balance = 1_000;
pub const GLUSHKOV: Balance = 1_000 * COASE;
pub const XRT: Balance = 1_000 * GLUSHKOV;
pub const fn
|
(items: u32, bytes: u32) -> Balance {
items as Balance * 150 * GLUSHKOV / 100 + (bytes as Balance) * 60 * GLUSHKOV
}
#[cfg(feature = "std")]
lazy_static::lazy_static! {
pub static ref STAKE_HOLDERS: Vec<(AccountId, Balance)> = sp_std::vec![
(AccountId::from(hex!["5c63763273b539fa6ed09b6b9844553922f7c5eb30195062b139b057ac861568"]), 1000 * XRT),
(AccountId::from(hex!["caafae0aaa6333fcf4dc193146945fe8e4da74aa6c16d481eef0ca35b8279d73"]), 5000 * XRT),
(AccountId::from(hex!["9c322cfa42b80ffb1fa0a096ffbbe08ff44423ea7e6626183ba14bfb20c98c53"]), 5305599999),
(AccountId::from(hex!["1a84dfd9e4e30b0d48c4110bf7c509d5f27a68d4fade696dff3274e0afa09062"]), 1 * XRT),
(AccountId::from(hex!["8e5cda83432e069937b7e032ed8f88280a020aba933ee928eb936ab265f4c364"]), 10_000 * XRT),
];
}
}
/// Time.
pub mod time {
use node_primitives::{BlockNumber, Moment};
pub const MILLISECS_PER_BLOCK: Moment = 6000;
pub const SECS_PER_BLOCK: Moment = MILLISECS_PER_BLOCK / 1000;
pub const MINUTES: BlockNumber = 60 / (SECS_PER_BLOCK as BlockNumber);
pub const HOURS: BlockNumber = MINUTES * 60;
pub const DAYS: BlockNumber = HOURS * 24;
}
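// With 6000 ms blocks the derived constants work out to MINUTES = 10,
// HOURS = 600 and DAYS = 14_400 blocks. A minimal sanity-check sketch
// (assumes the crate is compiled with its test harness):
#[cfg(test)]
mod time_tests {
    use super::time::*;

    #[test]
    fn block_counts_match_six_second_blocks() {
        assert_eq!(MINUTES, 10);
        assert_eq!(HOURS, 600);
        assert_eq!(DAYS, 14_400);
    }
}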
// CRITICAL NOTE: The system module maintains two constants: a _maximum_ block weight and a
// _ratio_ of it yielding the portion which is accessible to normal transactions (reserving the rest
// for operational ones). `TARGET_BLOCK_FULLNESS` is entirely independent and the system module is
// not aware of it, nor should it care about it. This constant simply denotes at which ratio of the
// _maximum_ block weight we tweak the fees. It does NOT care about the type of the dispatch.
//
// For the system to be configured in a sane way, `TARGET_BLOCK_FULLNESS` should always be less than
// the ratio that `system` module uses to find normal transaction quota.
/// Fee-related.
pub mod fee {
pub use sp_runtime::Perbill;
/// The block saturation level. Fees will be updated based on this value.
pub const TARGET_BLOCK_FULLNESS: Perbill = Perbill::from_percent(25);
}
|
deposit
|
lib.rs
|
use std::borrow::Cow;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use fxhash::FxHashMap;
use once_cell::sync::Lazy;
use regex::Regex;
use serde::{Deserialize, Serialize};
use sourcemap::{RawToken, SourceMap as RawSourcemap};
use swc_atoms::JsWord;
use swc_common::comments::Comments;
use swc_common::util::take::Take;
use swc_common::{BytePos, SourceMap, DUMMY_SP};
use swc_ecmascript::ast::{
ArrayLit, CallExpr, JSXAttr, JSXAttrName, JSXAttrOrSpread, JSXAttrValue, JSXElementName,
JSXExpr, JSXExprContainer, JSXObject,
};
use swc_ecmascript::utils::ident::IdentLike;
use swc_ecmascript::utils::{ExprFactory, Id};
use swc_ecmascript::{
ast::{
Callee, Expr, ExprOrSpread, Ident, ImportDecl, ImportSpecifier, JSXElement, KeyValueProp,
MemberProp, ObjectLit, Pat, Prop, PropName, PropOrSpread, Tpl, VarDeclarator,
},
codegen::util::SourceMapperExt,
visit::{Fold, FoldWith},
};
use swc_trace_macro::swc_trace;
mod hash;
static EMOTION_OFFICIAL_LIBRARIES: Lazy<Vec<EmotionModuleConfig>> = Lazy::new(|| {
vec![
EmotionModuleConfig {
module_name: "@emotion/styled".into(),
exported_names: vec![],
default_export: Some(ExprKind::Styled),
},
EmotionModuleConfig {
module_name: "@emotion/react".into(),
exported_names: vec![
ExportItem {
name: "css".to_owned(),
kind: ExprKind::Css,
},
ExportItem {
name: "keyframes".to_owned(),
kind: ExprKind::Css,
},
ExportItem {
name: "Global".to_owned(),
kind: ExprKind::GlobalJSX,
},
],
..Default::default()
},
EmotionModuleConfig {
module_name: "@emotion/primitives".into(),
exported_names: vec![ExportItem {
name: "css".to_owned(),
kind: ExprKind::Css,
}],
default_export: Some(ExprKind::Styled),
},
EmotionModuleConfig {
module_name: "@emotion/native".into(),
exported_names: vec![ExportItem {
name: "css".to_owned(),
kind: ExprKind::Css,
}],
default_export: Some(ExprKind::Styled),
},
]
});
static SPACE_AROUND_COLON: Lazy<Regex> =
Lazy::new(|| Regex::new(r"\s*(?P<s>[:|;|,|\{,\}])\s*").unwrap());
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct EmotionOptions {
pub enabled: Option<bool>,
pub sourcemap: Option<bool>,
pub auto_label: Option<bool>,
pub label_format: Option<String>,
}
impl Default for EmotionOptions {
fn default() -> Self {
EmotionOptions {
enabled: Some(false),
sourcemap: Some(true),
auto_label: Some(true),
label_format: Some("[local]".to_owned()),
}
}
}
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct EmotionModuleConfig {
module_name: JsWord,
exported_names: Vec<ExportItem>,
default_export: Option<ExprKind>,
}
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
struct ExportItem {
name: String,
kind: ExprKind,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
enum ImportType {
Named,
Namespace,
Default,
}
impl Default for ImportType {
fn default() -> Self {
ImportType::Named
}
}
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
enum ExprKind {
Css,
Styled,
GlobalJSX,
}
impl Default for ExprKind {
fn default() -> Self {
ExprKind::Css
}
}
#[derive(Debug)]
enum PackageMeta {
Named(ExprKind),
Namespace(EmotionModuleConfig),
}
pub fn emotion<C: Comments>(
emotion_options: EmotionOptions,
path: &Path,
cm: Arc<SourceMap>,
comments: C,
) -> impl Fold {
EmotionTransformer::new(emotion_options, path, cm, comments)
}
pub struct EmotionTransformer<C: Comments> {
pub options: EmotionOptions,
filepath_hash: Option<u32>,
filepath: PathBuf,
dir: Option<String>,
filename: Option<String>,
cm: Arc<SourceMap>,
comments: C,
import_packages: FxHashMap<Id, PackageMeta>,
emotion_target_class_name_count: usize,
current_context: Option<String>,
// skip `css` transformation if it is inside a JSX element/attribute
in_jsx_element: bool,
}
#[swc_trace]
impl<C: Comments> EmotionTransformer<C> {
pub fn new(options: EmotionOptions, path: &Path, cm: Arc<SourceMap>, comments: C) -> Self {
EmotionTransformer {
options,
filepath_hash: None,
filepath: path.to_owned(),
dir: path.parent().and_then(|p| p.to_str()).map(|s| s.to_owned()),
filename: path
.file_name()
.and_then(|filename| filename.to_str())
.map(|s| s.to_owned()),
cm,
comments,
import_packages: FxHashMap::default(),
emotion_target_class_name_count: 0,
current_context: None,
in_jsx_element: false,
}
}
#[inline]
// Compute file hash on demand
// Memoize the hash of the file path
fn get_filename_hash(&mut self) -> u32 {
if self.filepath_hash.is_none() {
self.filepath_hash = Some(hash::murmurhash2(
self.filepath.to_string_lossy().as_bytes(),
0,
));
}
self.filepath_hash.unwrap()
}
fn create_label(&self, with_prefix: bool) -> String {
let prefix = if with_prefix { "label:" } else { "" };
let mut label = format!(
"{}{}",
prefix,
self.options
.label_format
.clone()
.unwrap_or_else(|| "[local]".to_owned())
);
if let Some(current_context) = &self.current_context {
label = label.replace("[local]", current_context);
if let Some(filename) = self.filename.as_ref() {
label = label.replace("[filename]", filename);
}
if let Some(dir) = self.dir.as_ref() {
label = label.replace("[dir]", dir);
};
}
label
}
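// For example (illustrative values): with label_format "[local]-[filename]"
// and a declarator named `myCss` in `App.tsx`, create_label(true) yields
// "label:myCss-App.tsx".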
fn create_sourcemap(&mut self, pos: BytePos) -> Option<String> {
if self.options.sourcemap.unwrap_or(false) {
let loc = self.cm.get_code_map().lookup_char_pos(pos);
let filename = self.filepath.to_str().map(|s| s.to_owned());
let cm = RawSourcemap::new(
filename.clone(),
vec![RawToken {
dst_line: 0,
dst_col: 0,
src_line: loc.line as u32 - 1,
src_col: loc.col_display as u32,
src_id: 0,
name_id: 0,
}],
Vec::new(),
vec![filename.unwrap_or_default()],
Some(vec![Some(loc.file.src.to_string())]),
);
let mut writer = Vec::new();
if cm.to_writer(&mut writer).is_ok() {
return Some(format!(
"/*# sourceMappingURL=data:application/json;charset=utf-8;base64,{} */",
base64::encode(writer)
));
}
}
None
}
// Find the imported name from modules
// These import statements are supported:
// import styled from '@emotion/styled'
// import { default as whateverStyled } from '@emotion/styled'
// import { css } from '@emotion/react'
// import * as emotionCss from '@emotion/react'
fn generate_import_info(&mut self, expr: &ImportDecl) {
for c in EMOTION_OFFICIAL_LIBRARIES.iter() {
if expr.src.value == c.module_name {
for specifier in expr.specifiers.iter() {
match specifier {
ImportSpecifier::Named(named) => {
for exported in c.exported_names.iter() {
if named.local.as_ref() == exported.name {
self.import_packages.insert(
named.local.to_id(),
PackageMeta::Named(exported.kind),
);
}
}
}
ImportSpecifier::Default(default) => {
if let Some(kind) = c.default_export {
self.import_packages
.insert(default.local.to_id(), PackageMeta::Named(kind));
}
}
ImportSpecifier::Namespace(namespace) => {
self.import_packages
.insert(namespace.local.to_id(), PackageMeta::Namespace(c.clone()));
}
}
}
}
}
}
fn create_label_prop_node(&mut self, key: &str) -> PropOrSpread {
let stable_class_name = format!(
"e{}{}",
radix_fmt::radix_36(self.get_filename_hash()),
self.emotion_target_class_name_count
);
self.emotion_target_class_name_count += 1;
PropOrSpread::Prop(Box::new(Prop::KeyValue(KeyValueProp {
key: PropName::Ident(Ident::new(key.into(), DUMMY_SP)),
value: stable_class_name.into(),
})))
}
fn create_args_from_tagged_tpl(&self, tagged_tpl: &mut Tpl) -> Vec<ExprOrSpread> {
let args_len = tagged_tpl.exprs.len() + tagged_tpl.quasis.len();
// Two extra slots are reserved for `label` and `sourceMap`
let mut args = Vec::with_capacity(args_len + 2);
for index in 0..args_len {
let i = index / 2;
if index % 2 == 0 {
if let Some(q) = tagged_tpl.quasis.get_mut(i) {
let q = q.take();
let minified = minify_css_string(&q.raw, index == 0, index == args_len - 1);
// Collapse a whitespace-only chunk into a single space
if minified.replace(' ', "").is_empty() {
if index != 0 && index != args_len - 1 {
args.push(" ".as_arg());
}
} else {
args.push(minified.as_arg())
}
}
} else if let Some(e) = tagged_tpl.exprs.get_mut(i) {
args.push(e.take().as_arg());
}
}
args
}
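// For example (illustrative): css`color: ${c};` interleaves quasis and
// expressions into the args ["color:", c, ";"] before any label/sourcemap
// arguments are appended by the calling fold methods.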
fn rewrite_styles_attr(&mut self, attrs: &mut [JSXAttrOrSpread], pos: BytePos) {
if let Some(attr_value) = attrs.iter_mut().find_map(|attr| {
if let JSXAttrOrSpread::JSXAttr(JSXAttr {
name: JSXAttrName::Ident(i),
value,
..
}) = attr
{
if i.as_ref() == "styles" {
return value.as_mut();
}
}
None
}) {
if let Some(raw_attr) = match attr_value {
JSXAttrValue::Lit(lit) => Some(Box::new(Expr::Lit(lit.clone()))),
JSXAttrValue::JSXExprContainer(JSXExprContainer {
expr: JSXExpr::Expr(expr),
..
}) => Some(expr.take()),
_ => None,
} {
*attr_value = self.create_styles_attr(raw_attr, pos);
self.in_jsx_element = true;
}
}
}
fn create_styles_attr(&mut self, mut raw_attr: Box<Expr>, pos: BytePos) -> JSXAttrValue {
if let Expr::Array(array_lit) = raw_attr.as_mut() {
if let Some(cm) = self.create_sourcemap(pos) {
array_lit.elems.push(Some(cm.as_arg()));
}
JSXAttrValue::JSXExprContainer(JSXExprContainer {
span: DUMMY_SP,
expr: JSXExpr::Expr(raw_attr),
})
} else {
JSXAttrValue::JSXExprContainer(JSXExprContainer {
span: DUMMY_SP,
expr: JSXExpr::Expr(Box::new(Expr::Array(ArrayLit {
span: DUMMY_SP,
elems: {
let mut elements = Vec::with_capacity(2);
elements.push(Some(raw_attr.as_arg()));
if let Some(cm) = self.create_sourcemap(pos) {
elements.push(Some(cm.as_arg()));
}
elements
},
}))),
})
}
}
}
impl<C: Comments> Fold for EmotionTransformer<C> {
// Collect imported names that indicate whether this file needs to be transformed
fn fold_import_decl(&mut self, expr: ImportDecl) -> ImportDecl {
if expr.type_only {
return expr;
}
self.generate_import_info(&expr);
expr
}
fn fold_var_declarator(&mut self, dec: VarDeclarator) -> VarDeclarator {
if let Pat::Ident(i) = &dec.name {
self.current_context = Some(i.id.as_ref().to_owned());
}
dec.fold_children_with(self)
}
fn fold_call_expr(&mut self, mut expr: CallExpr) -> CallExpr {
// If no package that we care about is imported, skip the following
// transformation logic.
if self.import_packages.is_empty() {
return expr;
}
if let Callee::Expr(e) = &mut expr.callee {
match e.as_mut() {
// css({})
Expr::Ident(i) => {
if let Some(package) = self.import_packages.get(&i.to_id()) {
if !expr.args.is_empty() {
if let PackageMeta::Named(kind) = package {
if matches!(kind, ExprKind::Css) && !self.in_jsx_element {
self.comments.add_pure_comment(expr.span.lo());
if self.options.auto_label.unwrap_or(false) {
expr.args.push(self.create_label(true).as_arg());
}
if let Some(cm) = self.create_sourcemap(expr.span.lo) {
expr.args.push(cm.as_arg());
}
}
}
}
}
}
// styled('div')({})
Expr::Call(c) => {
if let Callee::Expr(callee_exp) = &c.callee {
if let Expr::Ident(i) = callee_exp.as_ref() {
if let Some(PackageMeta::Named(ExprKind::Styled)) =
self.import_packages.get(&i.to_id())
{
if !c.args.is_empty() {
let mut args_props = Vec::with_capacity(2);
args_props.push(self.create_label_prop_node("target"));
self.comments.add_pure_comment(expr.span.lo());
if self.options.auto_label.unwrap_or(false) {
args_props.push(PropOrSpread::Prop(Box::new(
Prop::KeyValue(KeyValueProp {
key: PropName::Ident(Ident::new(
"label".into(),
DUMMY_SP,
)),
value: self.create_label(false).into(),
}),
)));
}
if let Some(cm) = self.create_sourcemap(expr.span.lo()) {
expr.args.push(cm.as_arg());
}
c.args.push(
Expr::Object(ObjectLit {
span: DUMMY_SP,
props: args_props,
})
.as_arg(),
);
}
}
}
}
}
// styled.div({})
// customEmotionReact.css({})
Expr::Member(m) => {
if let Expr::Ident(i) = m.obj.as_ref() {
if let Some(package) = self.import_packages.get(&i.to_id()) {
if let PackageMeta::Named(kind) = package {
if matches!(kind, ExprKind::Styled) {
if let MemberProp::Ident(prop) = &m.prop {
let mut args_props = Vec::with_capacity(2);
args_props.push(self.create_label_prop_node("target"));
let mut args = vec![prop.sym.as_ref().as_arg()];
if !self.in_jsx_element {
self.comments.add_pure_comment(expr.span.lo());
if self.options.auto_label.unwrap_or(false) {
args_props.push(PropOrSpread::Prop(Box::new(
Prop::KeyValue(KeyValueProp {
key: PropName::Ident(Ident::new(
"label".into(),
DUMMY_SP,
)),
value: self.create_label(false).into(),
}),
)));
}
args.push(
Expr::Object(ObjectLit {
span: DUMMY_SP,
props: args_props,
})
.as_arg(),
);
if let Some(cm) = self.create_sourcemap(expr.span.lo())
{
expr.args.push(cm.as_arg());
}
}
return CallExpr {
span: expr.span,
type_args: expr.type_args,
args: expr.args,
callee: CallExpr {
span: DUMMY_SP,
type_args: None,
callee: Ident::new(i.sym.clone(), i.span)
.as_callee(),
args,
}
.as_callee(),
};
}
}
}
if let PackageMeta::Namespace(c) = package {
if c.exported_names
.iter()
.any(|n| match_css_export(n, &m.prop))
{
self.comments.add_pure_comment(expr.span.lo());
if self.options.auto_label.unwrap_or(false) {
expr.args.push(self.create_label(true).as_arg());
}
if let Some(cm) = self.create_sourcemap(expr.span.lo()) {
expr.args.push(cm.as_arg());
}
}
}
}
}
}
_ => {}
}
}
expr
}
fn fold_expr(&mut self, mut expr: Expr) -> Expr {
if let Expr::TaggedTpl(tagged_tpl) = &mut expr {
// styled('div')``
match tagged_tpl.tag.as_mut() {
Expr::Call(call) => {
if let Callee::Expr(callee) = &call.callee {
if let Expr::Ident(i) = callee.as_ref() {
if let Some(PackageMeta::Named(ExprKind::Styled)) =
self.import_packages.get(&i.to_id())
{
let mut callee = call.take();
let mut object_props = Vec::with_capacity(2);
object_props.push(self.create_label_prop_node("target"));
self.comments.add_pure_comment(callee.span.lo());
if self.options.auto_label.unwrap_or(false) {
object_props.push(PropOrSpread::Prop(Box::new(
Prop::KeyValue(KeyValueProp {
key: PropName::Ident(Ident::new(
"label".into(),
DUMMY_SP,
)),
value: self.create_label(false).into(),
}),
)));
}
callee.args.push(
Expr::Object(ObjectLit {
span: DUMMY_SP,
props: object_props,
})
.as_arg(),
);
return Expr::Call(CallExpr {
span: DUMMY_SP,
callee: callee.as_callee(),
args: {
let mut args: Vec<ExprOrSpread> = self
.create_args_from_tagged_tpl(&mut tagged_tpl.tpl)
.into_iter()
.map(|exp| exp.fold_children_with(self))
.collect();
if let Some(cm) =
self.create_sourcemap(tagged_tpl.span.lo())
{
args.push(cm.as_arg());
}
args
},
type_args: None,
});
}
}
}
}
// css``
Expr::Ident(i) => {
if let Some(PackageMeta::Named(ExprKind::Css)) =
self.import_packages.get(&i.to_id())
{
let mut args = self.create_args_from_tagged_tpl(&mut tagged_tpl.tpl);
if !self.in_jsx_element {
self.comments.add_pure_comment(i.span.lo());
if self.options.auto_label.unwrap_or(false) {
args.push(self.create_label(false).as_arg());
}
if let Some(cm) = self.create_sourcemap(tagged_tpl.span.lo()) {
args.push(cm.as_arg());
}
}
return Expr::Call(CallExpr {
span: DUMMY_SP,
callee: i.take().as_callee(),
args,
type_args: None,
});
}
}
// styled.div``
// customEmotionReact.css``
Expr::Member(member_expr) => {
if let Expr::Ident(i) = member_expr.obj.as_mut() {
if let Some(p) = self.import_packages.get(&i.to_id()) {
match p {
PackageMeta::Named(ExprKind::Styled) => {
if let MemberProp::Ident(prop) = &mut member_expr.prop {
let mut object_props = Vec::with_capacity(2);
object_props.push(self.create_label_prop_node("target"));
if self.options.auto_label.unwrap_or(false) {
object_props.push(PropOrSpread::Prop(Box::new(
Prop::KeyValue(KeyValueProp {
key: PropName::Ident(Ident::new(
"label".into(),
DUMMY_SP,
)),
value: self.create_label(false).into(),
}),
)));
}
let mut args =
self.create_args_from_tagged_tpl(&mut tagged_tpl.tpl);
if let Some(cm) =
|
args.push(cm.as_arg());
}
self.comments.add_pure_comment(member_expr.span.lo());
return Expr::Call(CallExpr {
span: DUMMY_SP,
type_args: None,
callee: CallExpr {
type_args: None,
span: DUMMY_SP,
callee: i.take().as_callee(),
args: vec![
prop.take().sym.as_arg(),
Expr::Object(ObjectLit {
span: DUMMY_SP,
props: object_props,
})
.as_arg(),
],
}
.as_callee(),
args,
});
}
}
PackageMeta::Namespace(c) => {
if c.exported_names
.iter()
.any(|item| match_css_export(item, &member_expr.prop))
{
self.comments.add_pure_comment(member_expr.span.lo());
return Expr::Call(CallExpr {
span: DUMMY_SP,
callee: member_expr.take().as_callee(),
args: {
let mut args = self.create_args_from_tagged_tpl(
&mut tagged_tpl.tpl,
);
if self.options.auto_label.unwrap_or(false) {
args.push(self.create_label(true).as_arg());
}
if let Some(cm) =
self.create_sourcemap(tagged_tpl.span.lo())
{
args.push(cm.as_arg());
}
args
},
type_args: None,
});
}
}
_ => {}
}
}
}
}
_ => {}
}
}
expr.fold_children_with(self)
}
fn fold_jsx_element(&mut self, mut expr: JSXElement) -> JSXElement {
match &mut expr.opening.name {
JSXElementName::Ident(i) => {
if let Some(PackageMeta::Named(ExprKind::GlobalJSX)) =
self.import_packages.get(&i.to_id())
{
self.rewrite_styles_attr(&mut expr.opening.attrs, i.span.lo());
}
}
JSXElementName::JSXMemberExpr(member_exp) => {
if let JSXObject::Ident(i) = &member_exp.obj {
if let Some(PackageMeta::Namespace(EmotionModuleConfig {
exported_names,
..
})) = self.import_packages.get(&i.to_id())
{
if exported_names.iter().any(|item| {
matches!(item.kind, ExprKind::GlobalJSX)
&& item.name == member_exp.prop.as_ref()
}) {
self.rewrite_styles_attr(&mut expr.opening.attrs, i.span.lo());
}
}
}
}
_ => {}
};
let dest_expr = expr.fold_children_with(self);
self.in_jsx_element = false;
dest_expr
}
}
fn match_css_export(item: &ExportItem, prop: &MemberProp) -> bool {
if matches!(item.kind, ExprKind::Css) {
if let MemberProp::Ident(prop) = prop {
if item.name.as_str() == prop.sym.as_ref() {
return true;
}
}
}
false
}
#[inline]
fn minify_css_string(input: &str, is_first_item: bool, is_last_item: bool) -> Cow<str> {
let pattern = |c| c == '\n';
let pattern_trim_spaces = |c| c == ' ' || c == '\n';
SPACE_AROUND_COLON.replace_all(
input
.trim_start_matches(if is_first_item {
pattern_trim_spaces
} else {
pattern
})
.trim_end_matches(if is_last_item {
pattern_trim_spaces
} else {
pattern
}),
"$s",
)
}
#[allow(unused_imports)]
mod test_emotion {
use super::minify_css_string;
#[test]
fn should_not_trim_end_space_in_first_item() {
assert_eq!(
minify_css_string(
r#"
box-shadow: inset 0px 0px 0px "#,
true,
false
),
"box-shadow:inset 0px 0px 0px "
);
}
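    // An additional sketch exercising the separator regex (illustrative input):
    #[test]
    fn should_collapse_spaces_around_separators() {
        assert_eq!(minify_css_string("color: red ; ", true, true), "color:red;");
    }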
}
|
self.create_sourcemap(tagged_tpl.span.lo())
{
|
pools.go
|
package netapp
// Copyright (c) Microsoft and contributors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"context"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/validation"
"github.com/Azure/go-autorest/tracing"
"net/http"
)
// PoolsClient is the Microsoft NetApp Azure Resource Provider specification
type PoolsClient struct {
BaseClient
}
// NewPoolsClient creates an instance of the PoolsClient client.
func
|
(subscriptionID string) PoolsClient {
return NewPoolsClientWithBaseURI(DefaultBaseURI, subscriptionID)
}
// NewPoolsClientWithBaseURI creates an instance of the PoolsClient client using a custom endpoint. Use this when
// interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack).
func NewPoolsClientWithBaseURI(baseURI string, subscriptionID string) PoolsClient {
return PoolsClient{NewWithBaseURI(baseURI, subscriptionID)}
}
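// Illustrative usage (sketch; the subscription ID is a placeholder and an
// autorest authorizer must be configured on the embedded client before use):
//
//	client := NewPoolsClient("<subscription-id>")
//	pools, err := client.List(context.Background(), "myResourceGroup", "myNetAppAccount")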
// CreateOrUpdate creates or updates a capacity pool
// Parameters:
// body - capacity pool object supplied in the body of the operation.
// resourceGroupName - the name of the resource group.
// accountName - the name of the NetApp account
// poolName - the name of the capacity pool
func (client PoolsClient) CreateOrUpdate(ctx context.Context, body CapacityPool, resourceGroupName string, accountName string, poolName string) (result PoolsCreateOrUpdateFuture, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/PoolsClient.CreateOrUpdate")
defer func() {
sc := -1
if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: body,
Constraints: []validation.Constraint{{Target: "body.Location", Name: validation.Null, Rule: true, Chain: nil},
{Target: "body.PoolProperties", Name: validation.Null, Rule: true,
Chain: []validation.Constraint{{Target: "body.PoolProperties.PoolID", Name: validation.Null, Rule: false,
Chain: []validation.Constraint{{Target: "body.PoolProperties.PoolID", Name: validation.MaxLength, Rule: 36, Chain: nil},
{Target: "body.PoolProperties.PoolID", Name: validation.MinLength, Rule: 36, Chain: nil},
{Target: "body.PoolProperties.PoolID", Name: validation.Pattern, Rule: `^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$`, Chain: nil},
}},
{Target: "body.PoolProperties.Size", Name: validation.Null, Rule: true,
Chain: []validation.Constraint{{Target: "body.PoolProperties.Size", Name: validation.InclusiveMaximum, Rule: int64(549755813888000), Chain: nil},
{Target: "body.PoolProperties.Size", Name: validation.InclusiveMinimum, Rule: int64(4398046511104), Chain: nil},
}},
}}}},
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil {
return result, validation.NewError("netapp.PoolsClient", "CreateOrUpdate", err.Error())
}
req, err := client.CreateOrUpdatePreparer(ctx, body, resourceGroupName, accountName, poolName)
if err != nil {
err = autorest.NewErrorWithError(err, "netapp.PoolsClient", "CreateOrUpdate", nil, "Failure preparing request")
return
}
result, err = client.CreateOrUpdateSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "netapp.PoolsClient", "CreateOrUpdate", nil, "Failure sending request")
return
}
return
}
// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
func (client PoolsClient) CreateOrUpdatePreparer(ctx context.Context, body CapacityPool, resourceGroupName string, accountName string, poolName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"accountName": autorest.Encode("path", accountName),
"poolName": autorest.Encode("path", poolName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2019-10-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
body.ID = nil
body.Name = nil
body.Type = nil
preparer := autorest.CreatePreparer(
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPut(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/capacityPools/{poolName}", pathParameters),
autorest.WithJSON(body),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
// http.Response Body if it receives an error.
func (client PoolsClient) CreateOrUpdateSender(req *http.Request) (future PoolsCreateOrUpdateFuture, err error) {
var resp *http.Response
resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
var azf azure.Future
azf, err = azure.NewFutureFromResponse(resp)
future.FutureAPI = &azf
future.Result = func(client PoolsClient) (cp CapacityPool, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
err = autorest.NewErrorWithError(err, "netapp.PoolsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure")
return
}
if !done {
err = azure.NewAsyncOpIncompleteError("netapp.PoolsCreateOrUpdateFuture")
return
}
sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
cp.Response.Response, err = future.GetResult(sender)
if cp.Response.Response == nil && err == nil {
err = autorest.NewErrorWithError(err, "netapp.PoolsCreateOrUpdateFuture", "Result", nil, "received nil response and error")
}
if err == nil && cp.Response.Response.StatusCode != http.StatusNoContent {
cp, err = client.CreateOrUpdateResponder(cp.Response.Response)
if err != nil {
err = autorest.NewErrorWithError(err, "netapp.PoolsCreateOrUpdateFuture", "Result", cp.Response.Response, "Failure responding to request")
}
}
return
}
return
}
// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
// closes the http.Response Body.
func (client PoolsClient) CreateOrUpdateResponder(resp *http.Response) (result CapacityPool, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated, http.StatusAccepted),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// Delete deletes the specified capacity pool
// Parameters:
// resourceGroupName - the name of the resource group.
// accountName - the name of the NetApp account
// poolName - the name of the capacity pool
func (client PoolsClient) Delete(ctx context.Context, resourceGroupName string, accountName string, poolName string) (result PoolsDeleteFuture, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/PoolsClient.Delete")
defer func() {
sc := -1
if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil {
return result, validation.NewError("netapp.PoolsClient", "Delete", err.Error())
}
req, err := client.DeletePreparer(ctx, resourceGroupName, accountName, poolName)
if err != nil {
err = autorest.NewErrorWithError(err, "netapp.PoolsClient", "Delete", nil, "Failure preparing request")
return
}
result, err = client.DeleteSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "netapp.PoolsClient", "Delete", nil, "Failure sending request")
return
}
return
}
// DeletePreparer prepares the Delete request.
func (client PoolsClient) DeletePreparer(ctx context.Context, resourceGroupName string, accountName string, poolName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"accountName": autorest.Encode("path", accountName),
"poolName": autorest.Encode("path", poolName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2019-10-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsDelete(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/capacityPools/{poolName}", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// DeleteSender sends the Delete request. The method will close the
// http.Response Body if it receives an error.
func (client PoolsClient) DeleteSender(req *http.Request) (future PoolsDeleteFuture, err error) {
var resp *http.Response
resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
var azf azure.Future
azf, err = azure.NewFutureFromResponse(resp)
future.FutureAPI = &azf
future.Result = func(client PoolsClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
err = autorest.NewErrorWithError(err, "netapp.PoolsDeleteFuture", "Result", future.Response(), "Polling failure")
return
}
if !done {
err = azure.NewAsyncOpIncompleteError("netapp.PoolsDeleteFuture")
return
}
ar.Response = future.Response()
return
}
return
}
// DeleteResponder handles the response to the Delete request. The method always
// closes the http.Response Body.
func (client PoolsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
autorest.ByClosing())
result.Response = resp
return
}
// Get gets details of the specified capacity pool
// Parameters:
// resourceGroupName - the name of the resource group.
// accountName - the name of the NetApp account
// poolName - the name of the capacity pool
func (client PoolsClient) Get(ctx context.Context, resourceGroupName string, accountName string, poolName string) (result CapacityPool, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/PoolsClient.Get")
defer func() {
sc := -1
if result.Response.Response != nil {
sc = result.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil {
return result, validation.NewError("netapp.PoolsClient", "Get", err.Error())
}
req, err := client.GetPreparer(ctx, resourceGroupName, accountName, poolName)
if err != nil {
err = autorest.NewErrorWithError(err, "netapp.PoolsClient", "Get", nil, "Failure preparing request")
return
}
resp, err := client.GetSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "netapp.PoolsClient", "Get", resp, "Failure sending request")
return
}
result, err = client.GetResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "netapp.PoolsClient", "Get", resp, "Failure responding to request")
return
}
return
}
// GetPreparer prepares the Get request.
func (client PoolsClient) GetPreparer(ctx context.Context, resourceGroupName string, accountName string, poolName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"accountName": autorest.Encode("path", accountName),
"poolName": autorest.Encode("path", poolName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2019-10-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/capacityPools/{poolName}", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// GetSender sends the Get request. The method will close the
// http.Response Body if it receives an error.
func (client PoolsClient) GetSender(req *http.Request) (*http.Response, error) {
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// GetResponder handles the response to the Get request. The method always
// closes the http.Response Body.
func (client PoolsClient) GetResponder(resp *http.Response) (result CapacityPool, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// List lists all capacity pools in the NetApp account
// Parameters:
// resourceGroupName - the name of the resource group.
// accountName - the name of the NetApp account
func (client PoolsClient) List(ctx context.Context, resourceGroupName string, accountName string) (result CapacityPoolList, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/PoolsClient.List")
defer func() {
sc := -1
if result.Response.Response != nil {
sc = result.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil {
return result, validation.NewError("netapp.PoolsClient", "List", err.Error())
}
req, err := client.ListPreparer(ctx, resourceGroupName, accountName)
if err != nil {
err = autorest.NewErrorWithError(err, "netapp.PoolsClient", "List", nil, "Failure preparing request")
return
}
resp, err := client.ListSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "netapp.PoolsClient", "List", resp, "Failure sending request")
return
}
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "netapp.PoolsClient", "List", resp, "Failure responding to request")
return
}
return
}
// ListPreparer prepares the List request.
func (client PoolsClient) ListPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"accountName": autorest.Encode("path", accountName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2019-10-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/capacityPools", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// ListSender sends the List request. The method will close the
// http.Response Body if it receives an error.
func (client PoolsClient) ListSender(req *http.Request) (*http.Response, error) {
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// ListResponder handles the response to the List request. The method always
// closes the http.Response Body.
func (client PoolsClient) ListResponder(resp *http.Response) (result CapacityPoolList, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// Update patches the specified capacity pool
// Parameters:
// body - capacity pool object supplied in the body of the operation.
// resourceGroupName - the name of the resource group.
// accountName - the name of the NetApp account
// poolName - the name of the capacity pool
func (client PoolsClient) Update(ctx context.Context, body CapacityPoolPatch, resourceGroupName string, accountName string, poolName string) (result CapacityPool, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/PoolsClient.Update")
defer func() {
sc := -1
if result.Response.Response != nil {
sc = result.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil {
return result, validation.NewError("netapp.PoolsClient", "Update", err.Error())
}
req, err := client.UpdatePreparer(ctx, body, resourceGroupName, accountName, poolName)
if err != nil {
err = autorest.NewErrorWithError(err, "netapp.PoolsClient", "Update", nil, "Failure preparing request")
return
}
resp, err := client.UpdateSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "netapp.PoolsClient", "Update", resp, "Failure sending request")
return
}
result, err = client.UpdateResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "netapp.PoolsClient", "Update", resp, "Failure responding to request")
return
}
return
}
// UpdatePreparer prepares the Update request.
func (client PoolsClient) UpdatePreparer(ctx context.Context, body CapacityPoolPatch, resourceGroupName string, accountName string, poolName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"accountName": autorest.Encode("path", accountName),
"poolName": autorest.Encode("path", poolName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2019-10-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
body.ID = nil
body.Name = nil
body.Type = nil
preparer := autorest.CreatePreparer(
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPatch(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/capacityPools/{poolName}", pathParameters),
autorest.WithJSON(body),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// UpdateSender sends the Update request. The method will close the
// http.Response Body if it receives an error.
func (client PoolsClient) UpdateSender(req *http.Request) (*http.Response, error) {
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// UpdateResponder handles the response to the Update request. The method always
// closes the http.Response Body.
func (client PoolsClient) UpdateResponder(resp *http.Response) (result CapacityPool, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
|
NewPoolsClient
|
rustpkg.rs
|
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// rustpkg - a package manager and build system for Rust
#[link(name = "rustpkg",
vers = "0.8-pre",
uuid = "25de5e6e-279e-4a20-845c-4cabae92daaf",
url = "https://github.com/mozilla/rust/tree/master/src/librustpkg")];
#[license = "MIT/ASL2"];
#[crate_type = "lib"];
extern mod extra;
extern mod rustc;
extern mod syntax;
use std::result;
use std::io;
use std::os;
use std::run;
use std::str;
pub use std::path::Path;
use std::hashmap::HashMap;
use rustc::driver::{driver, session};
use rustc::metadata::filesearch;
use rustc::metadata::filesearch::rust_path;
use extra::{getopts};
use syntax::{ast, diagnostic};
use util::*;
use messages::*;
use path_util::{build_pkg_id_in_workspace, first_pkgid_src_in_workspace};
use path_util::{U_RWX, in_rust_path};
use path_util::{built_executable_in_workspace, built_library_in_workspace, default_workspace};
use path_util::{target_executable_in_workspace, target_library_in_workspace};
use source_control::is_git_dir;
use workspace::{each_pkg_parent_workspace, pkg_parent_workspaces, cwd_to_workspace};
use context::Ctx;
use package_id::PkgId;
use package_source::PkgSrc;
pub mod api;
mod conditions;
mod context;
mod crate;
mod installed_packages;
mod messages;
mod package_id;
mod package_source;
mod path_util;
mod search;
mod source_control;
mod target;
#[cfg(test)]
mod tests;
mod util;
mod version;
mod workspace;
pub mod usage;
/// A PkgScript represents user-supplied custom logic for
/// special build hooks. This only exists for packages with
/// an explicit package script.
struct PkgScript<'self> {
/// Uniquely identifies this package
id: &'self PkgId,
// Used to have this field: deps: ~[(~str, Option<~str>)]
// but I think it shouldn't be stored here
/// The contents of the package script: either a file path,
/// or a string containing the text of the input
input: driver::input,
/// The session to use *only* for compiling the custom
/// build script
sess: session::Session,
/// The config for compiling the custom build script
cfg: ast::CrateConfig,
/// The crate for the custom build script
crate: @ast::Crate,
/// Directory in which to store build output
build_dir: Path
}
impl<'self> PkgScript<'self> {
/// Given the path name for a package script
/// and a package ID, parse the package script into
/// a PkgScript that we can then execute
fn parse<'a>(sysroot: @Path,
script: Path,
workspace: &Path,
id: &'a PkgId) -> PkgScript<'a> {
// Get the executable name that was invoked
let binary = os::args()[0].to_managed();
// Build the rustc session data structures to pass
// to the compiler
debug!("pkgscript parse: %s", sysroot.to_str());
let options = @session::options {
binary: binary,
maybe_sysroot: Some(sysroot),
crate_type: session::bin_crate,
.. (*session::basic_options()).clone()
};
let input = driver::file_input(script);
let sess = driver::build_session(options, diagnostic::emit);
let cfg = driver::build_configuration(sess);
let crate = driver::phase_1_parse_input(sess, cfg.clone(), &input);
let crate = driver::phase_2_configure_and_expand(sess, cfg.clone(), crate);
let work_dir = build_pkg_id_in_workspace(id, workspace);
debug!("Returning package script with id %s", id.to_str());
PkgScript {
id: id,
input: input,
sess: sess,
cfg: cfg,
crate: crate,
build_dir: work_dir
}
}
/// Run the contents of this package script, where <what>
/// is the command to pass to it (e.g., "build", "clean", "install")
/// Returns a pair of an exit code and list of configs (obtained by
/// calling the package script's configs() function, if it exists)
// FIXME (#4432): Use workcache to only compile the script when changed
fn run_custom(&self, sysroot: @Path) -> (~[~str], ExitCode) {
let sess = self.sess;
debug!("Working directory = %s", self.build_dir.to_str());
// Collect together any user-defined commands in the package script
let crate = util::ready_crate(sess, self.crate);
debug!("Building output filenames with script name %s",
driver::source_name(&self.input));
let exe = self.build_dir.push(~"pkg" + util::exe_suffix());
util::compile_crate_from_input(&self.input,
&self.build_dir,
sess,
crate);
debug!("Running program: %s %s %s", exe.to_str(),
sysroot.to_str(), "install");
// FIXME #7401 should support commands besides `install`
let status = run::process_status(exe.to_str(), [sysroot.to_str(), ~"install"]);
if status != 0 {
return (~[], status);
}
else {
debug!("Running program (configs): %s %s %s",
exe.to_str(), sysroot.to_str(), "configs");
let output = run::process_output(exe.to_str(), [sysroot.to_str(), ~"configs"]);
// Run the configs() function to get the configs
let cfgs = str::from_bytes_slice(output.output).word_iter()
.map(|w| w.to_owned()).collect();
(cfgs, output.status)
}
}
fn hash(&self) -> ~str {
self.id.hash()
}
}
pub trait CtxMethods {
fn run(&self, cmd: &str, args: ~[~str]);
fn do_cmd(&self, _cmd: &str, _pkgname: &str);
fn build(&self, workspace: &Path, pkgid: &PkgId);
fn clean(&self, workspace: &Path, id: &PkgId);
fn info(&self);
fn install(&self, workspace: &Path, id: &PkgId);
fn install_no_build(&self, workspace: &Path, id: &PkgId);
fn prefer(&self, _id: &str, _vers: Option<~str>);
fn test(&self);
fn uninstall(&self, _id: &str, _vers: Option<~str>);
fn unprefer(&self, _id: &str, _vers: Option<~str>);
}
impl CtxMethods for Ctx {
fn run(&self, cmd: &str, args: ~[~str]) {
match cmd {
"build" => {
if args.len() < 1 {
match cwd_to_workspace() {
None => { usage::build(); return }
Some((ws, pkgid)) => self.build(&ws, &pkgid)
}
}
else {
// The package id is presumed to be the first command-line
// argument
let pkgid = PkgId::new(args[0].clone());
do each_pkg_parent_workspace(&pkgid) |workspace| {
debug!("found pkg %s in workspace %s, trying to build",
pkgid.to_str(), workspace.to_str());
self.build(workspace, &pkgid);
true
};
}
}
"clean" => {
if args.len() < 1 {
match cwd_to_workspace() {
None => { usage::clean(); return }
// tjc: Maybe clean should clean all the packages in the
// current workspace, though?
Some((ws, pkgid)) => self.clean(&ws, &pkgid)
}
}
else {
// The package id is presumed to be the first command-line
// argument
let pkgid = PkgId::new(args[0].clone());
let cwd = os::getcwd();
self.clean(&cwd, &pkgid); // tjc: should use workspace, not cwd
}
}
"do" => {
if args.len() < 2 {
return usage::do_cmd();
}
self.do_cmd(args[0].clone(), args[1].clone());
}
"info" => {
self.info();
}
"install" => {
if args.len() < 1 {
match cwd_to_workspace() {
None => { usage::install(); return }
Some((ws, pkgid)) => self.install(&ws, &pkgid)
}
}
else {
// The package id is presumed to be the first command-line
// argument
let pkgid = PkgId::new(args[0]);
let workspaces = pkg_parent_workspaces(&pkgid);
debug!("package ID = %s, found it in %? workspaces",
pkgid.to_str(), workspaces.len());
if workspaces.is_empty() {
let rp = rust_path();
assert!(!rp.is_empty());
let src = PkgSrc::new(&rp[0], &pkgid);
src.fetch_git();
self.install(&rp[0], &pkgid);
}
else {
do each_pkg_parent_workspace(&pkgid) |workspace| {
self.install(workspace, &pkgid);
true
};
}
}
}
"list" => {
io::println("Installed packages:");
do installed_packages::list_installed_packages |pkg_id| {
println(pkg_id.path.to_str());
true
};
}
"prefer" => {
if args.len() < 1 {
return usage::uninstall();
}
self.prefer(args[0], None);
}
"test" => {
self.test();
}
"uninstall" => {
if args.len() < 1 {
return usage::uninstall();
}
let pkgid = PkgId::new(args[0]);
if !installed_packages::package_is_installed(&pkgid) {
warn(fmt!("Package %s doesn't seem to be installed! Doing nothing.", args[0]));
return;
}
else {
let rp = rust_path();
assert!(!rp.is_empty());
do each_pkg_parent_workspace(&pkgid) |workspace| {
path_util::uninstall_package_from(workspace, &pkgid);
note(fmt!("Uninstalled package %s (was installed in %s)",
pkgid.to_str(), workspace.to_str()));
true
};
}
}
"unprefer" => {
if args.len() < 1 {
return usage::unprefer();
}
self.unprefer(args[0], None);
}
_ => fail!(fmt!("I don't know the command `%s`", cmd))
}
}
fn do_cmd(&self, _cmd: &str, _pkgname: &str) {
// stub
fail!("`do` not yet implemented");
}
fn build(&self, workspace: &Path, pkgid: &PkgId) {
debug!("build: workspace = %s (in Rust path? %? is git dir? %? \
pkgid = %s", workspace.to_str(),
in_rust_path(workspace), is_git_dir(&workspace.push_rel(&pkgid.path)),
pkgid.to_str());
let src_dir = first_pkgid_src_in_workspace(pkgid, workspace);
// If workspace isn't in the RUST_PATH, and it's a git repo,
// then clone it into the first entry in RUST_PATH, and repeat
debug!("%? %? %s", in_rust_path(workspace),
is_git_dir(&workspace.push_rel(&pkgid.path)),
workspace.to_str());
if !in_rust_path(workspace) && is_git_dir(&workspace.push_rel(&pkgid.path)) {
let out_dir = default_workspace().push("src").push_rel(&pkgid.path);
source_control::git_clone(&workspace.push_rel(&pkgid.path),
&out_dir, &pkgid.version);
let default_ws = default_workspace();
debug!("Calling build recursively with %? and %?", default_ws.to_str(),
pkgid.to_str());
return self.build(&default_ws, pkgid);
}
// Create the package source
let mut src = PkgSrc::new(workspace, pkgid);
debug!("Package src = %?", src);
// Is there custom build logic? If so, use it
let pkg_src_dir = src_dir;
let mut custom = false;
debug!("Package source directory = %?", pkg_src_dir);
let cfgs = match pkg_src_dir.chain_ref(|p| src.package_script_option(p)) {
Some(package_script_path) => {
let sysroot = self.sysroot_to_use().expect("custom build needs a sysroot");
let pscript = PkgScript::parse(sysroot,
package_script_path,
workspace,
pkgid);
let (cfgs, hook_result) = pscript.run_custom(sysroot);
debug!("Command return code = %?", hook_result);
if hook_result != 0 {
fail!("Error running custom build command")
}
custom = true;
// otherwise, the package script succeeded
cfgs
}
None => {
debug!("No package script, continuing");
~[]
}
};
// If there was a package script, it should have finished
// the build already. Otherwise...
if !custom {
// Find crates inside the workspace
src.find_crates();
// Build it!
src.build(self, cfgs);
}
}
fn clean(&self, workspace: &Path, id: &PkgId) {
// Could also support a custom build hook in the pkg
// script for cleaning files rustpkg doesn't know about.
// Do something reasonable for now
let dir = build_pkg_id_in_workspace(id, workspace);
note(fmt!("Cleaning package %s (removing directory %s)",
id.to_str(), dir.to_str()));
if os::path_exists(&dir) {
os::remove_dir_recursive(&dir);
note(fmt!("Removed directory %s", dir.to_str()));
}
note(fmt!("Cleaned package %s", id.to_str()));
}
fn info(&self) {
// stub
fail!("info not yet implemented");
}
fn install(&self, workspace: &Path, id: &PkgId) {
// FIXME #7402: Use RUST_PATH to determine target dir
// Also should use workcache to not build if not necessary.
self.build(workspace, id);
debug!("install: workspace = %s, id = %s", workspace.to_str(),
id.to_str());
self.install_no_build(workspace, id);
}
fn install_no_build(&self, workspace: &Path, id: &PkgId) {
use conditions::copy_failed::cond;
// Now copy stuff into the install dirs
let maybe_executable = built_executable_in_workspace(id, workspace);
let maybe_library = built_library_in_workspace(id, workspace);
let target_exec = target_executable_in_workspace(id, workspace);
let target_lib = maybe_library.map(|_p| target_library_in_workspace(id, workspace));
debug!("target_exec = %s target_lib = %? \
maybe_executable = %? maybe_library = %?",
target_exec.to_str(), target_lib,
maybe_executable, maybe_library);
for exec in maybe_executable.iter() {
debug!("Copying: %s -> %s", exec.to_str(), target_exec.to_str());
if !(os::mkdir_recursive(&target_exec.dir_path(), U_RWX) &&
os::copy_file(exec, &target_exec)) {
cond.raise(((*exec).clone(), target_exec.clone()));
}
}
for lib in maybe_library.iter() {
let target_lib = target_lib.clone().expect(fmt!("I built %s but apparently \
didn't install it!", lib.to_str()));
let target_lib = target_lib.pop().push(lib.filename().expect("weird target lib"));
debug!("Copying: %s -> %s", lib.to_str(), target_lib.to_str());
if !(os::mkdir_recursive(&target_lib.dir_path(), U_RWX) &&
os::copy_file(lib, &target_lib)) {
cond.raise(((*lib).clone(), target_lib.clone()));
}
}
}
fn prefer(&self, _id: &str, _vers: Option<~str>) {
fail!("prefer not yet implemented");
}
fn test(&self) {
// stub
fail!("test not yet implemented");
}
fn uninstall(&self, _id: &str, _vers: Option<~str>) {
fail!("uninstall not yet implemented");
}
fn unprefer(&self, _id: &str, _vers: Option<~str>) {
fail!("unprefer not yet implemented");
}
}
pub fn main() {
io::println("WARNING: The Rust package manager is experimental and may be unstable");
let args = os::args();
main_args(args);
}
pub fn main_args(args: &[~str]) {
let opts = ~[getopts::optflag("h"), getopts::optflag("help"),
getopts::optflag("j"), getopts::optflag("json"),
getopts::optmulti("c"), getopts::optmulti("cfg"),
getopts::optflag("v"), getopts::optflag("version")];
let matches = &match getopts::getopts(args, opts) {
result::Ok(m) => m,
result::Err(f) => {
error(fmt!("%s", getopts::fail_str(f)));
return;
}
};
let help = getopts::opt_present(matches, "h") ||
getopts::opt_present(matches, "help");
let json = getopts::opt_present(matches, "j") ||
getopts::opt_present(matches, "json");
if getopts::opt_present(matches, "v") ||
getopts::opt_present(matches, "version") {
rustc::version(args[0]);
return;
}
let mut args = matches.free.clone();
args.shift();
if args.len() < 1 {
return usage::general();
}
let cmd = args.shift();
if !util::is_cmd(cmd) {
return usage::general();
} else if help {
return match cmd {
~"build" => usage::build(),
~"clean" => usage::clean(),
~"do" => usage::do_cmd(),
~"info" => usage::info(),
~"install" => usage::install(),
~"list" => usage::list(),
~"prefer" => usage::prefer(),
~"test" => usage::test(),
~"uninstall" => usage::uninstall(),
~"unprefer" => usage::unprefer(),
_ => usage::general()
};
}
let sroot = Some(@filesearch::get_or_default_sysroot());
debug!("Using sysroot: %?", sroot);
Ctx {
sysroot_opt: sroot, // Currently, only tests override this
json: json,
dep_cache: @mut HashMap::new()
}.run(cmd, args);
}
/**
* Get the working directory of the package script.
* Assumes that the directory the package script
* was compiled in is the working directory.
*/
pub fn work_dir() -> Path
|
/**
* Get the source directory of the package (i.e.
* where the crates are located). Assumes
* that the cwd is changed to it before
* running this executable.
*/
pub fn src_dir() -> Path {
os::getcwd()
}
|
{
os::self_exe_path().unwrap()
}
|
EditElementModal.tsx
|
import * as React from "react";
import IconButton from "../shared/icon-button/IconButton";
import Modal from "../shared/modal/Modal";
import { i18n } from "../../Locale";
import ElementManager, { getElementLocales } from "../../ElementManager";
import { IMassCalculatorElement } from "./hooks/useMassCalculator";
interface EditElementModalProps {
isOpen: boolean;
onClose: () => void;
increaseQuantity: () => void;
decreaseQuantity: () => void;
changeQuantity: (quantity: number) => void;
selectedElement?: IMassCalculatorElement;
}
function EditElementModal({
isOpen,
selectedElement,
increaseQuantity,
decreaseQuantity,
changeQuantity,
onClose,
}: EditElementModalProps) {
if (!selectedElement) return null;
const element = ElementManager.getElement(selectedElement.atomic);
if (!element) {
return null;
}
|
title={elementLocales.name}
closeButton={true}
className="mass-calculator__modify-element-modal"
open={isOpen}
onClose={onClose}
>
<div className="mass-calculator__modify-element-modal__controls">
<IconButton iconName="remove" onClick={decreaseQuantity} />
<input
className="mass-calculator__modify-element-modal__amount-input"
type="tel"
name="amount"
value={selectedElement.quantity}
onChange={(event: React.FormEvent<HTMLInputElement>) => {
const amount = parseInt(event.currentTarget.value, 10);
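// NOTE: parseInt yields NaN for an empty field; consumers of
// changeQuantity may want to guard against NaN.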
changeQuantity(amount);
}}
/>
<IconButton iconName="add" onClick={increaseQuantity} />
</div>
<div className="mass-calculator__modify-element-modal__text">
{i18n("change_amount")}
</div>
</Modal>
);
}
export default EditElementModal;
|
const elementLocales = getElementLocales(element);
return (
<Modal
|
tex_cnn_rest.py
|
#! /usr/bin/env python
from tornado import httpserver
from tornado import gen
from tornado.ioloop import IOLoop
import tornado.web
import json
import single_eval as sev
class IndexHandler(tornado.web.RequestHandler):
def get(self):
self.write("Hello,This is TextCNN")
class ClassifyHandler(tornado.web.RequestHandler):
def get(self):
data = self.get_argument('q', 'Hello')
predict_result = sev.classify(data)
self.write("this is Classfication for text,get method and result:{}".format(predict_result))
def post(self):
self.write("this is classfication for text ,post method")
class Application(tornado.web.Application):
def __init__(self):
handlers = [
(r"/?",IndexHandler),
(r"/classify/?",ClassifyHandler)
]
tornado.web.Application.__init__(self,handlers=handlers)
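# Example request (a sketch; assumes the server below is listening on port 80):
#   GET /classify/?q=some+text  ->  runs single_eval.classify("some text")
#   and writes the prediction into the response body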
def main():
|
if __name__ == '__main__':
main()
|
app = Application()
app.listen(80)
IOLoop.instance().start()
|
test_engine.py
|
# -*- encoding: utf-8 -*-
from sqlalchemy.testing import eq_, engines
from sqlalchemy import *
from sqlalchemy import exc
from sqlalchemy.dialects.mssql import pyodbc, pymssql
from sqlalchemy.engine import url
from sqlalchemy.testing import fixtures
from sqlalchemy import testing
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing.mock import Mock
class ParseConnectTest(fixtures.TestBase):
def test_pyodbc_connect_dsn_trusted(self):
dialect = pyodbc.dialect()
u = url.make_url('mssql://mydsn')
connection = dialect.create_connect_args(u)
eq_([['dsn=mydsn;Trusted_Connection=Yes'], {}], connection)
def test_pyodbc_connect_old_style_dsn_trusted(self):
dialect = pyodbc.dialect()
u = url.make_url('mssql:///?dsn=mydsn')
connection = dialect.create_connect_args(u)
eq_([['dsn=mydsn;Trusted_Connection=Yes'], {}], connection)
def test_pyodbc_connect_dsn_non_trusted(self):
dialect = pyodbc.dialect()
u = url.make_url('mssql://username:password@mydsn')
connection = dialect.create_connect_args(u)
eq_([['dsn=mydsn;UID=username;PWD=password'], {}], connection)
def test_pyodbc_connect_dsn_extra(self):
dialect = pyodbc.dialect()
u = \
url.make_url('mssql://username:password@mydsn/?LANGUAGE=us_'
'english&foo=bar')
connection = dialect.create_connect_args(u)
dsn_string = connection[0][0]
assert ";LANGUAGE=us_english" in dsn_string
assert ";foo=bar" in dsn_string
def test_pyodbc_connect(self):
dialect = pyodbc.dialect()
u = url.make_url('mssql://username:password@hostspec/database')
connection = dialect.create_connect_args(u)
eq_([['DRIVER={SQL Server};Server=hostspec;Database=database;UI'
'D=username;PWD=password'], {}], connection)
def test_pyodbc_connect_comma_port(self):
dialect = pyodbc.dialect()
u = \
url.make_url('mssql://username:password@hostspec:12345/data'
'base')
connection = dialect.create_connect_args(u)
eq_([['DRIVER={SQL Server};Server=hostspec,12345;Database=datab'
'ase;UID=username;PWD=password'], {}], connection)
def test_pyodbc_connect_config_port(self):
dialect = pyodbc.dialect()
u = \
url.make_url('mssql://username:password@hostspec/database?p'
'ort=12345')
connection = dialect.create_connect_args(u)
eq_([['DRIVER={SQL Server};Server=hostspec;Database=database;UI'
'D=username;PWD=password;port=12345'], {}], connection)
def test_pyodbc_extra_connect(self):
dialect = pyodbc.dialect()
u = \
url.make_url('mssql://username:password@hostspec/database?L'
'ANGUAGE=us_english&foo=bar')
connection = dialect.create_connect_args(u)
eq_(connection[1], {})
eq_(connection[0][0]
in ('DRIVER={SQL Server};Server=hostspec;Database=database;'
'UID=username;PWD=password;foo=bar;LANGUAGE=us_english',
'DRIVER={SQL Server};Server=hostspec;Database=database;UID='
'username;PWD=password;LANGUAGE=us_english;foo=bar'), True)
def test_pyodbc_odbc_connect(self):
dialect = pyodbc.dialect()
u = \
url.make_url('mssql:///?odbc_connect=DRIVER%3D%7BSQL+Server'
'%7D%3BServer%3Dhostspec%3BDatabase%3Ddatabase'
'%3BUID%3Dusername%3BPWD%3Dpassword')
connection = dialect.create_connect_args(u)
eq_([['DRIVER={SQL Server};Server=hostspec;Database=database;UI'
'D=username;PWD=password'], {}], connection)
def test_pyodbc_odbc_connect_with_dsn(self):
dialect = pyodbc.dialect()
u = \
url.make_url('mssql:///?odbc_connect=dsn%3Dmydsn%3BDatabase'
'%3Ddatabase%3BUID%3Dusername%3BPWD%3Dpassword'
)
connection = dialect.create_connect_args(u)
eq_([['dsn=mydsn;Database=database;UID=username;PWD=password'],
{}], connection)
def test_pyodbc_odbc_connect_ignores_other_values(self):
dialect = pyodbc.dialect()
u = \
url.make_url('mssql://userdiff:passdiff@localhost/dbdiff?od'
'bc_connect=DRIVER%3D%7BSQL+Server%7D%3BServer'
'%3Dhostspec%3BDatabase%3Ddatabase%3BUID%3Duse'
'rname%3BPWD%3Dpassword')
connection = dialect.create_connect_args(u)
eq_([['DRIVER={SQL Server};Server=hostspec;Database=database;UI'
'D=username;PWD=password'], {}], connection)
def test_pymssql_port_setting(self):
dialect = pymssql.dialect()
u = \
url.make_url('mssql+pymssql://scott:tiger@somehost/test')
connection = dialect.create_connect_args(u)
eq_(
[[], {'host': 'somehost', 'password': 'tiger',
'user': 'scott', 'database': 'test'}], connection
)
u = \
url.make_url('mssql+pymssql://scott:tiger@somehost:5000/test')
connection = dialect.create_connect_args(u)
eq_(
[[], {'host': 'somehost:5000', 'password': 'tiger',
'user': 'scott', 'database': 'test'}], connection
)
def test_pymssql_disconnect(self):
dialect = pymssql.dialect()
for error in [
'Adaptive Server connection timed out',
'Net-Lib error during Connection reset by peer',
'message 20003',
'Error 10054',
'Not connected to any MS SQL server',
'Connection is closed'
]:
eq_(dialect.is_disconnect(error, None, None), True)
eq_(dialect.is_disconnect("not an error", None, None), False)
@testing.only_on(['mssql+pyodbc', 'mssql+pymssql'],
"FreeTDS specific test")
def test_bad_freetds_warning(self):
engine = engines.testing_engine()
def _bad_version(connection):
|
return 95, 10, 255
engine.dialect._get_server_version_info = _bad_version
assert_raises_message(exc.SAWarning,
'Unrecognized server version info',
engine.connect)
class VersionDetectionTest(fixtures.TestBase):
def test_pymssql_version(self):
dialect = pymssql.MSDialect_pymssql()
for vers in [
"Microsoft SQL Server Blah - 11.0.9216.62",
"Microsoft SQL Server (XYZ) - 11.0.9216.62 \n"
"Jul 18 2014 22:00:21 \nCopyright (c) Microsoft Corporation",
"Microsoft SQL Azure (RTM) - 11.0.9216.62 \n"
"Jul 18 2014 22:00:21 \nCopyright (c) Microsoft Corporation"
]:
conn = Mock(scalar=Mock(return_value=vers))
eq_(
dialect._get_server_version_info(conn),
(11, 0, 9216, 62)
)
| |
constants.js
|
// TODO - document these options
export const DEFAULT_LOADER_OPTIONS = {
CDN: 'https://unpkg.com/@loaders.gl',
worker: true, // By default, use worker if provided by loader
log: new ConsoleLog(), // A probe.gl-compatible logger (`log.log()()` syntax) that just logs to the console
dataType: 'arraybuffer' // TODO - explain why this option is needed for parsing
};
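// Example (a sketch; assumes a loaders.gl-style `load(url, loader, options)` API
// that accepts these options):
//   load(url, loader, {...DEFAULT_LOADER_OPTIONS, worker: false});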
|
import {ConsoleLog} from './loader-utils/loggers';
|
|
resource_usage_gatherer.go
|
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"bytes"
"fmt"
"math"
"sort"
"strconv"
"strings"
"sync"
"text/tabwriter"
"time"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
clientset "k8s.io/client-go/kubernetes"
e2ekubelet "k8s.io/kubernetes/test/e2e/framework/kubelet"
"k8s.io/kubernetes/test/e2e/system"
)
// ResourceConstraint is a struct to hold constraints.
type ResourceConstraint struct {
CPUConstraint float64
MemoryConstraint uint64
}
// SingleContainerSummary is a struct to hold single container summary.
type SingleContainerSummary struct {
Name string
CPU float64
Mem uint64
}
// ResourceUsageSummary is a struct to hold resource usage summary.
// we can't have int here, as JSON does not accept integer keys.
type ResourceUsageSummary map[string][]SingleContainerSummary
// NoCPUConstraint means no upper bound on CPU usage.
const NoCPUConstraint = math.MaxFloat64
// PrintHumanReadable prints the resource usage summary in human-readable form.
func (s *ResourceUsageSummary) PrintHumanReadable() string {
buf := &bytes.Buffer{}
w := tabwriter.NewWriter(buf, 1, 0, 1, ' ', 0)
for perc, summaries := range *s {
buf.WriteString(fmt.Sprintf("%v percentile:\n", perc))
fmt.Fprintf(w, "container\tcpu(cores)\tmemory(MB)\n")
for _, summary := range summaries {
fmt.Fprintf(w, "%q\t%.3f\t%.2f\n", summary.Name, summary.CPU, float64(summary.Mem)/(1024*1024))
}
w.Flush()
}
return buf.String()
}
// PrintJSON prints resource usage summary in JSON.
func (s *ResourceUsageSummary) PrintJSON() string {
return PrettyPrintJSON(*s)
}
// SummaryKind returns the kind string for ResourceUsageSummary.
func (s *ResourceUsageSummary) SummaryKind() string {
return "ResourceUsageSummary"
}
type uint64arr []uint64
func (a uint64arr) Len() int { return len(a) }
func (a uint64arr) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a uint64arr) Less(i, j int) bool { return a[i] < a[j] }
type usageDataPerContainer struct {
cpuData []float64
memUseData []uint64
memWorkSetData []uint64
}
func
|
(timeSeries []e2ekubelet.ResourceUsagePerContainer, percentilesToCompute []int) map[int]e2ekubelet.ResourceUsagePerContainer {
if len(timeSeries) == 0 {
return make(map[int]e2ekubelet.ResourceUsagePerContainer)
}
dataMap := make(map[string]*usageDataPerContainer)
for i := range timeSeries {
for name, data := range timeSeries[i] {
if dataMap[name] == nil {
dataMap[name] = &usageDataPerContainer{
cpuData: make([]float64, 0, len(timeSeries)),
memUseData: make([]uint64, 0, len(timeSeries)),
memWorkSetData: make([]uint64, 0, len(timeSeries)),
}
}
dataMap[name].cpuData = append(dataMap[name].cpuData, data.CPUUsageInCores)
dataMap[name].memUseData = append(dataMap[name].memUseData, data.MemoryUsageInBytes)
dataMap[name].memWorkSetData = append(dataMap[name].memWorkSetData, data.MemoryWorkingSetInBytes)
}
}
for _, v := range dataMap {
sort.Float64s(v.cpuData)
sort.Sort(uint64arr(v.memUseData))
sort.Sort(uint64arr(v.memWorkSetData))
}
result := make(map[int]e2ekubelet.ResourceUsagePerContainer)
for _, perc := range percentilesToCompute {
data := make(e2ekubelet.ResourceUsagePerContainer)
for k, v := range dataMap {
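// Index of the perc-th percentile in the sorted series, e.g. for
// 10 samples and perc=90: ceil(10*90/100) - 1 = 8 (the 9th element).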
percentileIndex := int(math.Ceil(float64(len(v.cpuData)*perc)/100)) - 1
data[k] = &e2ekubelet.ContainerResourceUsage{
Name: k,
CPUUsageInCores: v.cpuData[percentileIndex],
MemoryUsageInBytes: v.memUseData[percentileIndex],
MemoryWorkingSetInBytes: v.memWorkSetData[percentileIndex],
}
}
result[perc] = data
}
return result
}
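// leftMergeData merges right into left: only percentiles already present in
// left are kept, and for those, container entries from right overwrite left's.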
func leftMergeData(left, right map[int]e2ekubelet.ResourceUsagePerContainer) map[int]e2ekubelet.ResourceUsagePerContainer {
result := make(map[int]e2ekubelet.ResourceUsagePerContainer)
for percentile, data := range left {
result[percentile] = data
if _, ok := right[percentile]; !ok {
continue
}
for k, v := range right[percentile] {
result[percentile][k] = v
}
}
return result
}
type resourceGatherWorker struct {
c clientset.Interface
nodeName string
wg *sync.WaitGroup
containerIDs []string
stopCh chan struct{}
dataSeries []e2ekubelet.ResourceUsagePerContainer
finished bool
inKubemark bool
resourceDataGatheringPeriod time.Duration
probeDuration time.Duration
printVerboseLogs bool
}
func (w *resourceGatherWorker) singleProbe() {
data := make(e2ekubelet.ResourceUsagePerContainer)
if w.inKubemark {
kubemarkData := GetKubemarkMasterComponentsResourceUsage()
if kubemarkData == nil {
return
}
for k, v := range kubemarkData {
data[k] = &e2ekubelet.ContainerResourceUsage{
Name: v.Name,
MemoryWorkingSetInBytes: v.MemoryWorkingSetInBytes,
CPUUsageInCores: v.CPUUsageInCores,
}
}
} else {
nodeUsage, err := e2ekubelet.GetOneTimeResourceUsageOnNode(w.c, w.nodeName, w.probeDuration, func() []string { return w.containerIDs })
if err != nil {
Logf("Error while reading data from %v: %v", w.nodeName, err)
return
}
for k, v := range nodeUsage {
data[k] = v
if w.printVerboseLogs {
Logf("Get container %v usage on node %v. CPUUsageInCores: %v, MemoryUsageInBytes: %v, MemoryWorkingSetInBytes: %v", k, w.nodeName, v.CPUUsageInCores, v.MemoryUsageInBytes, v.MemoryWorkingSetInBytes)
}
}
}
w.dataSeries = append(w.dataSeries, data)
}
func (w *resourceGatherWorker) gather(initialSleep time.Duration) {
defer utilruntime.HandleCrash()
defer w.wg.Done()
defer Logf("Closing worker for %v", w.nodeName)
defer func() { w.finished = true }()
select {
case <-time.After(initialSleep):
w.singleProbe()
for {
select {
case <-time.After(w.resourceDataGatheringPeriod):
w.singleProbe()
case <-w.stopCh:
return
}
}
case <-w.stopCh:
return
}
}
// ContainerResourceGatherer is a struct for gathering container resource usage.
type ContainerResourceGatherer struct {
client clientset.Interface
stopCh chan struct{}
workers []resourceGatherWorker
workerWg sync.WaitGroup
containerIDs []string
options ResourceGathererOptions
}
// ResourceGathererOptions is a struct to hold options for resource gathering.
type ResourceGathererOptions struct {
InKubemark bool
Nodes NodesSet
ResourceDataGatheringPeriod time.Duration
ProbeDuration time.Duration
PrintVerboseLogs bool
}
// NodesSet selects which nodes to gather resource usage from.
type NodesSet int
const (
// AllNodes means all containers on all nodes.
AllNodes NodesSet = 0
// MasterNodes means all containers on Master nodes only.
MasterNodes NodesSet = 1
// MasterAndDNSNodes means all containers on Master nodes and DNS containers on other nodes.
MasterAndDNSNodes NodesSet = 2
)
// NewResourceUsageGatherer returns a new ContainerResourceGatherer.
func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOptions, pods *v1.PodList) (*ContainerResourceGatherer, error) {
g := ContainerResourceGatherer{
client: c,
stopCh: make(chan struct{}),
containerIDs: make([]string, 0),
options: options,
}
if options.InKubemark {
g.workerWg.Add(1)
g.workers = append(g.workers, resourceGatherWorker{
inKubemark: true,
stopCh: g.stopCh,
wg: &g.workerWg,
finished: false,
resourceDataGatheringPeriod: options.ResourceDataGatheringPeriod,
probeDuration: options.ProbeDuration,
printVerboseLogs: options.PrintVerboseLogs,
})
return &g, nil
}
// Tracks kube-system pods if no valid PodList is passed in.
var err error
if pods == nil {
pods, err = c.CoreV1().Pods("kube-system").List(metav1.ListOptions{})
if err != nil {
Logf("Error while listing Pods: %v", err)
return nil, err
}
}
dnsNodes := make(map[string]bool)
for _, pod := range pods.Items {
if (options.Nodes == MasterNodes) && !system.DeprecatedMightBeMasterNode(pod.Spec.NodeName) {
continue
}
if (options.Nodes == MasterAndDNSNodes) && !system.DeprecatedMightBeMasterNode(pod.Spec.NodeName) && pod.Labels["k8s-app"] != "kube-dns" {
continue
}
for _, container := range pod.Status.InitContainerStatuses {
g.containerIDs = append(g.containerIDs, container.Name)
}
for _, container := range pod.Status.ContainerStatuses {
g.containerIDs = append(g.containerIDs, container.Name)
}
if options.Nodes == MasterAndDNSNodes {
dnsNodes[pod.Spec.NodeName] = true
}
}
nodeList, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
if err != nil {
Logf("Error while listing Nodes: %v", err)
return nil, err
}
for _, node := range nodeList.Items {
if options.Nodes == AllNodes || system.DeprecatedMightBeMasterNode(node.Name) || dnsNodes[node.Name] {
g.workerWg.Add(1)
g.workers = append(g.workers, resourceGatherWorker{
c: c,
nodeName: node.Name,
wg: &g.workerWg,
containerIDs: g.containerIDs,
stopCh: g.stopCh,
finished: false,
inKubemark: false,
resourceDataGatheringPeriod: options.ResourceDataGatheringPeriod,
probeDuration: options.ProbeDuration,
printVerboseLogs: options.PrintVerboseLogs,
})
if options.Nodes == MasterNodes {
break
}
}
}
return &g, nil
}
// StartGatheringData starts a stat-gathering worker for each node to track,
// and blocks until StopAndSummarize is called.
func (g *ContainerResourceGatherer) StartGatheringData() {
if len(g.workers) == 0 {
return
}
delayPeriod := g.options.ResourceDataGatheringPeriod / time.Duration(len(g.workers))
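// Stagger worker start times evenly across one gathering period, e.g. a 10s
// period with 5 workers starts them 2s apart.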
delay := time.Duration(0)
for i := range g.workers {
go g.workers[i].gather(delay)
delay += delayPeriod
}
g.workerWg.Wait()
}
// StopAndSummarize stops stat gathering workers, processes the collected stats,
// generates resource summary for the passed-in percentiles, and returns the summary.
// It returns an error if the resource usage at the 99th percentile is beyond
// the specified resource constraints (only the 99th percentile is checked).
func (g *ContainerResourceGatherer) StopAndSummarize(percentiles []int, constraints map[string]ResourceConstraint) (*ResourceUsageSummary, error) {
close(g.stopCh)
Logf("Closed stop channel. Waiting for %v workers", len(g.workers))
finished := make(chan struct{})
go func() {
g.workerWg.Wait()
finished <- struct{}{}
}()
select {
case <-finished:
Logf("Waitgroup finished.")
case <-time.After(2 * time.Minute):
unfinished := make([]string, 0)
for i := range g.workers {
if !g.workers[i].finished {
unfinished = append(unfinished, g.workers[i].nodeName)
}
}
Logf("Timed out while waiting for waitgroup, some workers failed to finish: %v", unfinished)
}
if len(percentiles) == 0 {
Logf("Warning! Empty percentile list for stopAndPrintData.")
return &ResourceUsageSummary{}, fmt.Errorf("Failed to get any resource usage data")
}
data := make(map[int]e2ekubelet.ResourceUsagePerContainer)
for i := range g.workers {
if g.workers[i].finished {
stats := computePercentiles(g.workers[i].dataSeries, percentiles)
data = leftMergeData(stats, data)
}
}
// Workers have been stopped; the data they stored was merged above.
sortedKeys := []string{}
for name := range data[percentiles[0]] {
sortedKeys = append(sortedKeys, name)
}
sort.Strings(sortedKeys)
violatedConstraints := make([]string, 0)
summary := make(ResourceUsageSummary)
for _, perc := range percentiles {
for _, name := range sortedKeys {
usage := data[perc][name]
summary[strconv.Itoa(perc)] = append(summary[strconv.Itoa(perc)], SingleContainerSummary{
Name: name,
CPU: usage.CPUUsageInCores,
Mem: usage.MemoryWorkingSetInBytes,
})
// Verifying 99th percentile of resource usage
if perc != 99 {
continue
}
// Name has a form: <pod_name>/<container_name>
containerName := strings.Split(name, "/")[1]
constraint, ok := constraints[containerName]
if !ok {
continue
}
if usage.CPUUsageInCores > constraint.CPUConstraint {
violatedConstraints = append(
violatedConstraints,
fmt.Sprintf("Container %v is using %v/%v CPU",
name,
usage.CPUUsageInCores,
constraint.CPUConstraint,
),
)
}
if usage.MemoryWorkingSetInBytes > constraint.MemoryConstraint {
violatedConstraints = append(
violatedConstraints,
fmt.Sprintf("Container %v is using %v/%v MB of memory",
name,
float64(usage.MemoryWorkingSetInBytes)/(1024*1024),
float64(constraint.MemoryConstraint)/(1024*1024),
),
)
}
}
}
if len(violatedConstraints) > 0 {
return &summary, fmt.Errorf("%s", strings.Join(violatedConstraints, "\n"))
}
return &summary, nil
}
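// Example usage (a minimal sketch; assumes a configured clientset `c` and a
// caller interested in the 50th/90th/99th percentiles):
//
//	gatherer, err := NewResourceUsageGatherer(c, ResourceGathererOptions{
//		Nodes:                       AllNodes,
//		ResourceDataGatheringPeriod: 10 * time.Second,
//		ProbeDuration:               15 * time.Second,
//	}, nil)
//	if err != nil { /* handle error */ }
//	go gatherer.StartGatheringData() // blocks, so run in a goroutine
//	// ... run the workload under test ...
//	summary, err := gatherer.StopAndSummarize([]int{50, 90, 99}, constraints)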
|
computePercentiles
|
search_index.js
|
var documenterSearchIndex = {"docs":
[{"location":"tutorials/#Tutorials","page":"Tutorials","title":"Tutorials","text":"","category":"section"},{"location":"tutorials/","page":"Tutorials","title":"Tutorials","text":"Machine learning, finite difference, and particle mesh for electrostatics and electronic density calculation and prediction","category":"page"},{"location":"tutorials/","page":"Tutorials","title":"Tutorials","text":"Coming soon to Google Colab!","category":"page"},{"location":"publications/#Publications","page":"Publications","title":"Publications","text":"","category":"section"},{"location":"publications/","page":"Publications","title":"Publications","text":"Preprint: Paul Shen, Michael Herbst, Venkat Viswanathan. Rotation Equivariant Fourier Neural Operators for Learning Symmetry Preserving Transformations on Scalar Fields, Vector Fields, and Higher Order Tensor Fields. Arxiv. 2021.","category":"page"},{"location":"architecture/#Architecture","page":"Architecture","title":"Architecture","text":"","category":"section"},{"location":"architecture/#Tensor-fields-over-grid","page":"Architecture","title":"Tensor fields over grid","text":"","category":"section"},{"location":"architecture/","page":"Architecture","title":"Architecture","text":"Tensor fields are represented as 3d/4d arrays for 2d/3d uniform Cartesian grids with the last dimension for the field component. For example, a 3d vector field would be sized XxYxZx3 and while a 2d scalar field would be XxYx1. We provide","category":"page"},{"location":"architecture/","page":"Architecture","title":"Architecture","text":"We store grid information including resolution, size and origin in Grid. By default, the origin indices are centered in the grid. Julia is 1-indexed, so the origin of a 3x3x3 grid defaults to [2, 2, 2].","category":"page"},{"location":"architecture/","page":"Architecture","title":"Architecture","text":"Grid\r\nField","category":"page"},{"location":"architecture/#Grid","page":"Architecture","title":"Grid","text":"Grid(dx::AbstractFloat, rmax::AbstractFloat; dims = 3, rank_max = 1)\n\n\n\n\n\nGrid(\n dx::AbstractFloat,\n sz::Union{AbstractVector,Tuple};\n origin = nothing,\n rank_max = 1\n )\n\n\n\n\n\n","category":"type"},{"location":"architecture/#Field","page":"Architecture","title":"Field","text":"Field(; grid = nothing, rank = nothing, radfunc = nothing)\n\n\n\n\n\n","category":"function"},{"location":"architecture/#Tensor-field-products-and-operations","page":"Architecture","title":"Tensor field products and operations","text":"","category":"section"},{"location":"architecture/","page":"Architecture","title":"Architecture","text":"All Arrays operations (eg +, -, abs) apply to fields. Local pointwise products however depend on the ranks of input fields and output field. field_prod infers the appropriate pointwise product (eg scalar, dot, cross, matrix vector) from the ranks. For example, locally multiplying two vector fields into a scalar field (ranks 1, 1 -> 0) involves the dot product. 
We also provide convenience functions for retrieving or computing the pointwise norm and rank.","category":"page"},{"location":"architecture/","page":"Architecture","title":"Architecture","text":"field_prod\r\nfield_norm\r\nfield_rank","category":"page"},{"location":"architecture/#field_prod","page":"Architecture","title":"field_prod","text":"field_prod(x::AbstractArray, y::AbstractArray; rank = nothing)\n\n\n\n\n\n","category":"function"},{"location":"architecture/#field_norm","page":"Architecture","title":"field_norm","text":"function field_norm(field::AbstractArray)\n\n\n\n\n\n","category":"function"},{"location":"architecture/#field_rank","page":"Architecture","title":"field_rank","text":"field_rank(x::AbstractArray)\n\n\n\n\n\n","category":"function"},{"location":"architecture/#Particle-mesh-placement-and-interpolation","page":"Architecture","title":"Particle mesh placement and interpolation","text":"","category":"section"},{"location":"architecture/","page":"Architecture","title":"Architecture","text":"With grid info we can interpolate a tensor field at any location. We can also place a point tensor source (eg scalar particle) anywhere. This particle mesh placement applies integral normalization, so the array value is scaled by 1/dV (or 1/dA). Both work via a proximity weighted average of the closest lattice points (in general up to 4 in 2d and 8 in 3d).","category":"page"},{"location":"architecture/","page":"Architecture","title":"Architecture","text":"put_point_source!","category":"page"},{"location":"architecture/#put_point_source!","page":"Architecture","title":"put_point_source!","text":"put_point_source!(\n field::AbstractArray,\n grid::Grid,\n rvec::AbstractVector,\n val::AbstractVector)\n\n\n\n\n\n","category":"function"},{"location":"architecture/#Linear-operators","page":"Architecture","title":"Linear operators","text":"","category":"section"},{"location":"architecture/","page":"Architecture","title":"Architecture","text":"LinearOperator constructs functions for common differential operators and Green's functions. operators diverging at 0 are zeroed out within rmin. Any custom equivariant operator can also be made by specifying its radial function and ranks of the input output fields.","category":"page"},{"location":"architecture/","page":"Architecture","title":"Architecture","text":"Under the hood, we implement all linear operators as tensor field convolutions between the input field and the impulse response field of the operator. We compute the kernel field as a product of the radial function and the appropriate spherical harmonic tensor. The operator's kernel field has a Grid with origin centered on a lattice point. 
The output field's components are truncated to have same size and Grid as those of input.","category":"page"},{"location":"architecture/","page":"Architecture","title":"Architecture","text":"Long ranged convolutions are automatically computed in Fourier space by dependency DSP.jl","category":"page"},{"location":"architecture/","page":"Architecture","title":"Architecture","text":"LinearOperator","category":"page"},{"location":"architecture/#LinearOperator","page":"Architecture","title":"LinearOperator","text":"function LinearOperator(\n name;\n dx = nothing,\n rmax = nothing,\n ranks = nothing,\n grid = nothing,\n dims = 3,\n radfunc = nothing,\n rmin = 0.0,\n σ = 1.0\n)\n\n\n\n\n\nfunction (m::LinearOperator)(x::AbstractArray, grid::Grid)\n\n\n\n\n\n","category":"type"},{"location":"architecture/#Machine-learning","page":"Architecture","title":"Machine learning","text":"","category":"section"},{"location":"architecture/","page":"Architecture","title":"Architecture","text":"We can learn rotation equivariant mappings between sets of scalar, vector and tensor fields. EquivLayer constructs neural network layers compatible with Julia's machine learning library Flux.jl.","category":"page"},{"location":"architecture/","page":"Architecture","title":"Architecture","text":"...","category":"page"},{"location":"architecture/","page":"Architecture","title":"Architecture","text":"EquivLayer","category":"page"},{"location":"architecture/#EquivLayer","page":"Architecture","title":"EquivLayer","text":"function EquivLayer(\n name,\n inranks,\n outranks;\n dims = 3,\n dx = 1.0,\n rmax = 1.0,\n rank_max = max(1, inranks..., outranks...),\n σ = identity\n)\n\n\n\n\n\n\n\n\n\n","category":"type"},{"location":"#Home","page":"Home","title":"Home","text":"","category":"section"},{"location":"","page":"Home","title":"Home","text":"note: Note\nDocumentation website under construction. Expected release of code base in early December 2021.","category":"page"},{"location":"#Synopsis","page":"Home","title":"Synopsis","text":"","category":"section"},{"location":"","page":"Home","title":"Home","text":"EquivariantOperators.jl is a Julia package implementing equivariant machine learning, finite difference operators and particle mesh methods on scalar, vector and tensor fields over uniform grids in 2d/3d. It's a fully differentiable finite differences engine that can run forwards for simulation or backwards for machine learning and inverse problems. Emphasis is on rotation equivariant operators which consequently preserve symmetry. This includes common differential operators (eg div, curl, Laplacian), Green's functions (eg inverse-square fields, Gaussians, Stokeslet), and parametrized equivariant neural operators.","category":"page"},{"location":"","page":"Home","title":"Home","text":"Tensor fields are represented as multidim arrays supporting particle mesh methods of interpolation and point source placement. Operators are implemented as tensor field convolutions in real or Fourier space using rank appropriate products (eg scalar, dot, cross). 
For machine learning, we provide tensor convolution, product and nonlinear scaling layers for inferring equivariant mappings between arbitrary sets of tensor fields.","category":"page"},{"location":"#Use-cases","page":"Home","title":"Use cases","text":"","category":"section"},{"location":"","page":"Home","title":"Home","text":"Machine learning rotation equivariant and symmetry preserving behavior of dynamical systems and solutions to PDEs\nSolving inverse problems via adjoint methods\nApplying finite difference differential operators (eg grad, div) and Green's functions (eg inverse-square fields, Gaussians, Stokeslet) on images and vector fields\nParticle mesh point source placement, interpolation, and Fourier space field calculations","category":"page"},{"location":"","page":"Home","title":"Home","text":"Check out our tutorials on Google Colab and our Arxiv preprint!","category":"page"},{"location":"#Contributors","page":"Home","title":"Contributors","text":"","category":"section"},{"location":"","page":"Home","title":"Home","text":"Paul Shen ([email protected]), Michael Herbst ([email protected]), PI: Venkat Viswanathan ([email protected])","category":"page"},{"location":"","page":"Home","title":"Home","text":"In consultation with Rachel Kurchin, Dhairya Gandhi, Chris Rackauckas","category":"page"},{"location":"","page":"Home","title":"Home","text":"In collaboration with Julia Computing","category":"page"}]
|
}
|
|
options.go
|
// Copyright 2019 The Kubernetes Authors.
// SPDX-License-Identifier: Apache-2.0
package krusty
import (
"sigs.k8s.io/kustomize/api/plugins/config"
"sigs.k8s.io/kustomize/api/types"
)
// Options holds high-level kustomize configuration options,
// e.g. are plugins enabled, should the loader be restricted
// to the kustomization root, etc.
type Options struct {
// When true, sort the resources before emitting them,
// per a particular sort order. When false, don't do the
// sort, and instead respect the depth-first resource input
// order as specified by the kustomization file(s).
DoLegacyResourceSort bool
// Restrictions on what can be loaded from the file system.
// See type definition.
LoadRestrictions loadRestrictions
// Create an inventory object for pruning.
DoPrune bool
// Options related to kustomize plugins.
PluginConfig *types.PluginConfig
}
// MakeDefaultOptions returns a default instance of Options.
func MakeDefaultOptions() *Options
|
{
return &Options{
DoLegacyResourceSort: true,
LoadRestrictions: rootOnly,
DoPrune: false,
PluginConfig: config.DefaultPluginConfig(),
}
}
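// Example (a sketch): callers typically start from the defaults and tweak
// individual fields, e.g.:
//
//	opts := MakeDefaultOptions()
//	opts.DoPrune = true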
|
|
operation.rs
|
// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
/// Operation shape for `CreateDataset`.
///
/// This is usually constructed for you using the fluent builder returned by
/// [`create_dataset`](crate::client::Client::create_dataset).
///
/// See [`crate::client::fluent_builders::CreateDataset`] for more details about the operation.
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct CreateDataset {
_private: (),
}
impl CreateDataset {
/// Creates a new builder-style object to manufacture [`CreateDatasetInput`](crate::input::CreateDatasetInput)
pub fn builder() -> crate::input::create_dataset_input::Builder {
crate::input::create_dataset_input::Builder::default()
}
/// Creates a new `CreateDataset` operation.
pub fn new() -> Self {
Self { _private: () }
}
}
impl aws_smithy_http::response::ParseStrictResponse for CreateDataset {
type Output =
std::result::Result<crate::output::CreateDatasetOutput, crate::error::CreateDatasetError>;
fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
if !response.status().is_success() && response.status().as_u16() != 202 {
crate::operation_deser::parse_create_dataset_error(response)
} else {
crate::operation_deser::parse_create_dataset_response(response)
}
}
}
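// Example (a sketch; assumes an `http::Response<bytes::Bytes>` named `response`
// obtained from a raw HTTP client):
//
//     use aws_smithy_http::response::ParseStrictResponse;
//     let output = CreateDataset::new().parse(&response);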
/// Operation shape for `CreateModel`.
///
/// This is usually constructed for you using the fluent builder returned by
/// [`create_model`](crate::client::Client::create_model).
///
/// See [`crate::client::fluent_builders::CreateModel`] for more details about the operation.
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct CreateModel {
_private: (),
}
impl CreateModel {
/// Creates a new builder-style object to manufacture [`CreateModelInput`](crate::input::CreateModelInput)
pub fn builder() -> crate::input::create_model_input::Builder {
crate::input::create_model_input::Builder::default()
}
/// Creates a new `CreateModel` operation.
pub fn new() -> Self {
Self { _private: () }
}
}
impl aws_smithy_http::response::ParseStrictResponse for CreateModel {
type Output =
std::result::Result<crate::output::CreateModelOutput, crate::error::CreateModelError>;
fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
if !response.status().is_success() && response.status().as_u16() != 202 {
crate::operation_deser::parse_create_model_error(response)
} else {
crate::operation_deser::parse_create_model_response(response)
}
}
}
/// Operation shape for `CreateProject`.
///
/// This is usually constructed for you using the fluent builder returned by
/// [`create_project`](crate::client::Client::create_project).
///
/// See [`crate::client::fluent_builders::CreateProject`] for more details about the operation.
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct
|
{
_private: (),
}
impl CreateProject {
/// Creates a new builder-style object to manufacture [`CreateProjectInput`](crate::input::CreateProjectInput)
pub fn builder() -> crate::input::create_project_input::Builder {
crate::input::create_project_input::Builder::default()
}
/// Creates a new `CreateProject` operation.
pub fn new() -> Self {
Self { _private: () }
}
}
impl aws_smithy_http::response::ParseStrictResponse for CreateProject {
type Output =
std::result::Result<crate::output::CreateProjectOutput, crate::error::CreateProjectError>;
fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
if !response.status().is_success() && response.status().as_u16() != 200 {
crate::operation_deser::parse_create_project_error(response)
} else {
crate::operation_deser::parse_create_project_response(response)
}
}
}
/// Operation shape for `DeleteDataset`.
///
/// This is usually constructed for you using the fluent builder returned by
/// [`delete_dataset`](crate::client::Client::delete_dataset).
///
/// See [`crate::client::fluent_builders::DeleteDataset`] for more details about the operation.
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct DeleteDataset {
_private: (),
}
impl DeleteDataset {
/// Creates a new builder-style object to manufacture [`DeleteDatasetInput`](crate::input::DeleteDatasetInput)
pub fn builder() -> crate::input::delete_dataset_input::Builder {
crate::input::delete_dataset_input::Builder::default()
}
/// Creates a new `DeleteDataset` operation.
pub fn new() -> Self {
Self { _private: () }
}
}
impl aws_smithy_http::response::ParseStrictResponse for DeleteDataset {
type Output =
std::result::Result<crate::output::DeleteDatasetOutput, crate::error::DeleteDatasetError>;
fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
if !response.status().is_success() && response.status().as_u16() != 202 {
crate::operation_deser::parse_delete_dataset_error(response)
} else {
crate::operation_deser::parse_delete_dataset_response(response)
}
}
}
/// Operation shape for `DeleteModel`.
///
/// This is usually constructed for you using the fluent builder returned by
/// [`delete_model`](crate::client::Client::delete_model).
///
/// See [`crate::client::fluent_builders::DeleteModel`] for more details about the operation.
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct DeleteModel {
_private: (),
}
impl DeleteModel {
/// Creates a new builder-style object to manufacture [`DeleteModelInput`](crate::input::DeleteModelInput)
pub fn builder() -> crate::input::delete_model_input::Builder {
crate::input::delete_model_input::Builder::default()
}
/// Creates a new `DeleteModel` operation.
pub fn new() -> Self {
Self { _private: () }
}
}
impl aws_smithy_http::response::ParseStrictResponse for DeleteModel {
type Output =
std::result::Result<crate::output::DeleteModelOutput, crate::error::DeleteModelError>;
fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
if !response.status().is_success() && response.status().as_u16() != 202 {
crate::operation_deser::parse_delete_model_error(response)
} else {
crate::operation_deser::parse_delete_model_response(response)
}
}
}
/// Operation shape for `DeleteProject`.
///
/// This is usually constructed for you using the fluent builder returned by
/// [`delete_project`](crate::client::Client::delete_project).
///
/// See [`crate::client::fluent_builders::DeleteProject`] for more details about the operation.
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct DeleteProject {
_private: (),
}
impl DeleteProject {
/// Creates a new builder-style object to manufacture [`DeleteProjectInput`](crate::input::DeleteProjectInput)
pub fn builder() -> crate::input::delete_project_input::Builder {
crate::input::delete_project_input::Builder::default()
}
/// Creates a new `DeleteProject` operation.
pub fn new() -> Self {
Self { _private: () }
}
}
impl aws_smithy_http::response::ParseStrictResponse for DeleteProject {
type Output =
std::result::Result<crate::output::DeleteProjectOutput, crate::error::DeleteProjectError>;
fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
if !response.status().is_success() && response.status().as_u16() != 200 {
crate::operation_deser::parse_delete_project_error(response)
} else {
crate::operation_deser::parse_delete_project_response(response)
}
}
}
/// Operation shape for `DescribeDataset`.
///
/// This is usually constructed for you using the fluent builder returned by
/// [`describe_dataset`](crate::client::Client::describe_dataset).
///
/// See [`crate::client::fluent_builders::DescribeDataset`] for more details about the operation.
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct DescribeDataset {
_private: (),
}
impl DescribeDataset {
/// Creates a new builder-style object to manufacture [`DescribeDatasetInput`](crate::input::DescribeDatasetInput)
pub fn builder() -> crate::input::describe_dataset_input::Builder {
crate::input::describe_dataset_input::Builder::default()
}
/// Creates a new `DescribeDataset` operation.
pub fn new() -> Self {
Self { _private: () }
}
}
impl aws_smithy_http::response::ParseStrictResponse for DescribeDataset {
type Output = std::result::Result<
crate::output::DescribeDatasetOutput,
crate::error::DescribeDatasetError,
>;
fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
if !response.status().is_success() && response.status().as_u16() != 200 {
crate::operation_deser::parse_describe_dataset_error(response)
} else {
crate::operation_deser::parse_describe_dataset_response(response)
}
}
}
/// Operation shape for `DescribeModel`.
///
/// This is usually constructed for you using the fluent builder returned by
/// [`describe_model`](crate::client::Client::describe_model).
///
/// See [`crate::client::fluent_builders::DescribeModel`] for more details about the operation.
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct DescribeModel {
_private: (),
}
impl DescribeModel {
/// Creates a new builder-style object to manufacture [`DescribeModelInput`](crate::input::DescribeModelInput)
pub fn builder() -> crate::input::describe_model_input::Builder {
crate::input::describe_model_input::Builder::default()
}
/// Creates a new `DescribeModel` operation.
pub fn new() -> Self {
Self { _private: () }
}
}
impl aws_smithy_http::response::ParseStrictResponse for DescribeModel {
type Output =
std::result::Result<crate::output::DescribeModelOutput, crate::error::DescribeModelError>;
fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
if !response.status().is_success() && response.status().as_u16() != 200 {
crate::operation_deser::parse_describe_model_error(response)
} else {
crate::operation_deser::parse_describe_model_response(response)
}
}
}
/// Operation shape for `DescribeProject`.
///
/// This is usually constructed for you using the fluent builder returned by
/// [`describe_project`](crate::client::Client::describe_project).
///
/// See [`crate::client::fluent_builders::DescribeProject`] for more details about the operation.
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct DescribeProject {
_private: (),
}
impl DescribeProject {
/// Creates a new builder-style object to manufacture [`DescribeProjectInput`](crate::input::DescribeProjectInput)
pub fn builder() -> crate::input::describe_project_input::Builder {
crate::input::describe_project_input::Builder::default()
}
/// Creates a new `DescribeProject` operation.
pub fn new() -> Self {
Self { _private: () }
}
}
impl aws_smithy_http::response::ParseStrictResponse for DescribeProject {
type Output = std::result::Result<
crate::output::DescribeProjectOutput,
crate::error::DescribeProjectError,
>;
fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
if !response.status().is_success() && response.status().as_u16() != 200 {
crate::operation_deser::parse_describe_project_error(response)
} else {
crate::operation_deser::parse_describe_project_response(response)
}
}
}
/// Operation shape for `DetectAnomalies`.
///
/// This is usually constructed for you using the fluent builder returned by
/// [`detect_anomalies`](crate::client::Client::detect_anomalies).
///
/// See [`crate::client::fluent_builders::DetectAnomalies`] for more details about the operation.
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct DetectAnomalies {
_private: (),
}
impl DetectAnomalies {
/// Creates a new builder-style object to manufacture [`DetectAnomaliesInput`](crate::input::DetectAnomaliesInput)
pub fn builder() -> crate::input::detect_anomalies_input::Builder {
crate::input::detect_anomalies_input::Builder::default()
}
/// Creates a new `DetectAnomalies` operation.
pub fn new() -> Self {
Self { _private: () }
}
}
impl aws_smithy_http::response::ParseStrictResponse for DetectAnomalies {
type Output = std::result::Result<
crate::output::DetectAnomaliesOutput,
crate::error::DetectAnomaliesError,
>;
fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
if !response.status().is_success() && response.status().as_u16() != 200 {
crate::operation_deser::parse_detect_anomalies_error(response)
} else {
crate::operation_deser::parse_detect_anomalies_response(response)
}
}
}
/// Operation shape for `ListDatasetEntries`.
///
/// This is usually constructed for you using the fluent builder returned by
/// [`list_dataset_entries`](crate::client::Client::list_dataset_entries).
///
/// See [`crate::client::fluent_builders::ListDatasetEntries`] for more details about the operation.
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct ListDatasetEntries {
_private: (),
}
impl ListDatasetEntries {
/// Creates a new builder-style object to manufacture [`ListDatasetEntriesInput`](crate::input::ListDatasetEntriesInput)
pub fn builder() -> crate::input::list_dataset_entries_input::Builder {
crate::input::list_dataset_entries_input::Builder::default()
}
/// Creates a new `ListDatasetEntries` operation.
pub fn new() -> Self {
Self { _private: () }
}
}
impl aws_smithy_http::response::ParseStrictResponse for ListDatasetEntries {
type Output = std::result::Result<
crate::output::ListDatasetEntriesOutput,
crate::error::ListDatasetEntriesError,
>;
fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
if !response.status().is_success() && response.status().as_u16() != 200 {
crate::operation_deser::parse_list_dataset_entries_error(response)
} else {
crate::operation_deser::parse_list_dataset_entries_response(response)
}
}
}
/// Operation shape for `ListModels`.
///
/// This is usually constructed for you using the fluent builder returned by
/// [`list_models`](crate::client::Client::list_models).
///
/// See [`crate::client::fluent_builders::ListModels`] for more details about the operation.
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct ListModels {
_private: (),
}
impl ListModels {
/// Creates a new builder-style object to manufacture [`ListModelsInput`](crate::input::ListModelsInput)
pub fn builder() -> crate::input::list_models_input::Builder {
crate::input::list_models_input::Builder::default()
}
/// Creates a new `ListModels` operation.
pub fn new() -> Self {
Self { _private: () }
}
}
impl aws_smithy_http::response::ParseStrictResponse for ListModels {
type Output =
std::result::Result<crate::output::ListModelsOutput, crate::error::ListModelsError>;
fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
if !response.status().is_success() && response.status().as_u16() != 200 {
crate::operation_deser::parse_list_models_error(response)
} else {
crate::operation_deser::parse_list_models_response(response)
}
}
}
/// Operation shape for `ListProjects`.
///
/// This is usually constructed for you using the fluent builder returned by
/// [`list_projects`](crate::client::Client::list_projects).
///
/// See [`crate::client::fluent_builders::ListProjects`] for more details about the operation.
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct ListProjects {
_private: (),
}
impl ListProjects {
/// Creates a new builder-style object to manufacture [`ListProjectsInput`](crate::input::ListProjectsInput)
pub fn builder() -> crate::input::list_projects_input::Builder {
crate::input::list_projects_input::Builder::default()
}
/// Creates a new `ListProjects` operation.
pub fn new() -> Self {
Self { _private: () }
}
}
impl aws_smithy_http::response::ParseStrictResponse for ListProjects {
type Output =
std::result::Result<crate::output::ListProjectsOutput, crate::error::ListProjectsError>;
fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
if !response.status().is_success() && response.status().as_u16() != 200 {
crate::operation_deser::parse_list_projects_error(response)
} else {
crate::operation_deser::parse_list_projects_response(response)
}
}
}
/// Operation shape for `ListTagsForResource`.
///
/// This is usually constructed for you using the fluent builder returned by
/// [`list_tags_for_resource`](crate::client::Client::list_tags_for_resource).
///
/// See [`crate::client::fluent_builders::ListTagsForResource`] for more details about the operation.
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct ListTagsForResource {
_private: (),
}
impl ListTagsForResource {
/// Creates a new builder-style object to manufacture [`ListTagsForResourceInput`](crate::input::ListTagsForResourceInput)
pub fn builder() -> crate::input::list_tags_for_resource_input::Builder {
crate::input::list_tags_for_resource_input::Builder::default()
}
/// Creates a new `ListTagsForResource` operation.
pub fn new() -> Self {
Self { _private: () }
}
}
impl aws_smithy_http::response::ParseStrictResponse for ListTagsForResource {
type Output = std::result::Result<
crate::output::ListTagsForResourceOutput,
crate::error::ListTagsForResourceError,
>;
fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
if !response.status().is_success() && response.status().as_u16() != 200 {
crate::operation_deser::parse_list_tags_for_resource_error(response)
} else {
crate::operation_deser::parse_list_tags_for_resource_response(response)
}
}
}
/// Operation shape for `StartModel`.
///
/// This is usually constructed for you using the fluent builder returned by
/// [`start_model`](crate::client::Client::start_model).
///
/// See [`crate::client::fluent_builders::StartModel`] for more details about the operation.
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct StartModel {
_private: (),
}
impl StartModel {
/// Creates a new builder-style object to manufacture [`StartModelInput`](crate::input::StartModelInput)
pub fn builder() -> crate::input::start_model_input::Builder {
crate::input::start_model_input::Builder::default()
}
/// Creates a new `StartModel` operation.
pub fn new() -> Self {
Self { _private: () }
}
}
impl aws_smithy_http::response::ParseStrictResponse for StartModel {
type Output =
std::result::Result<crate::output::StartModelOutput, crate::error::StartModelError>;
fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
if !response.status().is_success() && response.status().as_u16() != 202 {
crate::operation_deser::parse_start_model_error(response)
} else {
crate::operation_deser::parse_start_model_response(response)
}
}
}
/// Operation shape for `StopModel`.
///
/// This is usually constructed for you using the fluent builder returned by
/// [`stop_model`](crate::client::Client::stop_model).
///
/// See [`crate::client::fluent_builders::StopModel`] for more details about the operation.
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct StopModel {
_private: (),
}
impl StopModel {
/// Creates a new builder-style object to manufacture [`StopModelInput`](crate::input::StopModelInput)
pub fn builder() -> crate::input::stop_model_input::Builder {
crate::input::stop_model_input::Builder::default()
}
/// Creates a new `StopModel` operation.
pub fn new() -> Self {
Self { _private: () }
}
}
impl aws_smithy_http::response::ParseStrictResponse for StopModel {
type Output = std::result::Result<crate::output::StopModelOutput, crate::error::StopModelError>;
fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
if !response.status().is_success() && response.status().as_u16() != 202 {
crate::operation_deser::parse_stop_model_error(response)
} else {
crate::operation_deser::parse_stop_model_response(response)
}
}
}
/// Operation shape for `TagResource`.
///
/// This is usually constructed for you using the fluent builder returned by
/// [`tag_resource`](crate::client::Client::tag_resource).
///
/// See [`crate::client::fluent_builders::TagResource`] for more details about the operation.
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct TagResource {
_private: (),
}
impl TagResource {
/// Creates a new builder-style object to manufacture [`TagResourceInput`](crate::input::TagResourceInput)
pub fn builder() -> crate::input::tag_resource_input::Builder {
crate::input::tag_resource_input::Builder::default()
}
/// Creates a new `TagResource` operation.
pub fn new() -> Self {
Self { _private: () }
}
}
impl aws_smithy_http::response::ParseStrictResponse for TagResource {
type Output =
std::result::Result<crate::output::TagResourceOutput, crate::error::TagResourceError>;
fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
if !response.status().is_success() && response.status().as_u16() != 200 {
crate::operation_deser::parse_tag_resource_error(response)
} else {
crate::operation_deser::parse_tag_resource_response(response)
}
}
}
/// Operation shape for `UntagResource`.
///
/// This is usually constructed for you using the fluent builder returned by
/// [`untag_resource`](crate::client::Client::untag_resource).
///
/// See [`crate::client::fluent_builders::UntagResource`] for more details about the operation.
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct UntagResource {
_private: (),
}
impl UntagResource {
/// Creates a new builder-style object to manufacture [`UntagResourceInput`](crate::input::UntagResourceInput)
pub fn builder() -> crate::input::untag_resource_input::Builder {
crate::input::untag_resource_input::Builder::default()
}
/// Creates a new `UntagResource` operation.
pub fn new() -> Self {
Self { _private: () }
}
}
impl aws_smithy_http::response::ParseStrictResponse for UntagResource {
type Output =
std::result::Result<crate::output::UntagResourceOutput, crate::error::UntagResourceError>;
fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
if !response.status().is_success() && response.status().as_u16() != 200 {
crate::operation_deser::parse_untag_resource_error(response)
} else {
crate::operation_deser::parse_untag_resource_response(response)
}
}
}
/// Operation shape for `UpdateDatasetEntries`.
///
/// This is usually constructed for you using the fluent builder returned by
/// [`update_dataset_entries`](crate::client::Client::update_dataset_entries).
///
/// See [`crate::client::fluent_builders::UpdateDatasetEntries`] for more details about the operation.
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct UpdateDatasetEntries {
_private: (),
}
impl UpdateDatasetEntries {
/// Creates a new builder-style object to manufacture [`UpdateDatasetEntriesInput`](crate::input::UpdateDatasetEntriesInput)
pub fn builder() -> crate::input::update_dataset_entries_input::Builder {
crate::input::update_dataset_entries_input::Builder::default()
}
/// Creates a new `UpdateDatasetEntries` operation.
pub fn new() -> Self {
Self { _private: () }
}
}
impl aws_smithy_http::response::ParseStrictResponse for UpdateDatasetEntries {
type Output = std::result::Result<
crate::output::UpdateDatasetEntriesOutput,
crate::error::UpdateDatasetEntriesError,
>;
fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
if !response.status().is_success() && response.status().as_u16() != 202 {
crate::operation_deser::parse_update_dataset_entries_error(response)
} else {
crate::operation_deser::parse_update_dataset_entries_response(response)
}
}
}
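// A minimal sketch (not part of the generated crate) of exercising one of the
// `ParseStrictResponse` implementations above against a buffered HTTP response.
// The 404 status and empty JSON body are hypothetical test fixtures.
#[cfg(test)]
mod parse_strict_sketch {
    use aws_smithy_http::response::ParseStrictResponse;

    #[test]
    fn non_success_status_routes_to_error_deserializer() {
        let response = http::Response::builder()
            .status(404)
            .body(bytes::Bytes::from_static(b"{}"))
            .unwrap();
        // A non-2xx status takes the `parse_list_projects_error` branch.
        assert!(super::ListProjects::new().parse(&response).is_err());
    }
}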
|
CreateProject
|
search_interest.rs
|
//! Represent keywords interest over time.
//!
//! Numbers represent search interest relative to the highest point on the chart for the given region and time.
//! A value of 100 is the peak popularity for the term. A value of 50 means that the term is half as popular.
//! A score of 0 means there was not enough data for this term.
use crate::Client;
use crate::request_handler::Query;
use serde_json::Value;
#[derive(Clone, Debug, Default)]
pub struct SearchInterest {
pub client: Client,
}
impl SearchInterest {
/// Create a `SearchInterest` instance.
///
/// Returns a `SearchInterest` instance
pub fn new(client: Client) -> Self {
Self { client }
}
    /// Retrieve line chart data (Timeseries data) for all keywords.
    ///
    /// Retrieve data for all keywords set within the client.
///
/// Returns a JSON serde Value (`serde_json::Value`).
/// ```
/// # use rtrend::{Country, Keywords, Client, SearchInterest};
/// let keywords = Keywords::new(vec!["Candy"]);
/// let country = Country::US;
///
/// let client = Client::new(keywords, country).build();
///
/// let search_interest = SearchInterest::new(client).get();
///
/// println!("{}", search_interest);
/// ```
pub fn get(&self) -> Value {
self.send_request()[0].clone()
}
}
|
options.go
|
package mock
import (
"github.com/chinahtl/go-micro/v3/client"
)
// Response sets the response methods for a service
func Response(service string, response []MockResponse) client.Option {
return func(o *client.Options) {
r, ok := fromContext(o.Context)
if !ok {
r = make(map[string][]MockResponse)
}
r[service] = response
o.Context = newContext(o.Context, r)
}
}
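// Usage sketch (assuming this package's mock client constructor accepts
// client.Option values; the MockResponse fields shown are illustrative):
//
//	c := NewClient(
//		Response("greeter", []MockResponse{
//			{Endpoint: "Greeter.Hello", Response: map[string]string{"msg": "hi"}},
//		}),
//	)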
|
__main__.py
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division, absolute_import, unicode_literals
import sys
import argparse
import glyphsLib
description = """\n
Converts a Glyphs.app source file into UFO masters
or UFO instances and MutatorMath designspace.
"""
def parse_options(args):
parser = argparse.ArgumentParser(description=description)
parser.add_argument("--version", action="version",
version='glyphsLib %s' % (glyphsLib.__version__))
parser.add_argument("-g", "--glyphs", metavar="GLYPHS", required=True,
help="Glyphs file to convert.")
parser.add_argument("-m", "--masters", metavar="MASTERS",
default="master_ufo",
help="Ouput masters UFO to folder MASTERS. "
"(default: %(default)s)")
parser.add_argument("-n", "--instances", metavar="INSTANCES", nargs="?",
const="instance_ufo", default=None,
help="Output and generate interpolated instances UFO "
"to folder INSTANCES. "
"(default: %(const)s)")
parser.add_argument("-r", "--round-instances", action="store_true",
help="Apply integer rounding to all geometry when "
"interpolating")
options = parser.parse_args(args)
return options
def main(args=None):
opt = parse_options(args)
if opt.glyphs is not None:
if opt.instances is None:
glyphsLib.build_masters(opt.glyphs, opt.masters)
else:
glyphsLib.build_instances(opt.glyphs, opt.masters, opt.instances,
round_geometry=opt.round_instances)
if __name__ == '__main__':
main(sys.argv[1:])
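# Example invocations (file and folder names are illustrative):
#   python -m glyphsLib -g MyFont.glyphs -m master_ufo
#   python -m glyphsLib -g MyFont.glyphs -n instance_ufo --round-instances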
| |
prclr1.rs
|
#[doc = "Writer for register PRCLR1"]
pub type W = crate::W<u32, super::PRCLR1>;
#[doc = "Register PRCLR1 `reset()`'s with value 0"]
impl crate::ResetValue for super::PRCLR1 {
type Type = u32;
#[inline(always)]
fn reset_value() -> Self::Type {
0
}
}
#[doc = "CCU43 Reset Clear\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum CCU43RS_AW {
#[doc = "0: No effect"]
VALUE1 = 0,
#[doc = "1: De-assert reset"]
VALUE2 = 1,
}
impl From<CCU43RS_AW> for bool {
#[inline(always)]
fn from(variant: CCU43RS_AW) -> Self {
variant as u8 != 0
}
}
#[doc = "Write proxy for field `CCU43RS`"]
pub struct CCU43RS_W<'a> {
w: &'a mut W,
}
impl<'a> CCU43RS_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
    pub fn variant(self, variant: CCU43RS_AW) -> &'a mut W {
{
self.bit(variant.into())
}
}
#[doc = "No effect"]
#[inline(always)]
pub fn value1(self) -> &'a mut W {
self.variant(CCU43RS_AW::VALUE1)
}
#[doc = "De-assert reset"]
#[inline(always)]
pub fn value2(self) -> &'a mut W {
self.variant(CCU43RS_AW::VALUE2)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !0x01) | ((value as u32) & 0x01);
self.w
}
}
#[doc = "LEDTS Reset Clear\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum LEDTSCU0RS_AW {
#[doc = "0: No effect"]
VALUE1 = 0,
#[doc = "1: De-assert reset"]
VALUE2 = 1,
}
impl From<LEDTSCU0RS_AW> for bool {
#[inline(always)]
fn from(variant: LEDTSCU0RS_AW) -> Self {
variant as u8 != 0
}
}
#[doc = "Write proxy for field `LEDTSCU0RS`"]
pub struct LEDTSCU0RS_W<'a> {
w: &'a mut W,
}
impl<'a> LEDTSCU0RS_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: LEDTSCU0RS_AW) -> &'a mut W {
{
self.bit(variant.into())
}
}
#[doc = "No effect"]
#[inline(always)]
pub fn value1(self) -> &'a mut W {
self.variant(LEDTSCU0RS_AW::VALUE1)
}
#[doc = "De-assert reset"]
#[inline(always)]
pub fn value2(self) -> &'a mut W {
self.variant(LEDTSCU0RS_AW::VALUE2)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 3)) | (((value as u32) & 0x01) << 3);
self.w
}
}
#[doc = "MultiCAN Reset Clear\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum MCAN0RS_AW {
#[doc = "0: No effect"]
VALUE1 = 0,
#[doc = "1: De-assert reset"]
VALUE2 = 1,
}
impl From<MCAN0RS_AW> for bool {
#[inline(always)]
fn from(variant: MCAN0RS_AW) -> Self {
variant as u8 != 0
}
}
#[doc = "Write proxy for field `MCAN0RS`"]
pub struct MCAN0RS_W<'a> {
w: &'a mut W,
}
impl<'a> MCAN0RS_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: MCAN0RS_AW) -> &'a mut W {
{
self.bit(variant.into())
}
}
#[doc = "No effect"]
#[inline(always)]
pub fn value1(self) -> &'a mut W {
self.variant(MCAN0RS_AW::VALUE1)
}
#[doc = "De-assert reset"]
#[inline(always)]
pub fn value2(self) -> &'a mut W {
self.variant(MCAN0RS_AW::VALUE2)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 4)) | (((value as u32) & 0x01) << 4);
self.w
}
}
#[doc = "DAC Reset Clear\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum DACRS_AW {
#[doc = "0: No effect"]
VALUE1 = 0,
#[doc = "1: De-assert reset"]
VALUE2 = 1,
}
impl From<DACRS_AW> for bool {
#[inline(always)]
fn from(variant: DACRS_AW) -> Self {
variant as u8 != 0
}
}
#[doc = "Write proxy for field `DACRS`"]
pub struct DACRS_W<'a> {
w: &'a mut W,
}
impl<'a> DACRS_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: DACRS_AW) -> &'a mut W {
{
self.bit(variant.into())
}
}
#[doc = "No effect"]
#[inline(always)]
pub fn value1(self) -> &'a mut W {
self.variant(DACRS_AW::VALUE1)
}
#[doc = "De-assert reset"]
#[inline(always)]
pub fn value2(self) -> &'a mut W {
self.variant(DACRS_AW::VALUE2)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 5)) | (((value as u32) & 0x01) << 5);
self.w
}
}
#[doc = "MMC Interface Reset Clear\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum MMCIRS_AW {
#[doc = "0: No effect"]
VALUE1 = 0,
#[doc = "1: De-assert reset"]
VALUE2 = 1,
}
impl From<MMCIRS_AW> for bool {
#[inline(always)]
fn from(variant: MMCIRS_AW) -> Self {
variant as u8 != 0
}
}
#[doc = "Write proxy for field `MMCIRS`"]
pub struct MMCIRS_W<'a> {
w: &'a mut W,
}
impl<'a> MMCIRS_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: MMCIRS_AW) -> &'a mut W {
{
self.bit(variant.into())
}
}
#[doc = "No effect"]
#[inline(always)]
pub fn value1(self) -> &'a mut W {
self.variant(MMCIRS_AW::VALUE1)
}
#[doc = "De-assert reset"]
#[inline(always)]
pub fn value2(self) -> &'a mut W {
self.variant(MMCIRS_AW::VALUE2)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 6)) | (((value as u32) & 0x01) << 6);
self.w
}
}
#[doc = "USIC1 Reset Clear\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum USIC1RS_AW {
#[doc = "0: No effect"]
VALUE1 = 0,
#[doc = "1: De-assert reset"]
VALUE2 = 1,
}
impl From<USIC1RS_AW> for bool {
#[inline(always)]
fn from(variant: USIC1RS_AW) -> Self {
variant as u8 != 0
}
}
#[doc = "Write proxy for field `USIC1RS`"]
pub struct USIC1RS_W<'a> {
w: &'a mut W,
}
impl<'a> USIC1RS_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: USIC1RS_AW) -> &'a mut W {
{
self.bit(variant.into())
}
}
#[doc = "No effect"]
#[inline(always)]
pub fn value1(self) -> &'a mut W {
self.variant(USIC1RS_AW::VALUE1)
}
#[doc = "De-assert reset"]
#[inline(always)]
pub fn value2(self) -> &'a mut W {
self.variant(USIC1RS_AW::VALUE2)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 7)) | (((value as u32) & 0x01) << 7);
self.w
}
}
#[doc = "USIC2 Reset Clear\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum USIC2RS_AW {
#[doc = "0: No effect"]
VALUE1 = 0,
#[doc = "1: De-assert reset"]
VALUE2 = 1,
}
impl From<USIC2RS_AW> for bool {
#[inline(always)]
fn from(variant: USIC2RS_AW) -> Self {
variant as u8 != 0
}
}
#[doc = "Write proxy for field `USIC2RS`"]
pub struct USIC2RS_W<'a> {
w: &'a mut W,
}
impl<'a> USIC2RS_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: USIC2RS_AW) -> &'a mut W {
{
self.bit(variant.into())
}
}
#[doc = "No effect"]
#[inline(always)]
pub fn value1(self) -> &'a mut W {
self.variant(USIC2RS_AW::VALUE1)
}
#[doc = "De-assert reset"]
#[inline(always)]
pub fn value2(self) -> &'a mut W {
self.variant(USIC2RS_AW::VALUE2)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 8)) | (((value as u32) & 0x01) << 8);
self.w
}
}
#[doc = "PORTS Reset Clear\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum PPORTSRS_AW {
#[doc = "0: No effect"]
VALUE1 = 0,
#[doc = "1: De-assert reset"]
VALUE2 = 1,
}
impl From<PPORTSRS_AW> for bool {
#[inline(always)]
fn from(variant: PPORTSRS_AW) -> Self {
variant as u8 != 0
}
}
#[doc = "Write proxy for field `PPORTSRS`"]
pub struct PPORTSRS_W<'a> {
w: &'a mut W,
}
impl<'a> PPORTSRS_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: PPORTSRS_AW) -> &'a mut W {
{
self.bit(variant.into())
}
}
#[doc = "No effect"]
#[inline(always)]
pub fn value1(self) -> &'a mut W {
self.variant(PPORTSRS_AW::VALUE1)
}
#[doc = "De-assert reset"]
#[inline(always)]
pub fn value2(self) -> &'a mut W {
self.variant(PPORTSRS_AW::VALUE2)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 9)) | (((value as u32) & 0x01) << 9);
self.w
}
}
impl W {
#[doc = "Bit 0 - CCU43 Reset Clear"]
#[inline(always)]
pub fn ccu43rs(&mut self) -> CCU43RS_W {
CCU43RS_W { w: self }
}
#[doc = "Bit 3 - LEDTS Reset Clear"]
#[inline(always)]
pub fn ledtscu0rs(&mut self) -> LEDTSCU0RS_W {
LEDTSCU0RS_W { w: self }
}
#[doc = "Bit 4 - MultiCAN Reset Clear"]
#[inline(always)]
pub fn mcan0rs(&mut self) -> MCAN0RS_W {
MCAN0RS_W { w: self }
}
#[doc = "Bit 5 - DAC Reset Clear"]
#[inline(always)]
pub fn dacrs(&mut self) -> DACRS_W {
DACRS_W { w: self }
}
#[doc = "Bit 6 - MMC Interface Reset Clear"]
#[inline(always)]
pub fn mmcirs(&mut self) -> MMCIRS_W {
MMCIRS_W { w: self }
}
#[doc = "Bit 7 - USIC1 Reset Clear"]
#[inline(always)]
pub fn usic1rs(&mut self) -> USIC1RS_W {
USIC1RS_W { w: self }
}
#[doc = "Bit 8 - USIC2 Reset Clear"]
#[inline(always)]
pub fn usic2rs(&mut self) -> USIC2RS_W {
USIC2RS_W { w: self }
}
#[doc = "Bit 9 - PORTS Reset Clear"]
#[inline(always)]
pub fn pportsrs(&mut self) -> PPORTSRS_W {
PPORTSRS_W { w: self }
}
}
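// Usage sketch, assuming the usual svd2rust access pattern; the peripheral
// path below is illustrative and depends on the generated `Peripherals` struct:
//
//     peripherals.SCU_RESET.prclr1.write(|w| w.ccu43rs().value2().dacrs().set_bit());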
|
base.rs
|
use crate::ctx_desc::{
ContextDescriptorFlags, ContextDescriptorKind, ExtensionContextDescriptor,
ModuleContextDescriptor, ProtocolContextDescriptor, TypeContextDescriptor,
};
use std::{fmt, hint, ptr};
use swift_sys::{
ctx_desc::ContextDescriptor as RawContextDescriptor, ptr::RelativeIndirectablePointer,
};
/// Base class for all context descriptors.
#[repr(transparent)]
pub struct ContextDescriptor {
raw: RawContextDescriptor,
}
impl AsRef<ContextDescriptor> for ContextDescriptor {
#[inline]
fn as_ref(&self) -> &Self {
self
}
}
unsafe impl Send for ContextDescriptor {}
unsafe impl Sync for ContextDescriptor {}
impl fmt::Debug for ContextDescriptor {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
// Format as the specific context descriptor type.
//
        // `fmt` is called with the type's name to ensure that the correct
        // implementation is called, and that this does not infinitely recurse.
match self.kind() {
ContextDescriptorKind::MODULE => ModuleContextDescriptor::fmt(
unsafe { &*(self as *const Self as *const ModuleContextDescriptor) },
f,
),
ContextDescriptorKind::EXTENSION => ExtensionContextDescriptor::fmt(
unsafe { &*(self as *const Self as *const ExtensionContextDescriptor) },
f,
),
ContextDescriptorKind::PROTOCOL => ProtocolContextDescriptor::fmt(
unsafe { &*(self as *const Self as *const ProtocolContextDescriptor) },
f,
),
// This case also handles classes and structs.
kind if kind.is_type() => TypeContextDescriptor::fmt(
unsafe { &*(self as *const Self as *const TypeContextDescriptor) },
f,
),
// Default to "unknown" descriptor.
_ => f
.debug_struct("UnknownContextDescriptor")
.field("flags", &self.flags())
.field("parent", &self.parent())
.finish(),
}
}
}
impl ContextDescriptor {
/// Creates an instance from a raw context descriptor value.
///
/// # Safety
///
/// - The resulting location where `self` is placed must be correct for the
/// fields of the raw value.
///
/// - Invariants indicated by the context descriptor flags must be upheld.
/// For example, if they indicate extra fields, those must exist relative
/// to the resulting location.
#[inline]
pub const unsafe fn from_raw(raw: RawContextDescriptor) -> Self {
Self { raw }
}
/// Extracts the inner raw context descriptor value.
#[inline]
pub const fn into_raw(self) -> RawContextDescriptor {
self.raw
}
}
impl ContextDescriptor {
/// Creates a new context descriptor.
///
/// # Safety
///
/// - The descriptor must have a memory layout appropriate for the type of
/// descriptor indicated by `flags`. This often includes data that is
/// placed immediately after the created instance.
///
/// - `parent` must point to a valid descriptor that can represent a parent
/// of the created descriptor. It must also be appropriate for the
/// descriptor kind.
#[inline]
pub const unsafe fn new(
flags: ContextDescriptorFlags,
parent: RelativeIndirectablePointer<ContextDescriptor>,
) -> Self {
Self {
raw: RawContextDescriptor {
flags,
parent: parent.cast(),
},
}
}
/// Returns flags describing this context.
#[inline]
pub fn flags(&self) -> ContextDescriptorFlags {
self.raw.flags
}
/// Returns the kind of this context descriptor.
#[inline]
pub fn kind(&self) -> ContextDescriptorKind {
self.raw.flags.kind()
}
/// Returns the parent context, or `None` if this is a top-level context.
#[inline]
pub fn parent(&self) -> Option<&ContextDescriptor> {
unsafe { self.parent_ptr().as_ref() }
}
/// Returns a relative pointer to the parent context.
#[inline]
pub fn parent_ptr(&self) -> &RelativeIndirectablePointer<ContextDescriptor> {
self.raw.parent.cast_by_ref()
}
/// Returns an iterator over the parent contexts of `self`.
#[inline]
pub fn parent_iter(&self) -> impl Iterator<Item = &ContextDescriptor> + Copy {
#[derive(Copy, Clone)]
struct Iter<'a>(&'a ContextDescriptor);
impl<'a> Iterator for Iter<'a> {
type Item = &'a ContextDescriptor;
#[inline]
fn next(&mut self) -> Option<Self::Item> {
let parent = self.0.parent()?;
self.0 = parent;
Some(parent)
}
}
// There are no more parents after the root is reached.
impl std::iter::FusedIterator for Iter<'_> {}
Iter(self)
}
/// Returns `true` if the given context descriptor is in the parent
/// hierarchy of `self`.
pub fn has_parent(&self, desc: &ContextDescriptor) -> bool {
self.parent_iter().any(|parent| ptr::eq(parent, desc))
}
/// Returns the module context for `self`.
#[inline]
pub fn module_context(&self) -> &ModuleContextDescriptor {
let mut current = self;
loop {
if let Some(module) = current.as_module() {
return module;
} else if let Some(parent) = current.parent() {
current = parent;
} else {
// The runtime assumes that all context chains should eventually
// find a module.
unsafe { hint::unreachable_unchecked() };
}
}
}
}
/// Casting to subtypes.
impl ContextDescriptor {
/// Casts this context descriptor to a module descriptor if it is one.
#[inline]
pub fn as_module(&self) -> Option<&ModuleContextDescriptor> {
        if self.kind() == ContextDescriptorKind::MODULE {
            Some(unsafe { &*(self as *const _ as *const _) })
        } else {
None
}
}
/// Casts this context descriptor to an extension descriptor if it is one.
#[inline]
pub fn as_extension(&self) -> Option<&ExtensionContextDescriptor> {
if self.kind() == ContextDescriptorKind::EXTENSION {
Some(unsafe { &*(self as *const _ as *const _) })
} else {
None
}
}
/// Casts this context descriptor to a nominal type descriptor if it is one.
#[inline]
pub fn as_type(&self) -> Option<&TypeContextDescriptor> {
if self.kind().is_type() {
Some(unsafe { &*(self as *const _ as *const _) })
} else {
None
}
}
}
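// Sketch of walking a hierarchy with the casting helpers above (the `desc`
// binding is assumed to come from the Swift runtime elsewhere):
//
//     fn describe(desc: &ContextDescriptor) {
//         if let Some(module) = desc.as_module() {
//             println!("module: {:?}", module);
//         } else {
//             println!("declared in module: {:?}", desc.module_context());
//         }
//     }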
|
rewriterequest.js
|
export function rewriteRequest(){
try {
const originRequest = wx.request;
Object.defineProperty(wx, 'request', {
configurable:true,
enumerable: true,
writable: true,
value: function(){
let options = arguments[0] || {};
      //Skip collection for the error-reporting endpoint itself, to avoid an infinite loop
var regexp = new RegExp("https://xxxx/error","g");
if (regexp.test(options.url)) {
          //Still run the original method here
return originRequest.call(this, options)
}
        //Intercept the success/fail callbacks here to capture the response data
["success", "fail"].forEach((methodName) => {
let defineMethod = options[methodName];
options[methodName] = function(){
            try{ //Run the original callback inside the redefined one, so normal logic is unaffected
defineMethod && defineMethod.apply(this, arguments);
              //Start collecting info
let statusCode, result, msg;
              //Request failed
if (methodName == 'fail') {
statusCode = 0;
result = 'fail';
msg = ( arguments[0] && arguments[0].errMsg ) || ""
}
              //Request succeeded.
              //Collection rules:
              // 1. statusCode is not 2xx/3xx
              // 2. statusCode is 2xx/3xx, but the response result field is not "ok"
if (methodName == 'success') {
let data = arguments[0] || {};
statusCode = data.statusCode || "";
if (data.statusCode && Number(data.statusCode) >= 200 && Number(data.statusCode) < 400 ) {
let resData = data.data ? (typeof data.data == 'object' ? data.data : JSON.parse(data.data)) : {};
                  //Successful request, nothing to collect
if (resData.result == 'ok') {
return;
}
result = resData.result || "";
msg = resData.msg || "";
}else{
result = "";
msg = data.data || "";
}
}
              //Strip sensitive info from the header
if (options.header) {
options.header.userid && (delete options.header.userid)
}
              //Strip sensitive info from the data
if (options.data) {
options.data.userid && (delete options.data.userid)
}
              var collectInfo = {
                "url": options.url || '', //request URL
                "method": options.method || "GET", //request method
                "request_header": JSON.stringify(options.header || {}), //request headers
                "request_data": JSON.stringify(options.data || {}), //request payload
                "resp_code": statusCode + '', //response status code
                "resp_result": result, //response result field
                "resp_msg": msg, //response message
              }
              //Report when the payload differs from the last one, or the same payload arrives more than 1s later
if (JSON.stringify(collectInfo) != lastParams.paramStr || (new Date().getTime() - lastParams.timestamp > 1000)) {
                //Upload the error info
Post.post_error(_miniapp, 'http', collectInfo)
lastParams.paramStr = JSON.stringify(collectInfo);
lastParams.timestamp = new Date().getTime()
}
}catch(e){
//console.log(e);
}
};
})
return originRequest.call(this, options)
}
})
} catch (e) {
// Do something when catch error
}
}
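// Usage sketch: call once at miniapp startup, before any wx.request is issued.
// (`lastParams`, `Post` and `_miniapp` are module-level values defined in the
// elided part of this file; the URL below is illustrative.)
//
//   rewriteRequest()
//   wx.request({ url: 'https://api.example.com/data', success(res) { /* ... */ } })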
|
gloo-arrows.py
|
# -----------------------------------------------------------------------------
# Copyright (c) 2009-2016 Nicolas P. Rougier. All rights reserved.
# Distributed under the (new) BSD License.
# -----------------------------------------------------------------------------
import numpy as np
from glumpy import app, gl, gloo
from glumpy.transforms import Position, OrthographicProjection, PanZoom
# Create window
window = app.Window(width=2*512, height=512, color=(1,1,1,1))
# What to draw when necessary
@window.event
def on_draw(dt):
    window.clear()
    program.draw(gl.GL_POINTS)
    program['orientation'][-1] += np.pi/1024.0
# Setup some markers
n = 500+1
data = np.zeros(n, dtype=[('position', np.float32, 2),
('fg_color', np.float32, 4),
('bg_color', np.float32, 4),
('size', np.float32, 1),
('head', np.float32, 1),
('orientation', np.float32, 1),
('linewidth', np.float32, 1)])
data = data.view(gloo.VertexBuffer)
data['linewidth'] = 1
data['fg_color'] = 0, 0, 0, 1
data['bg_color'] = 0, 0, 0, 1
data['orientation'] = 0
data['head'] = 0.25
radius, theta, dtheta = 245.0, 0.0, 6.5 / 180.0 * np.pi
for i in range(500):
theta += dtheta
x = 256 + radius * np.cos(theta)
y = 256 + radius * np.sin(theta)
r = 10.1 - i * 0.01
radius -= 0.4
data['orientation'][i] = theta + np.pi
data['position'][i] = x, y
data['size'][i] = 2 * r
data['linewidth'][i] = 1.5 - 0.5*i/500.
data['position'][-1] = 512+256, 256
data['size'][-1] = 512/np.sqrt(2)
data['linewidth'][-1] = 16.0
data['fg_color'][-1] = 0, 0, 0, 1
data['bg_color'][-1] = .95, .95, .95, 1
data['orientation'][-1] = 0
program = gloo.Program("arrows/arrow.vert", "arrows/arrow.frag")
program.bind(data)
program['antialias'] = 1.00
program['arrow'] = "stealth"
program['paint'] = "filled"
transform = OrthographicProjection(Position("position"))
program['transform'] = transform
window.attach(transform)
app.run()
|
post.py
|
'''
SIGNUS V1 post API
'''
from flask import g
from app.api.signus_v1 import signus_v1 as api
from app.api.decorators import timer, login_required, login_optional
from app.controllers.post import (post_like,
post_unlike,
post_view)
@api.route("/post/like/<string:post_oid>", methods=["PATCH"])
@timer
@login_required
def signus_v1_post_like(post_oid):
    ''' Like a post '''
return {
"msg": "success",
"result": post_like(g.mongo_cur,
post_oid,
g.user)
}
@api.route("/post/unlike/<string:post_oid>", methods=["PATCH"])
@timer
@login_required
def signus_v1_post_unlike(post_oid):
    ''' Unlike a post '''
return {
"msg": "success",
"result": post_unlike(g.mongo_cur,
post_oid,
g.user)
}
@api.route("/post/view/<string:post_oid>", methods=["PATCH"])
@timer
@login_optional
def signus_v1_post_view(post_oid):
    ''' Increment a post's view count '''
    if 'user' in g:
result = post_view(g.mongo_cur, post_oid, g.user)
else:
result = post_view(g.mongo_cur, post_oid)
return {
"msg": "success",
"result": result
}
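# Example request (host, URL prefix, and ObjectId are illustrative):
#   curl -X PATCH https://api.example.com/signus/v1/post/view/5f2b6c9e8d3f4a1b2c3d4e5f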
|
iforget.js
|
import React, { PropTypes } from 'react'
import { CardActions } from 'material-ui/Card'
import RaisedButton from 'material-ui/RaisedButton'
import { List, ListItem } from 'material-ui/List'
import { Form } from 'formsy-react'
import { FormsyText } from 'formsy-material-ui/lib'
import Lock from 'material-ui/svg-icons/action/lock'
import AccountCircle from 'material-ui/svg-icons/action/account-circle'
import FingerPrint from 'material-ui/svg-icons/action/fingerprint'
import Popup from '../../../components/popup'
import './iforget.scss'
class RegisterView extends React.Component {
static propTypes = {
fetchList: PropTypes.func.isRequired,
valChange: PropTypes.func.isRequired,
modalOpen: PropTypes.func.isRequired,
modalClose: PropTypes.func.isRequired,
modalState: PropTypes.bool.isRequired,
modal: PropTypes.object.isRequired,
directTo: PropTypes.func.isRequired,
isLoading: PropTypes.bool.isRequired,
submitInfo: PropTypes.object.isRequired,
codeInfo: PropTypes.object.isRequired,
canSubmit: PropTypes.bool.isRequired,
disableCodeSend: PropTypes.bool.isRequired
}
static defaultProps = {
codeInfo: {
code: 0,
isCounting: false,
delay: 59
}
}
constructor(props) {
    super(props)
this.enableButton = this.enableButton.bind(this)
this.resetCodeInfo = this.resetCodeInfo.bind(this)
this.disableButton = this.disableButton.bind(this)
this.submitForm = this.submitForm.bind(this)
this.handlePhoneChange = this.handlePhoneChange.bind(this)
this.sendCode = this.sendCode.bind(this)
this.countDown = this.countDown.bind(this)
this.phone = ''
}
componentDidMount() {
if (this.props.modalState) {
this.props.modalClose()
}
}
componentWillReceiveProps(nextProps) {
const that = this
const props = that.props
const nextSubmit = nextProps.submitInfo
const nextCode = nextProps.codeInfo
const propsCode = props.codeInfo
if (nextSubmit.code >= 200 && nextSubmit.code < 300) {
setTimeout(() => {
that.props.directTo('/login')
}, 2000)
}
// is counting
if (nextCode.isCounting) {
if (propsCode.delay !== nextCode.delay) {
clearTimeout(that.timer)
if (nextCode.delay > 0) {
that.timer = setTimeout(that.countDown, 1000)
        } else if (nextCode.delay <= 0) { // countdown finished
setTimeout(that.resetCodeInfo, 1000)
that.timer = null
}
}
}
    // the verification code was sent successfully
if (nextCode.code === 200 && nextCode.isCounting === true && nextCode.delay === 59) {
setTimeout(that.countDown, 1000)
props.valChange(true, 'disableCodeSend')
return
}
}
enableButton() {
this.props.valChange(true, 'canSubmit')
}
disableButton() {
this.props.valChange(false, 'canSubmit')
}
countDown() {
this.props.valChange({
code: 200,
isCounting: true,
delay: this.props.codeInfo.delay - 1
}, 'codeInfo')
}
resetCodeInfo() {
this.props.valChange({
code: 0,
isCounting: false,
delay: 59
}, 'codeInfo')
}
handlePhoneChange(event) {
const phone = event.target.value
this.props.valChange(phone, 'phone')
this.phone = phone
// if (telR.test(phone)) {
// this.props.valChange(phone, 'phone')
// if (!that.props.codeInfo.hasSent) {
// that.sendCode(phone)
// }
// }
}
sendCode() {
const that = this
const props = this.props
const account = this.phone
    const telR = /^1[34578][0-9]\d{8}$/ // Chinese mobile number format
if (props.isLoading) return
if (!telR.test(account)) {
return
}
if (props.codeInfo.isCounting && props.codeInfo.delay < 59) {
return
}
function codeFilter(data) {
that.hasSent = true
let result = {
code: 0,
isCounting: false,
delay: 59
}
if (data.code === 200) {
result = {
code: 200,
isCounting: true,
delay: 59
}
}
return result
}
    // send the verification code
props.fetchList(
'/firmware/valid_code/',
{ querys: { account, v_type: 'reset_pwd' } },
'codeInfo',
codeFilter
)
}
submitForm(data) {
const { phone, valid_code, password } = data
const account = phone
const queryData = {
method: 'POST',
params: {
account,
password,
valid_code
}
}
this.props.fetchList(
// '/account/bind/phone/',
'/account/reset_pwd/',
queryData,
'submitInfo'
)
}
render() {
const props = this.props
const propsCodeInfo = props.codeInfo
const errorMessages = {
phoneError: '请输入正确的手机号码',
vcodeError: '请输入正确的验证码',
pwdError: '密码在6到16位之间'
}
let { phoneError, vcodeError, pwdError } = errorMessages
let sendBtnTitle
if (propsCodeInfo.isCounting && this.hasSent && propsCodeInfo.code === 200) {
sendBtnTitle = `重新获取(${propsCodeInfo.delay || ''})`
} else {
sendBtnTitle = '获取验证码'
}
let popupEle = props.modalState ?
<Popup
modal={props.modal}
modalClose={props.modalClose}
modalState={props.modalState}
directTo={props.directTo}
/> : ''
return (
<div className="container">
<Form
onValid={this.enableButton}
onInvalid={this.disableButton}
onValidSubmit={this.submitForm}
>
<List>
<ListItem
primaryText={
<FormsyText
name="phone"
validations="isNumeric,isLength:11"
validationError={phoneError}
required
hintText="请输入手机号"
fullWidth={true}
onChange={this.handlePhoneChange}
/>}
leftIcon={<AccountCircle className="register-icon-fix " />}
/>
<ListItem
primaryText={
<FormsyText
name="valid_code"
validations="isNumeric,isLength:4"
validationError={vcodeError}
required
hintText="请输入验证码"
/>}
leftIcon={<FingerPrint className="register-icon-fix " />}
rightIconButton={
<RaisedButton
label={sendBtnTitle}
disabled={this.props.disableCodeSend}
onClick={this.sendCode}
style={{ top: '18px' }}
/>}
/>
<ListItem
primaryText={
<FormsyText
name="password"
type="password"
validations="minLength:6,maxLength:16"
validationError={pwdError}
required
hintText="请输入新密码"
fullWidth={true}
/>}
leftIcon={<Lock className="register-icon-fix " />}
/>
</List>
<CardActions className="fixCenter" >
<RaisedButton
label="修改密码"
type="submit"
style={{height: 50}}
disabled={!this.props.canSubmit}
secondary={true}
fullWidth={true}
/>
</CardActions>
</Form>
{popupEle}
</div>
)
}
}
export default RegisterView
|
forms.py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2011 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django import shortcuts
from django.utils.translation import ugettext as _
from novaclient import exceptions as novaclient_exceptions
from horizon import api
from horizon import forms
LOG = logging.getLogger(__name__)
class ReleaseFloatingIp(forms.SelfHandlingForm):
floating_ip_id = forms.CharField(widget=forms.HiddenInput())
def handle(self, request, data):
try:
LOG.info('Releasing Floating IP "%s"' % data['floating_ip_id'])
api.tenant_floating_ip_release(request, data['floating_ip_id'])
messages.info(request, _('Successfully released Floating IP: %s')
% data['floating_ip_id'])
except novaclient_exceptions.ClientException, e:
LOG.exception("ClientException in ReleaseFloatingIp")
messages.error(request, _('Error releasing Floating IP '
'from tenant: %s') % e.message)
return shortcuts.redirect(request.build_absolute_uri())
class FloatingIpAssociate(forms.SelfHandlingForm):
floating_ip_id = forms.CharField(widget=forms.HiddenInput())
floating_ip = forms.CharField(widget=forms.TextInput(
attrs={'readonly': 'readonly'}))
instance_id = forms.ChoiceField()
def __init__(self, *args, **kwargs):
super(FloatingIpAssociate, self).__init__(*args, **kwargs)
instancelist = kwargs.get('initial', {}).get('instances', [])
self.fields['instance_id'] = forms.ChoiceField(
choices=instancelist,
label=_("Instance"))
def handle(self, request, data):
try:
api.server_add_floating_ip(request,
data['instance_id'],
data['floating_ip_id'])
LOG.info('Associating Floating IP "%s" with Instance "%s"'
% (data['floating_ip'], data['instance_id']))
messages.info(request, _('Successfully associated Floating IP: \
%(ip)s with Instance: %(inst)s'
% {"ip": data['floating_ip'],
"inst": data['instance_id']}))
except novaclient_exceptions.ClientException, e:
LOG.exception("ClientException in FloatingIpAssociate")
messages.error(request, _('Error associating Floating IP: %s')
% e.message)
return shortcuts.redirect('horizon:nova:floating_ips:index')
class FloatingIpDisassociate(forms.SelfHandlingForm):
floating_ip_id = forms.CharField(widget=forms.HiddenInput())
def handle(self, request, data):
try:
fip = api.tenant_floating_ip_get(request, data['floating_ip_id'])
api.server_remove_floating_ip(request, fip.instance_id, fip.id)
LOG.info('Disassociating Floating IP "%s"'
% data['floating_ip_id'])
messages.info(request,
_('Successfully disassociated Floating IP: %s')
% data['floating_ip_id'])
except novaclient_exceptions.ClientException, e:
LOG.exception("ClientException in FloatingIpAssociate")
messages.error(request, _('Error disassociating Floating IP: %s')
% e.message)
return shortcuts.redirect('horizon:nova:floating_ips:index')
class FloatingIpAllocate(forms.SelfHandlingForm):
tenant_id = forms.CharField(widget=forms.HiddenInput())
    def handle(self, request, data):
try:
fip = api.tenant_floating_ip_allocate(request)
LOG.info('Allocating Floating IP "%s" to tenant "%s"'
% (fip.ip, data['tenant_id']))
messages.success(request,
_('Successfully allocated Floating IP "%(ip)s"\
to tenant "%(tenant)s"')
% {"ip": fip.ip, "tenant": data['tenant_id']})
except novaclient_exceptions.ClientException, e:
LOG.exception("ClientException in FloatingIpAllocate")
            messages.error(request, _('Error allocating Floating IP '
                            'to tenant "%(tenant)s": %(msg)s') %
                            {"tenant": data['tenant_id'], "msg": e.message})
return shortcuts.redirect('horizon:nova:floating_ips:index')
|
test_enum.py
|
from atom.api import Atom, Enum, Int, Str
class EnumTest(Atom):
    att = Enum(5, '4')
et = EnumTest()
et.att = 5     # accepted: 5 is one of the declared Enum items
et.att = '5'   # raises: the string '5' is not among the items
et.att = 3.4   # raises: 3.4 is not among the items
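# The same checks written as a pytest-style test (pytest assumed; atom raises
# on assignment of a value that is not one of the Enum items):
import pytest

def test_enum_rejects_values_outside_items():
    et = EnumTest()
    et.att = 5  # accepted
    with pytest.raises((TypeError, ValueError)):
        et.att = '5'
    with pytest.raises((TypeError, ValueError)):
        et.att = 3.4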
|
leveldb.go
|
// Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// +build !js
// Package leveldb implements the key-value database layer based on LevelDB.
package leveldb
import (
"fmt"
"strconv"
"strings"
"sync"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
"github.com/syndtr/goleveldb/leveldb"
"github.com/syndtr/goleveldb/leveldb/errors"
"github.com/syndtr/goleveldb/leveldb/filter"
"github.com/syndtr/goleveldb/leveldb/opt"
"github.com/syndtr/goleveldb/leveldb/util"
)
const (
// degradationWarnInterval specifies how often warning should be printed if the
// leveldb database cannot keep up with requested writes.
degradationWarnInterval = time.Minute
// minCache is the minimum amount of memory in megabytes to allocate to leveldb
// read and write caching, split half and half.
minCache = 16
// minHandles is the minimum number of files handles to allocate to the open
// database files.
minHandles = 16
// metricsGatheringInterval specifies the interval to retrieve leveldb database
// compaction, io and pause stats to report to the user.
metricsGatheringInterval = 3 * time.Second
)
// Database is a persistent key-value store. Apart from basic data storage
// functionality it also supports batch writes and iterating over the keyspace in
// binary-alphabetical order.
type Database struct {
fn string // filename for reporting
db *leveldb.DB // LevelDB instance
compTimeMeter metrics.Meter // Meter for measuring the total time spent in database compaction
compReadMeter metrics.Meter // Meter for measuring the data read during compaction
compWriteMeter metrics.Meter // Meter for measuring the data written during compaction
writeDelayNMeter metrics.Meter // Meter for measuring the write delay number due to database compaction
writeDelayMeter metrics.Meter // Meter for measuring the write delay duration due to database compaction
diskReadMeter metrics.Meter // Meter for measuring the effective amount of data read
diskWriteMeter metrics.Meter // Meter for measuring the effective amount of data written
quitLock sync.Mutex // Mutex protecting the quit channel access
quitChan chan chan error // Quit channel to stop the metrics collection before closing the database
log log.Logger // Contextual logger tracking the database path
}
// New returns a wrapped LevelDB object. The namespace is the prefix that the
// metrics reporting should use for surfacing internal stats.
func New(file string, cache int, handles int, namespace string) (*Database, error) {
// Ensure we have some minimal caching and file guarantees
if cache < minCache {
cache = minCache
}
if handles < minHandles {
handles = minHandles
}
logger := log.New("database", file)
logger.Info("Allocated cache and file handles", "cache", common.StorageSize(cache*1024*1024), "handles", handles)
// Open the db and recover any potential corruptions
db, err := leveldb.OpenFile(file, &opt.Options{
OpenFilesCacheCapacity: handles,
BlockCacheCapacity: cache / 2 * opt.MiB,
WriteBuffer: cache / 4 * opt.MiB, // Two of these are used internally
Filter: filter.NewBloomFilter(10),
})
if _, corrupted := err.(*errors.ErrCorrupted); corrupted {
db, err = leveldb.RecoverFile(file, nil)
}
if err != nil {
return nil, err
}
// Assemble the wrapper with all the registered metrics
ldb := &Database{
fn: file,
db: db,
log: logger,
quitChan: make(chan chan error),
}
ldb.compTimeMeter = metrics.NewRegisteredMeter(namespace+"compact/time", nil)
ldb.compReadMeter = metrics.NewRegisteredMeter(namespace+"compact/input", nil)
ldb.compWriteMeter = metrics.NewRegisteredMeter(namespace+"compact/output", nil)
ldb.diskReadMeter = metrics.NewRegisteredMeter(namespace+"disk/read", nil)
ldb.diskWriteMeter = metrics.NewRegisteredMeter(namespace+"disk/write", nil)
ldb.writeDelayMeter = metrics.NewRegisteredMeter(namespace+"compact/writedelay/duration", nil)
ldb.writeDelayNMeter = metrics.NewRegisteredMeter(namespace+"compact/writedelay/counter", nil)
// Start up the metrics gathering and return
go ldb.meter(metricsGatheringInterval)
return ldb, nil
}
// Close stops the metrics collection, flushes any pending data to disk and closes
// all io accesses to the underlying key-value store.
func (db *Database) Close() error {
db.quitLock.Lock()
defer db.quitLock.Unlock()
if db.quitChan != nil {
errc := make(chan error)
db.quitChan <- errc
if err := <-errc; err != nil {
db.log.Error("Metrics collection failed", "err", err)
}
db.quitChan = nil
}
return db.db.Close()
}
// Has retrieves if a key is present in the key-value store.
func (db *Database) Has(key []byte) (bool, error) {
return db.db.Has(key, nil)
}
// Get retrieves the given key if it's present in the key-value store.
func (db *Database) Get(key []byte) ([]byte, error) {
dat, err := db.db.Get(key, nil)
if err != nil {
return nil, err
}
return dat, nil
}
// Put inserts the given value into the key-value store.
func (db *Database) Put(key []byte, value []byte) error {
return db.db.Put(key, value, nil)
}
// Delete removes the key from the key-value store.
func (db *Database) Delete(key []byte) error {
return db.db.Delete(key, nil)
}
// NewBatch creates a write-only key-value store that buffers changes to its host
// database until a final write is called.
func (db *Database) NewBatch() ethdb.Batch {
return &batch{
db: db.db,
b: new(leveldb.Batch),
}
}
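// Sketch of batch usage (error handling elided):
//
//	b := db.NewBatch()
//	_ = b.Put([]byte("a"), []byte("1"))
//	_ = b.Delete([]byte("b"))
//	if err := b.Write(); err != nil {
//		// handle the flush failure
//	}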
// NewIterator creates a binary-alphabetical iterator over the entire keyspace
// contained within the leveldb database.
func (db *Database) NewIterator() ethdb.Iterator {
return db.db.NewIterator(new(util.Range), nil)
}
// NewIteratorWithStart creates a binary-alphabetical iterator over a subset of
// database content starting at a particular initial key (or after, if it does
// not exist).
func (db *Database) NewIteratorWithStart(start []byte) ethdb.Iterator {
return db.db.NewIterator(&util.Range{Start: start}, nil)
}
// NewIteratorWithPrefix creates a binary-alphabetical iterator over a subset
// of database content with a particular key prefix.
func (db *Database) NewIteratorWithPrefix(prefix []byte) ethdb.Iterator {
return db.db.NewIterator(util.BytesPrefix(prefix), nil)
}
// Stat returns a particular internal stat of the database.
func (db *Database) Stat(property string) (string, error) {
return db.db.GetProperty(property)
}
// Compact flattens the underlying data store for the given key range. In essence,
// deleted and overwritten versions are discarded, and the data is rearranged to
// reduce the cost of operations needed to access them.
//
// A nil start is treated as a key before all keys in the data store; a nil limit
// is treated as a key after all keys in the data store. If both are nil, the
// entire data store will be compacted.
func (db *Database) Compact(start []byte, limit []byte) error {
return db.db.CompactRange(util.Range{Start: start, Limit: limit})
}
// Path returns the path to the database directory.
func (db *Database) Path() string {
return db.fn
}
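// End-to-end usage sketch (path and namespace are illustrative, errors elided):
//
//	db, _ := New("/tmp/chaindata", 128, 128, "eth/db/chaindata/")
//	_ = db.Put([]byte("key"), []byte("value"))
//	it := db.NewIteratorWithPrefix([]byte("ke"))
//	for it.Next() {
//		fmt.Printf("%s=%s\n", it.Key(), it.Value())
//	}
//	it.Release()
//	_ = db.Close()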
// meter periodically retrieves internal leveldb counters and reports them to
// the metrics subsystem.
//
// This is what a LevelDB stats table looks like (currently):
// Compactions
// Level | Tables | Size(MB) | Time(sec) | Read(MB) | Write(MB)
// -------+------------+---------------+---------------+---------------+---------------
// 0 | 0 | 0.00000 | 1.27969 | 0.00000 | 12.31098
// 1 | 85 | 109.27913 | 28.09293 | 213.92493 | 214.26294
// 2 | 523 | 1000.37159 | 7.26059 | 66.86342 | 66.77884
// 3 | 570 | 1113.18458 | 0.00000 | 0.00000 | 0.00000
//
// This is what the write delay looks like (currently):
// DelayN:5 Delay:406.604657ms Paused: false
//
// This is what the iostats look like (currently):
// Read(MB):3895.04860 Write(MB):3654.64712
func (db *Database) meter(refresh time.Duration) {
// Create the counters to store current and previous compaction values
compactions := make([][]float64, 2)
for i := 0; i < 2; i++ {
compactions[i] = make([]float64, 3)
}
// Create storage for iostats.
var iostats [2]float64
// Create storage and warning log tracer for write delay.
var (
delaystats [2]int64
lastWritePaused time.Time
)
var (
errc chan error
merr error
)
// Iterate ad infinitum and collect the stats
for i := 1; errc == nil && merr == nil; i++ {
// Retrieve the database stats
stats, err := db.db.GetProperty("leveldb.stats")
if err != nil {
db.log.Error("Failed to read database stats", "err", err)
merr = err
continue
}
// Find the compaction table, skip the header
lines := strings.Split(stats, "\n")
for len(lines) > 0 && strings.TrimSpace(lines[0]) != "Compactions" {
lines = lines[1:]
}
if len(lines) <= 3 {
db.log.Error("Compaction leveldbTable not found")
merr = errors.New("compaction leveldbTable not found")
continue
}
lines = lines[3:]
// Iterate over all the leveldbTable rows, and accumulate the entries
for j := 0; j < len(compactions[i%2]); j++ {
compactions[i%2][j] = 0
}
for _, line := range lines {
parts := strings.Split(line, "|")
if len(parts) != 6 {
break
}
for idx, counter := range parts[3:] {
value, err := strconv.ParseFloat(strings.TrimSpace(counter), 64)
if err != nil {
db.log.Error("Compaction entry parsing failed", "err", err)
merr = err
continue
}
compactions[i%2][idx] += value
}
}
// Update all the requested meters
if db.compTimeMeter != nil {
db.compTimeMeter.Mark(int64((compactions[i%2][0] - compactions[(i-1)%2][0]) * 1000 * 1000 * 1000))
}
if db.compReadMeter != nil {
db.compReadMeter.Mark(int64((compactions[i%2][1] - compactions[(i-1)%2][1]) * 1024 * 1024))
}
if db.compWriteMeter != nil {
db.compWriteMeter.Mark(int64((compactions[i%2][2] - compactions[(i-1)%2][2]) * 1024 * 1024))
}
// Retrieve the write delay statistic
writedelay, err := db.db.GetProperty("leveldb.writedelay")
if err != nil {
db.log.Error("Failed to read database write delay statistic", "err", err)
merr = err
continue
}
var (
delayN int64
delayDuration string
duration time.Duration
paused bool
)
if n, err := fmt.Sscanf(writedelay, "DelayN:%d Delay:%s Paused:%t", &delayN, &delayDuration, &paused); n != 3 || err != nil {
db.log.Error("Write delay statistic not found")
merr = err
continue
}
duration, err = time.ParseDuration(delayDuration)
if err != nil {
db.log.Error("Failed to parse delay duration", "err", err)
merr = err
continue
}
if db.writeDelayNMeter != nil {
db.writeDelayNMeter.Mark(delayN - delaystats[0])
}
if db.writeDelayMeter != nil {
db.writeDelayMeter.Mark(duration.Nanoseconds() - delaystats[1])
}
		// If a warning that the db is performing compaction has been displayed, any
		// subsequent warnings are withheld for one minute so as not to overwhelm the user.
if paused && delayN-delaystats[0] == 0 && duration.Nanoseconds()-delaystats[1] == 0 &&
time.Now().After(lastWritePaused.Add(degradationWarnInterval)) {
db.log.Warn("Database compacting, degraded performance")
lastWritePaused = time.Now()
}
delaystats[0], delaystats[1] = delayN, duration.Nanoseconds()
// Retrieve the database iostats.
ioStats, err := db.db.GetProperty("leveldb.iostats")
if err != nil {
db.log.Error("Failed to read database iostats", "err", err)
merr = err
continue
}
var nRead, nWrite float64
parts := strings.Split(ioStats, " ")
if len(parts) < 2 {
db.log.Error("Bad syntax of ioStats", "ioStats", ioStats)
merr = fmt.Errorf("bad syntax of ioStats %s", ioStats)
continue
}
if n, err := fmt.Sscanf(parts[0], "Read(MB):%f", &nRead); n != 1 || err != nil {
db.log.Error("Bad syntax of read entry", "entry", parts[0])
merr = err
continue
}
if n, err := fmt.Sscanf(parts[1], "Write(MB):%f", &nWrite); n != 1 || err != nil {
db.log.Error("Bad syntax of write entry", "entry", parts[1])
merr = err
continue
}
if db.diskReadMeter != nil {
db.diskReadMeter.Mark(int64((nRead - iostats[0]) * 1024 * 1024))
}
if db.diskWriteMeter != nil {
db.diskWriteMeter.Mark(int64((nWrite - iostats[1]) * 1024 * 1024))
}
iostats[0], iostats[1] = nRead, nWrite
// Sleep a bit, then repeat the stats collection
select {
case errc = <-db.quitChan:
// Quit requesting, stop hammering the database
case <-time.After(refresh):
// Timeout, gather a new set of stats
}
}
if errc == nil {
errc = <-db.quitChan
}
errc <- merr
}
// batch is a write-only leveldb batch that commits changes to its host database
// when Write is called. A batch cannot be used concurrently.
type batch struct {
db *leveldb.DB
b *leveldb.Batch
size int
}
// Put inserts the given value into the batch for later committing.
func (b *batch) Put(key, value []byte) error {
b.b.Put(key, value)
b.size += len(value)
return nil
}
// Delete inserts a key removal into the batch for later committing.
func (b *batch) Delete(key []byte) error {
b.b.Delete(key)
b.size++
return nil
}
// ValueSize retrieves the amount of data queued up for writing.
func (b *batch) ValueSize() int {
return b.size
}
// Write flushes any accumulated data to disk.
func (b *batch) Write() error {
return b.db.Write(b.b, nil)
}
// Reset resets the batch for reuse.
func (b *batch) Reset() {
b.b.Reset()
b.size = 0
}
// Replay replays the batch contents.
func (b *batch) Replay(w ethdb.KeyValueWriter) error {
return b.b.Replay(&replayer{writer: w})
}
// replayer is a small wrapper to implement the correct replay methods.
type replayer struct {
writer ethdb.KeyValueWriter
failure error
}
// Put inserts the given value into the key-value data store.
func (r *replayer) Put(key, value []byte) {
// If the replay already failed, stop executing ops
if r.failure != nil {
return
}
r.failure = r.writer.Put(key, value)
}
// Delete removes the key from the key-value data store.
func (r *replayer) Delete(key []byte) {
// If the replay already failed, stop executing ops
if r.failure != nil {
return
}
r.failure = r.writer.Delete(key)
}
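// Illustrative usage sketch (not part of the original file). Assuming the
// database type in this file exposes a NewBatch constructor returning an
// ethdb.Batch backed by the batch type above, a typical round trip looks like:
//
//	b := db.NewBatch()                    // hypothetical constructor
//	b.Put([]byte("key"), []byte("value")) // queue an insert
//	b.Delete([]byte("stale"))             // queue a removal
//	if err := b.Write(); err != nil {     // flush both ops atomically
//		return err
//	}
//	err := b.Replay(otherStore) // re-apply the ops to any ethdb.KeyValueWriter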
| |
task.go
|
package nut
import (
"github.com/astaxie/beego"
"github.com/astaxie/beego/toolbox"
)
func monitorTask() error {
beego.Info("start monitor task")
return nil
}
func init() {
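// beego/toolbox uses a six-field cron spec with a leading seconds field,
// so "0 */5 * * * *" runs monitorTask at second 0 of every fifth minute.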
for _, t := range []*toolbox.Task{
toolbox.NewTask("monitor", "0 */5 * * * *", monitorTask),
} {
toolbox.AddTask(t.Taskname, t)
}
}
|
|
mod.rs
|
//! Collectors listen for metrics in various protocols. They record metrics
//! they receive in a `SharedStore`.
/// Tools for building collectors to be exposed through the Iron HTTP library.
pub mod http;
/// Provides UDP and TCP StatsD servers.
pub mod statsd;
|
common.go
|
// Copyright 2017 Michal Witkowski. All Rights Reserved.
// See LICENSE for licensing terms.
package grpc_logging
import (
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
)
// ErrorToCode function determines the error code of an error
// This makes using custom errors with grpc middleware easier
type ErrorToCode func(err error) codes.Code
func DefaultErrorToCode(err error) codes.Code {
return grpc.Code(err)
}
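// Note: grpc.Code is the older helper for extracting a codes.Code; newer
// grpc-go code generally uses status.Code(err) from
// google.golang.org/grpc/status instead.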
// Decider function defines rules for suppressing any interceptor logs
type Decider func(fullMethodName string, err error) bool
// DefaultDeciderMethod is the default implementation of decider to see if you should log the call
// by default this is always true so all calls are logged
func DefaultDeciderMethod(fullMethodName string, err error) bool {
	return true
}
// ServerPayloadLoggingDecider is a user-provided function for deciding whether to log the server-side
// request/response payloads
type ServerPayloadLoggingDecider func(ctx context.Context, fullMethodName string, servingObject interface{}) bool
// ClientPayloadLoggingDecider is a user-provided function for deciding whether to log the client-side
// request/response payloads
type ClientPayloadLoggingDecider func(ctx context.Context, fullMethodName string) bool
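// Illustrative sketch (not part of the original file): a custom Decider that
// suppresses interceptor logs for the standard gRPC health-check method.
// The method name below is only an example.
func exampleHealthDecider(fullMethodName string, err error) bool {
	return fullMethodName != "/grpc.health.v1.Health/Check"
}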
|
bitcoin_fa.ts
|
<?xml version="1.0" ?><!DOCTYPE TS><TS language="fa" version="2.1">
<context>
<name>AboutDialog</name>
<message>
<location filename="../forms/aboutdialog.ui" line="+14"/>
<source>About ChavezCoin</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+39"/>
<source><b>ChavezCoin</b> version</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+41"/>
<source>Copyright © 2009-2014 The Bitcoin developers
Copyright © 2012-2014 The NovaCoin developers
Copyright © 2014 The ChavezCoin developers</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+15"/>
<source>
This is experimental software.
Distributed under the MIT/X11 software license, see the accompanying file COPYING or <a href="http://www.opensource.org/licenses/mit-license.php">http://www.opensource.org/licenses/mit-license.php</a>.
This product includes software developed by the OpenSSL Project for use in the OpenSSL Toolkit (<a href="https://www.openssl.org/">https://www.openssl.org/</a>) and cryptographic software written by Eric Young (<a href="mailto:[email protected]">[email protected]</a>) and UPnP software written by Thomas Bernard.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>AddressBookPage</name>
<message>
<location filename="../forms/addressbookpage.ui" line="+14"/>
<source>Address Book</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+22"/>
<source>Double-click to edit address or label</source>
<translation>برای ویرایش نشانی یا برچسب دوبار کلیک کنید</translation>
</message>
<message>
<location line="+24"/>
<source>Create a new address</source>
<translation>ایجاد نشانی جدید</translation>
</message>
<message>
<location line="+10"/>
<source>Copy the currently selected address to the system clipboard</source>
<translation>نشانی انتخاب شده را در حافظهٔ سیستم کپی کن!</translation>
</message>
<message>
<location line="-7"/>
<source>&New Address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-43"/>
<source>These are your ChavezCoin addresses for receiving payments. You may want to give a different one to each sender so you can keep track of who is paying you.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+53"/>
<source>&Copy Address</source>
<translation>&کپی نشانی</translation>
</message>
<message>
<location line="+7"/>
<source>Show &QR Code</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Sign a message to prove you own a ChavezCoin address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Sign &Message</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+17"/>
<source>Delete the currently selected address from the list</source>
<translation>حذف نشانی انتخابشده از لیست</translation>
</message>
<message>
<location line="-10"/>
<source>Verify a message to ensure it was signed with a specified ChavezCoin address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Verify Message</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+10"/>
<source>&Delete</source>
<translation>&حذف</translation>
</message>
<message>
<location filename="../addressbookpage.cpp" line="+66"/>
<source>Copy &Label</source>
<translation>کپی &برچسب</translation>
</message>
<message>
<location line="+1"/>
<source>&Edit</source>
<translation>&ویرایش</translation>
</message>
<message>
<location line="+248"/>
<source>Export Address Book Data</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Comma separated file (*.csv)</source>
<translation>پروندهٔ نوع CSV جداشونده با کاما (*.csv)</translation>
</message>
<message>
<location line="+13"/>
<source>Error exporting</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>Could not write to file %1.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>AddressTableModel</name>
<message>
<location filename="../addresstablemodel.cpp" line="+145"/>
<source>Label</source>
<translation>برچسب</translation>
</message>
<message>
<location line="+0"/>
<source>Address</source>
<translation>نشانی</translation>
</message>
<message>
<location line="+36"/>
<source>(no label)</source>
<translation>(بدون برچسب)</translation>
</message>
</context>
<context>
<name>AskPassphraseDialog</name>
<message>
<location filename="../forms/askpassphrasedialog.ui" line="+26"/>
<source>Passphrase Dialog</source>
<translation>پنجرهٔ گذرواژه</translation>
</message>
<message>
<location line="+21"/>
<source>Enter passphrase</source>
<translation>گذرواژه را وارد کنید</translation>
</message>
<message>
<location line="+14"/>
<source>New passphrase</source>
<translation>گذرواژهٔ جدید</translation>
</message>
<message>
<location line="+14"/>
<source>Repeat new passphrase</source>
<translation>تکرار گذرواژهٔ جدید</translation>
</message>
<message>
<location line="+33"/>
<source>Serves to disable the trivial sendmoney when OS account compromised. Provides no real security.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>For staking only</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../askpassphrasedialog.cpp" line="+38"/>
<source>Encrypt wallet</source>
<translation>رمزنگاری کیف پول</translation>
</message>
<message>
<location line="+7"/>
<source>This operation needs your wallet passphrase to unlock the wallet.</source>
<translation>انجام این عملیات نیازمند گذرواژهٔ کیف پول شما برای باز کردن قفل آن است.</translation>
</message>
<message>
<location line="+5"/>
<source>Unlock wallet</source>
<translation>باز کردن قفل کیف پول</translation>
</message>
<message>
<location line="+3"/>
<source>This operation needs your wallet passphrase to decrypt the wallet.</source>
<translation>انجام این عملیات نیازمند گذرواژهٔ کیف پول شما برای رمزگشایی کردن آن است.</translation>
</message>
<message>
<location line="+5"/>
<source>Decrypt wallet</source>
<translation>رمزگشایی کیف پول</translation>
</message>
<message>
<location line="+3"/>
<source>Change passphrase</source>
<translation>تغییر گذرواژه</translation>
</message>
<message>
<location line="+1"/>
<source>Enter the old and new passphrase to the wallet.</source>
<translation>گذرواژهٔ قدیمی و جدید کیف پول را وارد کنید.</translation>
</message>
<message>
<location line="+45"/>
<source>Confirm wallet encryption</source>
<translation>تأیید رمزنگاری کیف پول</translation>
</message>
<message>
<location line="+1"/>
<source>Warning: If you encrypt your wallet and lose your passphrase, you will <b>LOSE ALL OF YOUR COINS</b>!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>Are you sure you wish to encrypt your wallet?</source>
<translation>آیا مطمئن هستید که میخواهید کیف پول خود را رمزنگاری کنید؟</translation>
</message>
<message>
<location line="+15"/>
<source>IMPORTANT: Any previous backups you have made of your wallet file should be replaced with the newly generated, encrypted wallet file. For security reasons, previous backups of the unencrypted wallet file will become useless as soon as you start using the new, encrypted wallet.</source>
<translation>مهم: هر نسخهٔ پشتیبانی که تا کنون از کیف پول خود تهیه کردهاید، باید با کیف پول رمزنگاری شدهٔ جدید جایگزین شود. به دلایل امنیتی، پروندهٔ قدیمی کیف پول بدون رمزنگاری، تا زمانی که از کیف پول رمزنگاریشدهٔ جدید استفاده نکنید، غیرقابل استفاده خواهد بود.</translation>
</message>
<message>
<location line="+103"/>
<location line="+24"/>
<source>Warning: The Caps Lock key is on!</source>
<translation>هشدار: کلید Caps Lock روشن است!</translation>
</message>
<message>
<location line="-133"/>
<location line="+60"/>
<source>Wallet encrypted</source>
<translation>کیف پول رمزنگاری شد</translation>
</message>
<message>
<location line="-140"/>
<source>Enter the new passphrase to the wallet.<br/>Please use a passphrase of <b>ten or more random characters</b>, or <b>eight or more words</b>.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+82"/>
<source>ChavezCoin will close now to finish the encryption process. Remember that encrypting your wallet cannot fully protect your coins from being stolen by malware infecting your computer.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<location line="+7"/>
<location line="+44"/>
<location line="+6"/>
<source>Wallet encryption failed</source>
<translation>رمزنگاری کیف پول با خطا مواجه شد</translation>
</message>
<message>
<location line="-56"/>
<source>Wallet encryption failed due to an internal error. Your wallet was not encrypted.</source>
<translation>رمزنگاری کیف پول بنا به یک خطای داخلی با شکست مواجه شد. کیف پول شما رمزنگاری نشد.</translation>
</message>
<message>
<location line="+7"/>
<location line="+50"/>
<source>The supplied passphrases do not match.</source>
<translation>گذرواژههای داده شده با هم تطابق ندارند.</translation>
</message>
<message>
<location line="-38"/>
<source>Wallet unlock failed</source>
<translation>بازگشایی قفل کیفپول با شکست مواجه شد</translation>
</message>
<message>
<location line="+1"/>
<location line="+12"/>
<location line="+19"/>
<source>The passphrase entered for the wallet decryption was incorrect.</source>
<translation>گذرواژهٔ وارد شده برای رمزگشایی کیف پول نادرست بود.</translation>
</message>
<message>
<location line="-20"/>
<source>Wallet decryption failed</source>
<translation>رمزگشایی ناموفق کیف پول</translation>
</message>
<message>
<location line="+14"/>
<source>Wallet passphrase was successfully changed.</source>
<translation>گذرواژهٔ کیف پول با موفقیت عوض شد.</translation>
</message>
</context>
<context>
<name>BitcoinGUI</name>
<message>
<location filename="../bitcoingui.cpp" line="+297"/>
<source>Sign &message...</source>
<translation>&امضای پیام...</translation>
</message>
<message>
<location line="-64"/>
<source>Show general overview of wallet</source>
<translation>نمایش بررسی اجمالی کیف پول</translation>
</message>
<message>
<location line="+17"/>
<source>&Transactions</source>
<translation>&تراکنشها</translation>
</message>
<message>
<location line="+1"/>
<source>Browse transaction history</source>
<translation>مرور تاریخچهٔ تراکنشها</translation>
</message>
<message>
<location line="+5"/>
<source>&Address Book</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Edit the list of stored addresses and labels</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-18"/>
<source>Show the list of addresses for receiving payments</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+34"/>
<source>E&xit</source>
<translation>&خروج</translation>
</message>
<message>
<location line="+1"/>
<source>Quit application</source>
<translation>خروج از برنامه</translation>
</message>
<message>
<location line="+4"/>
<source>Show information about ChavezCoin</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>About &Qt</source>
<translation>دربارهٔ &کیوت</translation>
</message>
<message>
<location line="+1"/>
<source>Show information about Qt</source>
<translation>نمایش اطلاعات دربارهٔ کیوت</translation>
</message>
<message>
<location line="+2"/>
<source>&Options...</source>
<translation>&تنظیمات...</translation>
</message>
<message>
<location line="+4"/>
<source>&Encrypt Wallet...</source>
<translation>&رمزنگاری کیف پول...</translation>
</message>
<message>
<location line="+2"/>
<source>&Backup Wallet...</source>
<translation>&پشتیبانگیری از کیف پول...</translation>
</message>
<message>
<location line="+2"/>
<source>&Change Passphrase...</source>
<translation>&تغییر گذرواژه...</translation>
</message>
<message>
<location line="+9"/>
<source>&Export...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-55"/>
<source>Send coins to a ChavezCoin address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+39"/>
<source>Modify configuration options for ChavezCoin</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+17"/>
<source>Export the data in the current tab to a file</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-13"/>
<source>Encrypt or decrypt wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Backup wallet to another location</source>
<translation>تهیهٔ پشتیبان از کیف پول در یک مکان دیگر</translation>
</message>
<message>
<location line="+2"/>
<source>Change the passphrase used for wallet encryption</source>
<translation>تغییر گذرواژهٔ مورد استفاده در رمزنگاری کیف پول</translation>
</message>
<message>
<location line="+10"/>
<source>&Debug window</source>
<translation>پنجرهٔ ا&شکالزدایی</translation>
</message>
<message>
<location line="+1"/>
<source>Open debugging and diagnostic console</source>
<translation>باز کردن کنسول خطایابی و اشکالزدایی</translation>
</message>
<message>
<location line="-5"/>
<source>&Verify message...</source>
<translation>با&زبینی پیام...</translation>
</message>
<message>
<location line="-214"/>
<location line="+555"/>
<source>ChavezCoin</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-555"/>
<source>Wallet</source>
<translation>کیف پول</translation>
</message>
<message>
<location line="+193"/>
<source>&About ChavezCoin</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+9"/>
<source>&Show / Hide</source>
<translation>&نمایش/ عدم نمایش</translation>
</message>
<message>
<location line="+8"/>
<source>Unlock wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>&Lock Wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Lock wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+32"/>
<source>&File</source>
<translation>&پرونده</translation>
</message>
<message>
<location line="+8"/>
<source>&Settings</source>
<translation>&تنظیمات</translation>
</message>
<message>
<location line="+8"/>
<source>&Help</source>
<translation>&کمکرسانی</translation>
</message>
<message>
<location line="+17"/>
<source>Tabs toolbar</source>
<translation>نوارابزار برگهها</translation>
</message>
<message>
<location line="+46"/>
<location line="+9"/>
<source>[testnet]</source>
<translation>[شبکهٔ آزمایش]</translation>
</message>
<message>
<location line="+0"/>
<location line="+58"/>
<source>ChavezCoin client</source>
<translation type="unfinished"/>
</message>
<message numerus="yes">
<location line="+70"/>
<source>%n active connection(s) to ChavezCoin network</source>
<translation type="unfinished"><numerusform></numerusform></translation>
</message>
<message>
<location line="+488"/>
<source>Staking.<br>Your weight is %1<br>Network weight is %2<br>Expected time to earn reward is %3</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>Not staking because wallet is locked</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Not staking because wallet is offline</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Not staking because wallet is syncing</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Not staking because you don't have mature coins</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-812"/>
<source>&Dashboard</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>&Receive</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>&Send</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+49"/>
<source>&Unlock Wallet...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+277"/>
<source>Up to date</source>
<translation>وضعیت بهروز</translation>
</message>
<message>
<location line="+43"/>
<source>Catching up...</source>
<translation>بهروز رسانی...</translation>
</message>
<message>
<location line="+113"/>
<source>Confirm transaction fee</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+27"/>
<source>Sent transaction</source>
<translation>تراکنش ارسال شد</translation>
</message>
<message>
<location line="+1"/>
<source>Incoming transaction</source>
<translation>تراکنش دریافت شد</translation>
</message>
<message>
<location line="+1"/>
<source>Date: %1
Amount: %2
Type: %3
Address: %4
</source>
<translation>تاریخ: %1
مبلغ: %2
نوع: %3
نشانی: %4
</translation>
</message>
<message>
<location line="+100"/>
<location line="+15"/>
<source>URI handling</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-15"/>
<location line="+15"/>
<source>URI can not be parsed! This can be caused by an invalid ChavezCoin address or malformed URI parameters.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+9"/>
<source>Wallet is <b>not encrypted</b></source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>Wallet is <b>encrypted</b> and currently <b>unlocked</b></source>
<translation>کیف پول <b>رمزنگاری شده</b> است و هماکنون <b>باز</b> است</translation>
</message>
<message>
<location line="+8"/>
<source>Wallet is <b>encrypted</b> and currently <b>locked</b></source>
<translation>کیف پول <b>رمزنگاری شده</b> است و هماکنون <b>قفل</b> است</translation>
</message>
<message>
<location line="+24"/>
<source>Backup Wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>Wallet Data (*.dat)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Backup Failed</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>There was an error trying to save the wallet data to the new location.</source>
<translation type="unfinished"/>
</message>
<message numerus="yes">
<location line="+91"/>
<source>%n second(s)</source>
<translation type="unfinished"><numerusform></numerusform></translation>
</message>
<message numerus="yes">
<location line="+4"/>
<source>%n minute(s)</source>
<translation type="unfinished"><numerusform></numerusform></translation>
</message>
<message numerus="yes">
<location line="-429"/>
<location line="+433"/>
<source>%n hour(s)</source>
<translation><numerusform>%n ساعت</numerusform></translation>
</message>
<message>
<location line="-456"/>
<source>Processed %1 blocks of transaction history.</source>
<translation type="unfinished"/>
</message>
<message numerus="yes">
<location line="+27"/>
<location line="+433"/>
<source>%n day(s)</source>
<translation><numerusform>%n روز</numerusform></translation>
</message>
<message numerus="yes">
<location line="-429"/>
<location line="+6"/>
<source>%n week(s)</source>
<translation type="unfinished"><numerusform></numerusform></translation>
</message>
<message>
<location line="+0"/>
<source>%1 and %2</source>
<translation type="unfinished"/>
</message>
<message numerus="yes">
<location line="+0"/>
<source>%n year(s)</source>
<translation type="unfinished"><numerusform></numerusform></translation>
</message>
<message>
<location line="+5"/>
<source>%1 behind</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+15"/>
<source>Last received block was generated %1 ago.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Transactions after this will not yet be visible.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+23"/>
<source>Error</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Warning</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Information</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+69"/>
<source>This transaction is over the size limit. You can still send it for a fee of %1, which goes to the nodes that process your transaction and helps to support the network. Do you want to pay the fee?</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+324"/>
<source>Not staking</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoin.cpp" line="+104"/>
<source>A fatal error occurred. ChavezCoin can no longer continue safely and will quit.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>ClientModel</name>
<message>
<location filename="../clientmodel.cpp" line="+119"/>
<source>Network Alert</source>
<translation>پیام شبکه</translation>
</message>
</context>
<context>
<name>CoinControlDialog</name>
<message>
<location filename="../forms/coincontroldialog.ui" line="+14"/>
<source>Coin Control</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+31"/>
<source>Quantity:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+32"/>
<source>Bytes:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+48"/>
<source>Amount:</source>
<translation>مبلغ:</translation>
</message>
<message>
<location line="+48"/>
<source>Fee:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+35"/>
<source>Low Output:</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../coincontroldialog.cpp" line="+493"/>
<source>no</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/coincontroldialog.ui" line="+51"/>
<source>After Fee:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+35"/>
<source>Change:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+69"/>
<source>(un)select all</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<source>Tree mode</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+16"/>
<source>List mode</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+45"/>
<source>Amount</source>
<translation>مبلغ</translation>
</message>
<message>
<location line="+5"/>
<source>Label</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Address</source>
<translation>نشانی</translation>
</message>
<message>
<location line="+5"/>
<source>Date</source>
<translation>تاریخ</translation>
</message>
<message>
<location line="+5"/>
<source>Confirmations</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Confirmed</source>
<translation>تأیید شده</translation>
</message>
<message>
<location line="+5"/>
<source>Priority</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../coincontroldialog.cpp" line="-456"/>
<source>Copy address</source>
<translation>کپی نشانی</translation>
</message>
<message>
<location line="+1"/>
<source>Copy label</source>
<translation>کپی برچسب</translation>
</message>
<message>
<location line="+1"/>
<location line="+26"/>
<source>Copy amount</source>
<translation>کپی مقدار</translation>
</message>
<message>
<location line="-25"/>
<source>Copy transaction ID</source>
<translation>کپی شناسهٔ تراکنش</translation>
</message>
<message>
<location line="+24"/>
<source>Copy quantity</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Copy fee</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Copy after fee</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Copy bytes</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Copy low output</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Copy change</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+423"/>
<source>DUST</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>yes</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+9"/>
<source>This label turns red, if the transaction size is bigger than 10000 bytes.
This means a fee of at least %1 per kb is required.
Can vary +/- 1 Byte per input.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>This label turns red, if any recipient receives an amount smaller than %1.
This means a fee of at least %2 is required.
Amounts below 0.546 times the minimum relay fee are shown as DUST.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>This label turns red, if the change is smaller than %1.
This means a fee of at least %2 is required.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+35"/>
<location line="+58"/>
<source>(no label)</source>
<translation>(بدون برچسب)</translation>
</message>
<message>
<location line="-9"/>
<source>change from %1 (%2)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>(change)</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>EditAddressDialog</name>
<message>
<location filename="../forms/editaddressdialog.ui" line="+14"/>
<source>Edit Address</source>
<translation>ویرایش نشانی</translation>
</message>
<message>
<location line="+11"/>
<source>&Label</source>
<translation>&برچسب</translation>
</message>
<message>
<location line="+10"/>
<source>The label associated with this address book entry</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>&Address</source>
<translation>&نشانی</translation>
</message>
<message>
<location line="+10"/>
<source>The address associated with this address book entry. This can only be modified for sending addresses.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../editaddressdialog.cpp" line="+21"/>
<source>New receiving address</source>
<translation>نشانی دریافتی جدید</translation>
</message>
<message>
<location line="+4"/>
<source>New sending address</source>
<translation>نشانی ارسالی جدید</translation>
</message>
<message>
<location line="+3"/>
<source>Edit receiving address</source>
<translation>ویرایش نشانی دریافتی</translation>
</message>
<message>
<location line="+4"/>
<source>Edit sending address</source>
<translation>ویرایش نشانی ارسالی</translation>
</message>
<message>
<location line="+76"/>
<source>The entered address "%1" is already in the address book.</source>
<translation>نشانی وارد شده «%1» در حال حاضر در دفترچه وجود دارد.</translation>
</message>
<message>
<location line="-5"/>
<source>The entered address "%1" is not a valid ChavezCoin address.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+10"/>
<source>Could not unlock wallet.</source>
<translation>نمیتوان کیف پول را رمزگشایی کرد.</translation>
</message>
<message>
<location line="+5"/>
<source>New key generation failed.</source>
<translation>ایجاد کلید جدید با شکست مواجه شد.</translation>
</message>
</context>
<context>
<name>GUIUtil::HelpMessageBox</name>
<message>
<location filename="../guiutil.cpp" line="+426"/>
<location line="+12"/>
<source>ChavezCoin-Qt</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-12"/>
<source>version</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Usage:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>command-line options</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>UI options</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Set language, for example "de_DE" (default: system locale)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Start minimized</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Show splash screen on startup (default: 1)</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>OptionsDialog</name>
<message>
<location filename="../forms/optionsdialog.ui" line="+14"/>
<source>Options</source>
<translation>گزینهها</translation>
</message>
<message>
<location line="+16"/>
<source>&Main</source>
<translation>&عمومی</translation>
</message>
<message>
<location line="+6"/>
<source>Optional transaction fee per kB that helps make sure your transactions are processed quickly. Most transactions are 1 kB. Fee 0.01 recommended.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+15"/>
<source>Pay transaction &fee</source>
<translation>پرداخت &کارمزد تراکنش</translation>
</message>
<message>
<location line="+31"/>
<source>Reserved amount does not participate in staking and is therefore spendable at any time.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+15"/>
<source>Reserve</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+31"/>
<source>Automatically start ChavezCoin after logging in to the system.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Start ChavezCoin on system login</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+21"/>
<source>&Network</source>
<translation>&شبکه</translation>
</message>
<message>
<location line="+6"/>
<source>Automatically open the ChavezCoin client port on the router. This only works when your router supports UPnP and it is enabled.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Map port using &UPnP</source>
<translation>نگاشت درگاه شبکه با استفاده از پروتکل &UPnP</translation>
</message>
<message>
<location line="+19"/>
<source>Proxy &IP:</source>
<translation>آ&یپی پراکسی:</translation>
</message>
<message>
<location line="+19"/>
<source>IP address of the proxy (e.g. 127.0.0.1)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>&Port:</source>
<translation>&درگاه:</translation>
</message>
<message>
<location line="+19"/>
<source>Port of the proxy (e.g. 9050)</source>
<translation>درگاه پراکسی (مثال 9050)</translation>
</message>
<message>
<location line="-57"/>
<source>Connect to the ChavezCoin network through a SOCKS5 proxy (e.g. when connecting through Tor).</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Connect through SOCKS5 proxy:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+90"/>
<source>&Window</source>
<translation>&پنجره</translation>
</message>
<message>
<location line="+6"/>
<source>Show only a tray icon after minimizing the window.</source>
<translation>تنها بعد از کوچک کردن پنجره، tray icon را نشان بده.</translation>
</message>
<message>
<location line="+3"/>
<source>&Minimize to the tray instead of the taskbar</source>
<translation>&کوچک کردن به سینی بهجای نوار وظیفه</translation>
</message>
<message>
<location line="+7"/>
<source>Minimize instead of exit the application when the window is closed. When this option is enabled, the application will be closed only after selecting Quit in the menu.</source>
<translation>مخفی کردن در نوار کناری بهجای خروج هنگام بستن پنجره. زمانی که این گزینه فعال است، برنامه فقط با استفاده از گزینهٔ خروج در منو قابل بسته شدن است.</translation>
</message>
<message>
<location line="+3"/>
<source>M&inimize on close</source>
<translation>کوچک کردن &در زمان بسته شدن</translation>
</message>
<message>
<location line="+21"/>
<source>&Display</source>
<translation>&نمایش</translation>
</message>
<message>
<location line="+8"/>
<source>User Interface &language:</source>
<translation>زبان &رابط کاربری:</translation>
</message>
<message>
<location line="+13"/>
<source>The user interface language can be set here. This setting will take effect after restarting ChavezCoin.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+11"/>
<source>&Unit to show amounts in:</source>
<translation>&واحد نمایش مبالغ:</translation>
</message>
<message>
<location line="+13"/>
<source>Choose the default subdivision unit to show in the interface and when sending coins.</source>
<translation>انتخاب واحد پول مورد استفاده برای نمایش در پنجرهها و برای ارسال سکه.</translation>
</message>
<message>
<location line="+9"/>
<source>Whether to show coin control features or not.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Display coin &control features (experts only!)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Use black visual theme (requires restart)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+71"/>
<source>&OK</source>
<translation>&تأیید</translation>
</message>
<message>
<location line="+7"/>
<source>&Cancel</source>
<translation>&لغو</translation>
</message>
<message>
<location line="+10"/>
<source>&Apply</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../optionsdialog.cpp" line="+47"/>
<source>default</source>
<translation>پیشفرض</translation>
</message>
<message>
<location line="+147"/>
<location line="+9"/>
<source>Warning</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-9"/>
<location line="+9"/>
<source>This setting will take effect after restarting ChavezCoin.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+29"/>
<source>The supplied proxy address is invalid.</source>
<translation>آدرس پراکسی داده شده صحیح نیست.</translation>
</message>
</context>
<context>
<name>OverviewPage</name>
<message>
<location filename="../forms/overviewpage.ui" line="+14"/>
<source>Form</source>
<translation>فرم</translation>
</message>
<message>
<location line="+46"/>
<location line="+247"/>
<source>The displayed information may be out of date. Your wallet automatically synchronizes with the ChavezCoin network after a connection is established, but this process has not completed yet.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-173"/>
<source>Stake:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+32"/>
<source>Unconfirmed:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-113"/>
<source>Wallet</source>
<translation>کیف پول</translation>
</message>
<message>
<location line="+49"/>
<source>Spendable:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+16"/>
<source>Your current spendable balance</source>
<translation>تراز علیالحساب شما</translation>
</message>
<message>
<location line="+80"/>
<source>Immature:</source>
<translation>نارسیده:</translation>
</message>
<message>
<location line="+13"/>
<source>Mined balance that has not yet matured</source>
<translation>تراز استخراج شده از معدن که هنوز بالغ نشده است</translation>
</message>
<message>
<location line="+23"/>
<source>Total:</source>
<translation>جمع کل:</translation>
</message>
<message>
<location line="+16"/>
<source>Your current total balance</source>
<translation>تراز کل فعلی شما</translation>
</message>
<message>
<location line="+50"/>
<source><b>Recent transactions</b></source>
<translation><b>تراکنشهای اخیر</b></translation>
</message>
<message>
<location line="-118"/>
<source>Total of transactions that have yet to be confirmed, and do not yet count toward the current balance</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-32"/>
<source>Total of coins that was staked, and do not yet count toward the current balance</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../overviewpage.cpp" line="+116"/>
<location line="+1"/>
<source>out of sync</source>
<translation>ناهمگام</translation>
</message>
</context>
<context>
<name>PaymentServer</name>
<message>
<location filename="../paymentserver.cpp" line="+107"/>
<source>Cannot start chavezcoin: click-to-pay handler</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>QRCodeDialog</name>
<message>
<location filename="../forms/qrcodedialog.ui" line="+14"/>
<source>QR Code Dialog</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+59"/>
<source>Request Payment</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+56"/>
<source>Amount:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-44"/>
<source>Label:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+19"/>
<source>Message:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+71"/>
<source>&Save As...</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../qrcodedialog.cpp" line="+62"/>
<source>Error encoding URI into QR Code.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+40"/>
<source>The entered amount is invalid, please check.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+23"/>
<source>Resulting URI too long, try to reduce the text for label / message.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+25"/>
<source>Save QR Code</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>PNG Images (*.png)</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>RPCConsole</name>
<message>
<location filename="../forms/rpcconsole.ui" line="+46"/>
<source>Client name</source>
<translation>نام کلاینت</translation>
</message>
<message>
<location line="+10"/>
<location line="+23"/>
<location line="+26"/>
<location line="+23"/>
<location line="+23"/>
<location line="+36"/>
<location line="+53"/>
<location line="+23"/>
<source>N/A</source>
<translation>ناموجود</translation>
</message>
<message>
<location line="-194"/>
<source>Client version</source>
<translation>نسخهٔ کلاینت</translation>
</message>
<message>
<location line="-45"/>
<source>&Information</source>
<translation>&اطلاعات</translation>
</message>
<message>
<location line="+68"/>
<source>Using OpenSSL version</source>
<translation>نسخهٔ OpenSSL استفاده شده</translation>
</message>
<message>
<location line="+49"/>
<source>Startup time</source>
<translation>زمان آغاز به کار</translation>
</message>
<message>
<location line="+29"/>
<source>Network</source>
<translation>شبکه</translation>
</message>
<message>
<location line="+7"/>
<source>Number of connections</source>
<translation>تعداد ارتباطات</translation>
</message>
<message>
<location line="+23"/>
<source>On testnet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+23"/>
<source>Block chain</source>
<translation>زنجیرهٔ بلوکها</translation>
</message>
<message>
<location line="+7"/>
<source>Current number of blocks</source>
<translation>تعداد فعلی بلوکها</translation>
</message>
<message>
<location line="+197"/>
<source>&Network Traffic</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+52"/>
<source>&Clear</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<source>Totals</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+64"/>
<location filename="../rpcconsole.cpp" line="+352"/>
<source>In:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+80"/>
<location filename="../rpcconsole.cpp" line="+1"/>
<source>Out:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-383"/>
<source>Last block time</source>
<translation>زمان آخرین بلوک</translation>
</message>
<message>
<location line="+52"/>
<source>&Open</source>
<translation>با&ز کردن</translation>
</message>
<message>
<location line="+16"/>
<source>Command-line options</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Show the ChavezCoin-Qt help message to get a list with possible ChavezCoin command-line options.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Show</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+24"/>
<source>&Console</source>
<translation>&کنسول</translation>
</message>
<message>
<location line="-237"/>
<source>Build date</source>
<translation>تاریخ ساخت</translation>
</message>
<message>
<location line="-104"/>
<source>ChavezCoin - Debug window</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+25"/>
<source>ChavezCoin Core</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+256"/>
<source>Debug log file</source>
<translation>فایلِ لاگِ اشکال زدایی</translation>
</message>
<message>
<location line="+7"/>
<source>Open the ChavezCoin debug log file from the current data directory. This can take a few seconds for large log files.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+102"/>
<source>Clear console</source>
<translation>پاکسازی کنسول</translation>
</message>
<message>
<location filename="../rpcconsole.cpp" line="-28"/>
<source>Welcome to the ChavezCoin RPC console.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Use up and down arrows to navigate history, and <b>Ctrl-L</b> to clear screen.</source>
<translation>دکمههای بالا و پایین برای پیمایش تاریخچه و <b>Ctrl-L</b> برای پاک کردن صفحه.</translation>
</message>
<message>
<location line="+1"/>
<source>Type <b>help</b> for an overview of available commands.</source>
<translation>برای نمایش یک مرور کلی از دستورات ممکن، عبارت <b>help</b> را بنویسید.</translation>
</message>
<message>
<location line="+134"/>
<source>%1 B</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>%1 KB</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>%1 MB</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>%1 GB</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>%1 m</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>%1 h</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>%1 h %2 m</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>SendCoinsDialog</name>
<message>
<location filename="../forms/sendcoinsdialog.ui" line="+14"/>
<location filename="../sendcoinsdialog.cpp" line="+179"/>
<location line="+5"/>
<location line="+5"/>
<location line="+5"/>
<location line="+6"/>
<location line="+5"/>
<location line="+5"/>
<source>Send Coins</source>
<translation>ارسال سکه</translation>
</message>
<message>
<location line="+76"/>
<source>Coin Control Features</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+20"/>
<source>Inputs...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>automatically selected</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+19"/>
<source>Insufficient funds!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+77"/>
<source>Quantity:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+22"/>
<location line="+35"/>
<source>0</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-19"/>
<source>Bytes:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+51"/>
<source>Amount:</source>
<translation>مبلغ:</translation>
</message>
<message>
<location line="+54"/>
<source>Fee:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+35"/>
<source>Low Output:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+19"/>
<source>no</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+32"/>
<source>After Fee:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+35"/>
<source>Change</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+50"/>
<source>custom change address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+106"/>
<source>Send to multiple recipients at once</source>
<translation>ارسال به چند دریافتکننده بهطور همزمان</translation>
</message>
<message>
<location line="+3"/>
<source>Add &Recipient</source>
<translation>&دریافتکنندهٔ جدید</translation>
</message>
<message>
<location line="+16"/>
<source>Remove all transaction fields</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Clear &All</source>
<translation>پاکسازی &همه</translation>
</message>
<message>
<location line="+24"/>
<source>Balance:</source>
<translation>تراز:</translation>
</message>
<message>
<location line="+47"/>
<source>Confirm the send action</source>
<translation>عملیات ارسال را تأیید کنید</translation>
</message>
<message>
<location line="+3"/>
<source>S&end</source>
<translation>&ارسال</translation>
</message>
<message>
<location filename="../sendcoinsdialog.cpp" line="-171"/>
<source>Enter a ChavezCoin address (e.g. B8gZqgY4r2RoEdqYk3QsAqFckyf9pRHN6i)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+15"/>
<source>Copy quantity</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Copy amount</source>
<translation>کپی مقدار</translation>
</message>
<message>
<location line="+1"/>
<source>Copy fee</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Copy after fee</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Copy bytes</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Copy low output</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Copy change</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+85"/>
<source><b>%1</b> to %2 (%3)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Confirm send coins</source>
<translation>ارسال سکه را تأیید کنید</translation>
</message>
<message>
<location line="+1"/>
<source>Are you sure you want to send %1?</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source> and </source>
<translation type="unfinished"/>
</message>
<message>
<location line="+29"/>
<source>The recipient address is not valid, please recheck.</source>
<translation>نشانی گیرنده معتبر نیست؛ لطفا دوباره بررسی کنید.</translation>
</message>
<message>
<location line="+5"/>
<source>The amount to pay must be larger than 0.</source>
<translation>مبلغ پرداخت باید بیشتر از ۰ باشد.</translation>
</message>
<message>
<location line="+5"/>
<source>The amount exceeds your balance.</source>
<translation>میزان پرداخت از تراز شما بیشتر است.</translation>
</message>
<message>
<location line="+5"/>
<source>The total exceeds your balance when the %1 transaction fee is included.</source>
<translation>با احتساب هزینهٔ %1 برای هر تراکنش، مجموع میزان پرداختی از مبلغ تراز شما بیشتر میشود.</translation>
</message>
<message>
<location line="+6"/>
<source>Duplicate address found, can only send to each address once per send operation.</source>
<translation>یک نشانی تکراری پیدا شد. در هر عملیات ارسال، به هر نشانی فقط یک بار میتوان ارسال کرد.</translation>
</message>
<message>
<location line="+5"/>
<source>Error: Transaction creation failed!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Error: The transaction was rejected. This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+241"/>
<source>WARNING: Invalid ChavezCoin address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<source>(no label)</source>
<translation>(بدون برچسب)</translation>
</message>
<message>
<location line="+4"/>
<source>WARNING: unknown change address</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>SendCoinsEntry</name>
<message>
<location filename="../forms/sendcoinsentry.ui" line="+14"/>
<source>Form</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+15"/>
<source>A&mount:</source>
<translation>&مبلغ:</translation>
</message>
<message>
<location line="+13"/>
<source>Pay &To:</source>
<translation>پرداخ&ت به:</translation>
</message>
<message>
<location line="+34"/>
<source>The address to send the payment to (e.g. B8gZqgY4r2RoEdqYk3QsAqFckyf9pRHN6i)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+60"/>
<location filename="../sendcoinsentry.cpp" line="+26"/>
<source>Enter a label for this address to add it to your address book</source>
<translation>برای این نشانی یک برچسب وارد کنید تا در دفترچهٔ آدرس ذخیره شود</translation>
</message>
<message>
<location line="-78"/>
<source>&Label:</source>
<translation>&برچسب:</translation>
</message>
<message>
<location line="+28"/>
<source>Choose address from address book</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+10"/>
<source>Alt+A</source>
<translation>Alt+A</translation>
</message>
<message>
<location line="+7"/>
<source>Paste address from clipboard</source>
<translation>چسباندن نشانی از حافظهٔ سیستم</translation>
</message>
<message>
<location line="+10"/>
<source>Alt+P</source>
<translation>Alt+P</translation>
</message>
<message>
<location line="+7"/>
<source>Remove this recipient</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../sendcoinsentry.cpp" line="+1"/>
<source>Enter a ChavezCoin address (e.g. B8gZqgY4r2RoEdqYk3QsAqFckyf9pRHN6i)</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>SignVerifyMessageDialog</name>
<message>
<location filename="../forms/signverifymessagedialog.ui" line="+14"/>
<source>Signatures - Sign / Verify a Message</source>
<translation>امضاها - امضا / تأیید یک پیام</translation>
</message>
<message>
<location line="+13"/>
<location line="+124"/>
<source>&Sign Message</source>
<translation>ا&مضای پیام</translation>
</message>
<message>
<location line="-118"/>
<source>You can sign messages with your addresses to prove you own them. Be careful not to sign anything vague, as phishing attacks may try to trick you into signing your identity over to them. Only sign fully-detailed statements you agree to.</source>
<translation>برای احراز اینکه پیامها از جانب شما هستند، میتوانید آنها را با نشانی خودتان امضا کنید. مراقب باشید چیزی که بدان اطمینان ندارید را امضا نکنید زیرا حملات فیشینگ ممکن است بخواهند از پیامی با امضای شما سوءاستفاده کنند. تنها مواردی را که حاوی اطلاعات دقیق و قابل قبول برای شما هستند امضا کنید.</translation>
</message>
<message>
<location line="+18"/>
<source>The address to sign the message with (e.g. B8gZqgY4r2RoEdqYk3QsAqFckyf9pRHN6i)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+10"/>
<location line="+203"/>
<source>Choose an address from the address book</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-193"/>
<location line="+203"/>
<source>Alt+A</source>
<translation>Alt+A</translation>
</message>
<message>
<location line="-193"/>
<source>Paste address from clipboard</source>
<translation>چسباندن نشانی از حافظهٔ سیستم</translation>
</message>
<message>
<location line="+10"/>
<source>Alt+P</source>
<translation>Alt+P</translation>
</message>
<message>
<location line="+12"/>
<source>Enter the message you want to sign here</source>
<translation>پیامی را که میخواهید امضا کنید در اینجا وارد کنید</translation>
</message>
<message>
<location line="+24"/>
<source>Copy the current signature to the system clipboard</source>
<translation>امضای فعلی را به حافظهٔ سیستم کپی کن</translation>
</message>
<message>
<location line="+21"/>
<source>Sign the message to prove you own this ChavezCoin address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+17"/>
<source>Reset all sign message fields</source>
<translation>بازنشانی تمام فیلدهای امضای پیام</translation>
</message>
<message>
<location line="+3"/>
<location line="+146"/>
<source>Clear &All</source>
<translation>پاک &کردن همه</translation>
</message>
<message>
<location line="-87"/>
<location line="+70"/>
<source>&Verify Message</source>
<translation>&شناسایی پیام</translation>
</message>
<message>
<location line="-64"/>
<source>Enter the signing address, message (ensure you copy line breaks, spaces, tabs, etc. exactly) and signature below to verify the message. Be careful not to read more into the signature than what is in the signed message itself, to avoid being tricked by a man-in-the-middle attack.</source>
<translation>برای شناسایی پیام، نشانیِ امضا کننده و متن پیام را وارد کنید. (مطمئن شوید که فاصلهها، تبها و خطوط را عیناً کپی میکنید.) مراقب باشید در امضا چیزی بیشتر از آنچه در پیام میبینید وجود نداشته باشد تا فریب دزدان اینترنتی و حملات از نوع MITM را نخورید.</translation>
</message>
<message>
<location line="+21"/>
<source>The address the message was signed with (e.g. B8gZqgY4r2RoEdqYk3QsAqFckyf9pRHN6i)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+40"/>
<source>Verify the message to ensure it was signed with the specified ChavezCoin address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+17"/>
<source>Reset all verify message fields</source>
<translation>بازنشانی تمام فیلدهای شناسایی پیام</translation>
</message>
<message>
<location filename="../signverifymessagedialog.cpp" line="+27"/>
<location line="+3"/>
<source>Enter a ChavezCoin address (e.g. B8gZqgY4r2RoEdqYk3QsAqFckyf9pRHN6i)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-2"/>
<source>Click "Sign Message" to generate signature</source>
<translation>برای ایجاد یک امضای جدید روی «امضای پیام» کلیک کنید</translation>
</message>
<message>
<location line="+3"/>
<source>Enter ChavezCoin signature</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+85"/>
<location line="+81"/>
<source>The entered address is invalid.</source>
<translation>نشانی وارد شده نامعتبر است.</translation>
</message>
<message>
<location line="-81"/>
<location line="+8"/>
<location line="+73"/>
<location line="+8"/>
<source>Please check the address and try again.</source>
<translation>لطفاً نشانی را بررسی کنید و دوباره تلاش کنید.</translation>
</message>
<message>
<location line="-81"/>
<location line="+81"/>
<source>The entered address does not refer to a key.</source>
<translation>نشانی وارد شده به هیچ کلیدی اشاره نمیکند.</translation>
</message>
<message>
<location line="-73"/>
<source>Wallet unlock was cancelled.</source>
<translation>عملیات باز کردن قفل کیف پول لغو شد.</translation>
</message>
<message>
<location line="+8"/>
<source>Private key for the entered address is not available.</source>
<translation>کلید خصوصی برای نشانی وارد شده در دسترس نیست.</translation>
</message>
<message>
<location line="+12"/>
<source>Message signing failed.</source>
<translation>امضای پیام با شکست مواجه شد.</translation>
</message>
<message>
<location line="+5"/>
<source>Message signed.</source>
<translation>پیام امضا شد.</translation>
</message>
<message>
<location line="+59"/>
<source>The signature could not be decoded.</source>
<translation>امضا نمیتواند کدگشایی شود.</translation>
</message>
<message>
<location line="+0"/>
<location line="+13"/>
<source>Please check the signature and try again.</source>
<translation>لطفاً امضا را بررسی نموده و دوباره تلاش کنید.</translation>
</message>
<message>
<location line="+0"/>
<source>The signature did not match the message digest.</source>
<translation>امضا با خلاصهٔ پیام مطابقت ندارد.</translation>
</message>
<message>
<location line="+7"/>
<source>Message verification failed.</source>
<translation>شناسایی پیام با شکست مواجه شد.</translation>
</message>
<message>
<location line="+5"/>
<source>Message verified.</source>
<translation>پیام شناسایی شد.</translation>
</message>
</context>
<context>
<name>TrafficGraphWidget</name>
<message>
<location filename="../trafficgraphwidget.cpp" line="+75"/>
<source>KB/s</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>TransactionDesc</name>
<message>
<location filename="../transactiondesc.cpp" line="+25"/>
<source>Open until %1</source>
<translation>باز تا %1</translation>
</message>
<message>
<location line="+6"/>
<source>conflicted</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>%1/offline</source>
<translation>%1/آفلاین</translation>
</message>
<message>
<location line="+2"/>
<source>%1/unconfirmed</source>
<translation>%1/تأیید نشده</translation>
</message>
<message>
<location line="+2"/>
<source>%1 confirmations</source>
<translation>%1 تأییدیه</translation>
</message>
<message>
<location line="+17"/>
<source>Status</source>
<translation>وضعیت</translation>
</message>
<message numerus="yes">
<location line="+7"/>
<source>, broadcast through %n node(s)</source>
<translation><numerusform>، پخش از طریق %n گره</numerusform></translation>
</message>
<message>
<location line="+4"/>
<source>Date</source>
<translation>تاریخ</translation>
</message>
<message>
<location line="+7"/>
<source>Source</source>
<translation>منبع</translation>
</message>
<message>
<location line="+0"/>
<source>Generated</source>
<translation>تولید شده</translation>
</message>
<message>
<location line="+5"/>
<location line="+13"/>
<source>From</source>
<translation>فرستنده</translation>
</message>
<message>
<location line="+1"/>
<location line="+19"/>
<location line="+58"/>
<source>To</source>
<translation>گیرنده</translation>
</message>
<message>
<location line="-74"/>
<location line="+2"/>
<source>own address</source>
<translation>آدرس شما</translation>
</message>
<message>
<location line="-2"/>
<source>label</source>
<translation>برچسب</translation>
</message>
<message>
<location line="+34"/>
<location line="+12"/>
<location line="+45"/>
<location line="+17"/>
<location line="+30"/>
<source>Credit</source>
<translation>اعتبار</translation>
</message>
<message numerus="yes">
<location line="-102"/>
<source>matures in %n more block(s)</source>
<translation><numerusform>بلوغ در %n بلوک دیگر</numerusform></translation>
</message>
<message>
<location line="+2"/>
<source>not accepted</source>
<translation>پذیرفته نشد</translation>
</message>
<message>
<location line="+44"/>
<location line="+8"/>
<location line="+15"/>
<location line="+30"/>
<source>Debit</source>
<translation>بدهی</translation>
</message>
<message>
<location line="-39"/>
<source>Transaction fee</source>
<translation>هزینهٔ تراکنش</translation>
</message>
<message>
<location line="+16"/>
<source>Net amount</source>
<translation>مبلغ خالص</translation>
</message>
<message>
<location line="+6"/>
<source>Message</source>
<translation>پیام</translation>
</message>
<message>
<location line="+2"/>
<source>Comment</source>
<translation>نظر</translation>
</message>
<message>
<location line="+2"/>
<source>Transaction ID</source>
<translation>شناسهٔ تراکنش</translation>
</message>
<message>
<location line="+3"/>
<source>Generated coins must mature 510 blocks before they can be spent. When you generated this block, it was broadcast to the network to be added to the block chain. If it fails to get into the chain, its state will change to "not accepted" and it won't be spendable. This may occasionally happen if another node generates a block within a few seconds of yours.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Debug information</source>
<translation>اطلاعات اشکالزدایی</translation>
</message>
<message>
<location line="+8"/>
<source>Transaction</source>
<translation>تراکنش</translation>
</message>
<message>
<location line="+5"/>
<source>Inputs</source>
<translation>ورودیها</translation>
</message>
<message>
<location line="+21"/>
<source>Amount</source>
<translation>مبلغ</translation>
</message>
<message>
<location line="+1"/>
<source>true</source>
<translation>درست</translation>
</message>
<message>
<location line="+0"/>
<source>false</source>
<translation>نادرست</translation>
</message>
<message>
<location line="-202"/>
<source>, has not been successfully broadcast yet</source>
<translation>، هنوز با موفقیت ارسال نشده</translation>
</message>
<message numerus="yes">
<location line="-36"/>
<source>Open for %n more block(s)</source>
<translation type="unfinished"><numerusform></numerusform></translation>
</message>
<message>
<location line="+67"/>
<source>unknown</source>
<translation>ناشناس</translation>
</message>
</context>
<context>
<name>TransactionDescDialog</name>
<message>
<location filename="../forms/transactiondescdialog.ui" line="+14"/>
<source>Transaction details</source>
<translation>جزئیات تراکنش</translation>
</message>
<message>
<location line="+6"/>
<source>This pane shows a detailed description of the transaction</source>
<translation>این پانل شامل توصیف کاملی از جزئیات تراکنش است</translation>
</message>
</context>
<context>
<name>TransactionTableModel</name>
<message>
<location filename="../transactiontablemodel.cpp" line="+231"/>
<source>Date</source>
<translation>تاریخ</translation>
</message>
<message>
<location line="+0"/>
<source>Type</source>
<translation>نوع</translation>
</message>
<message>
<location line="+0"/>
<source>Address</source>
<translation>نشانی</translation>
</message>
<message>
<location line="+0"/>
<source>Amount</source>
<translation>مبلغ</translation>
</message>
<message>
<location line="+52"/>
<source>Open until %1</source>
<translation>باز شده تا %1</translation>
</message>
<message>
<location line="+12"/>
<source>Confirmed (%1 confirmations)</source>
<translation>تأیید شده (%1 تأییدیه)</translation>
</message>
<message numerus="yes">
<location line="-15"/>
<source>Open for %n more block(s)</source>
<translation><numerusform>باز برای %n بلوک دیگر</numerusform></translation>
</message>
<message>
<location line="+6"/>
<source>Offline</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Unconfirmed</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Confirming (%1 of %2 recommended confirmations)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>Conflicted</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Immature (%1 confirmations, will be available after %2)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>This block was not received by any other nodes and will probably not be accepted!</source>
<translation>این بلوک از هیچ همتای دیگری دریافت نشده است و احتمال میرود پذیرفته نشود!</translation>
</message>
<message>
<location line="+3"/>
<source>Generated but not accepted</source>
<translation>تولید شده ولی قبول نشده</translation>
</message>
<message>
<location line="+42"/>
<source>Received with</source>
<translation>دریافتشده با</translation>
</message>
<message>
<location line="+2"/>
<source>Received from</source>
<translation>دریافتشده از</translation>
</message>
<message>
<location line="+3"/>
<source>Sent to</source>
<translation>ارسالشده به</translation>
</message>
<message>
<location line="+2"/>
<source>Payment to yourself</source>
<translation>پرداخت به خودتان</translation>
</message>
<message>
<location line="+2"/>
<source>Mined</source>
<translation>استخراجشده</translation>
</message>
<message>
<location line="+38"/>
<source>(n/a)</source>
<translation>(ناموجود)</translation>
</message>
<message>
<location line="+194"/>
<source>Transaction status. Hover over this field to show number of confirmations.</source>
<translation>وضعیت تراکنش. نشانگر را روی این فیلد نگه دارید تا تعداد تأییدیهها نشان داده شود.</translation>
</message>
<message>
<location line="+2"/>
<source>Date and time that the transaction was received.</source>
<translation>تاریخ و ساعت دریافت تراکنش.</translation>
</message>
<message>
<location line="+2"/>
<source>Type of transaction.</source>
<translation>نوع تراکنش.</translation>
</message>
<message>
<location line="+2"/>
<source>Destination address of transaction.</source>
<translation>نشانی مقصد تراکنش.</translation>
</message>
<message>
<location line="+2"/>
<source>Amount removed from or added to balance.</source>
<translation>مبلغ کسر شده و یا اضافه شده به تراز.</translation>
</message>
</context>
<context>
<name>TransactionView</name>
<message>
<location filename="../transactionview.cpp" line="+54"/>
<location line="+17"/>
<source>All</source>
<translation>همه</translation>
</message>
<message>
<location line="-16"/>
<source>Today</source>
<translation>امروز</translation>
</message>
<message>
<location line="+1"/>
<source>This week</source>
<translation>این هفته</translation>
</message>
<message>
<location line="+1"/>
<source>This month</source>
<translation>این ماه</translation>
</message>
<message>
<location line="+1"/>
<source>Last month</source>
<translation>ماه گذشته</translation>
</message>
<message>
<location line="+1"/>
<source>This year</source>
<translation>امسال</translation>
</message>
<message>
<location line="+1"/>
<source>Range...</source>
<translation>محدوده...</translation>
</message>
<message>
<location line="+12"/>
<source>Received with</source>
<translation>دریافتشده با </translation>
</message>
<message>
<location line="+2"/>
<source>Sent to</source>
<translation>ارسال به</translation>
</message>
<message>
<location line="+2"/>
<source>To yourself</source>
<translation>به خودتان</translation>
</message>
<message>
<location line="+1"/>
<source>Mined</source>
<translation>استخراجشده</translation>
</message>
<message>
<location line="+1"/>
<source>Other</source>
<translation>دیگر</translation>
</message>
<message>
<location line="+7"/>
<source>Enter address or label to search</source>
<translation>برای جستوجو نشانی یا برچسب را وارد کنید</translation>
</message>
<message>
<location line="+7"/>
<source>Min amount</source>
<translation>مبلغ حداقل</translation>
</message>
<message>
<location line="+34"/>
<source>Copy address</source>
<translation>کپی نشانی</translation>
</message>
<message>
<location line="+1"/>
<source>Copy label</source>
<translation>کپی برچسب</translation>
</message>
<message>
<location line="+1"/>
<source>Copy amount</source>
<translation>کپی مقدار</translation>
</message>
<message>
<location line="+1"/>
<source>Copy transaction ID</source>
<translation>کپی شناسهٔ تراکنش</translation>
</message>
<message>
<location line="+1"/>
<source>Edit label</source>
<translation>ویرایش برچسب</translation>
</message>
<message>
<location line="+1"/>
<source>Show transaction details</source>
<translation>نمایش جزئیات تراکنش</translation>
</message>
<message>
<location line="+138"/>
<source>Export Transaction Data</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Comma separated file (*.csv)</source>
<translation>پروندهٔ نوع CSV جداشونده با کاما (*.csv)</translation>
</message>
<message>
<location line="+8"/>
<source>Confirmed</source>
<translation>تأیید شده</translation>
</message>
<message>
<location line="+1"/>
<source>Date</source>
<translation>تاریخ</translation>
</message>
<message>
<location line="+1"/>
<source>Type</source>
<translation>نوع</translation>
</message>
<message>
<location line="+1"/>
<source>Label</source>
<translation>برچسب</translation>
</message>
<message>
<location line="+1"/>
<source>Address</source>
<translation>نشانی</translation>
</message>
<message>
<location line="+1"/>
<source>Amount</source>
<translation>مبلغ</translation>
</message>
<message>
<location line="+1"/>
<source>ID</source>
<translation>شناسه</translation>
</message>
<message>
<location line="+4"/>
<source>Error exporting</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>Could not write to file %1.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+100"/>
<source>Range:</source>
<translation>محدوده:</translation>
</message>
<message>
<location line="+8"/>
<source>to</source>
<translation>به</translation>
</message>
</context>
<context>
<name>WalletModel</name>
<message>
<location filename="../walletmodel.cpp" line="+212"/>
<source>Sending...</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>bitcoin-core</name>
<message>
<location filename="../bitcoinstrings.cpp" line="+8"/>
<source>ChavezCoin version</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Usage:</source>
<translation>استفاده:</translation>
</message>
<message>
<location line="+1"/>
<source>Send command to -server or chavezcoind</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>List commands</source>
<translation>نمایش لیست فرمانها</translation>
</message>
<message>
<location line="+1"/>
<source>Get help for a command</source>
<translation>راهنمایی در مورد یک دستور</translation>
</message>
<message>
<location line="+1"/>
<source>Options:</source>
<translation>گزینهها:</translation>
</message>
<message>
<location line="+2"/>
<source>Specify configuration file (default: chavezcoin.conf)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Specify pid file (default: chavezcoind.pid)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Specify wallet file (within data directory)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-1"/>
<source>Specify data directory</source>
<translation>مشخص کردن دایرکتوری دادهها</translation>
</message>
<message>
<location line="+163"/>
<source>%s, you must set a rpcpassword in the configuration file:
%s
It is recommended you use the following random password:
rpcuser=chavezcoinrpc
rpcpassword=%s
(you do not need to remember this password)
The username and password MUST NOT be the same.
If the file does not exist, create it with owner-readable-only file permissions.
It is also recommended to set alertnotify so you are notified of problems;
for example: alertnotify=echo %%s | mail -s "ChavezCoin Alert" [email protected]
</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-161"/>
<source>Set database cache size in megabytes (default: 25)</source>
<translation>تنظیم اندازهٔ کَش پایگاهداده برحسب مگابایت (پیشفرض: ۲۵)</translation>
</message>
<message>
<location line="+1"/>
<source>Set database disk log size in megabytes (default: 100)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Listen for connections on <port> (default: 15714 or testnet: 25714)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Maintain at most <n> connections to peers (default: 125)</source>
<translation>حداکثر <n> اتصال با همتایان برقرار شود (پیشفرض: ۱۲۵)</translation>
</message>
<message>
<location line="+3"/>
<source>Connect to a node to retrieve peer addresses, and disconnect</source>
<translation>اتصال به یک گره برای دریافت آدرسهای همتا و قطع اتصال پس از اتمام عملیات</translation>
</message>
<message>
<location line="+1"/>
<source>Specify your own public address</source>
<translation>آدرس عمومی خود را مشخص کنید</translation>
</message>
<message>
<location line="+4"/>
<source>Bind to given address. Use [host]:port notation for IPv6</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Query for peer addresses via DNS lookup, if low on addresses (default: 1 unless -connect)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Always query for peer addresses via DNS lookup (default: 0)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Threshold for disconnecting misbehaving peers (default: 100)</source>
<translation>حد آستانه برای قطع ارتباط با همتایان بدرفتار (پیشفرض: ۱۰۰)</translation>
</message>
<message>
<location line="+1"/>
<source>Number of seconds to keep misbehaving peers from reconnecting (default: 86400)</source>
<translation>مدت زمان جلوگیری از اتصال مجدد همتایان بدرفتار، به ثانیه (پیش‌فرض: ۸۶۴۰۰)</translation>
</message>
<message>
<location line="+153"/>
<source>An error occurred while setting up the RPC port %u for listening on IPv4: %s</source>
<translation>هنگام تنظیم پورت RPC %u برای گوش دادن روی IPv4 خطایی رخ داده است: %s</translation>
</message>
<message>
<location line="-126"/>
<source>Listen for JSON-RPC connections on <port> (default: 15715 or testnet: 25715)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-16"/>
<source>Accept command line and JSON-RPC commands</source>
<translation>پذیرش دستورات خط فرمان و دستورات JSON-RPC</translation>
</message>
<message>
<location line="+1"/>
<source>Run in the background as a daemon and accept commands</source>
<translation>اجرا در پشت زمینه بهصورت یک سرویس و پذیرش دستورات</translation>
</message>
<message>
<location line="+1"/>
<source>Use the test network</source>
<translation>استفاده از شبکهٔ آزمایش</translation>
</message>
<message>
<location line="-23"/>
<source>Accept connections from outside (default: 1 if no -proxy or -connect)</source>
<translation>پذیرش اتصالات از بیرون (پیش‌فرض: ۱ در صورت نبود -proxy یا -connect)</translation>
</message>
<message>
<location line="+160"/>
<source>An error occurred while setting up the RPC port %u for listening on IPv6, falling back to IPv4: %s</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-84"/>
<source>Warning: -paytxfee is set very high! This is the transaction fee you will pay if you send a transaction.</source>
<translation>هشدار: مبلغ paytxfee بسیار بالایی تنظیم شده است! این مبلغ هزینهای است که شما برای تراکنشها پرداخت میکنید.</translation>
</message>
<message>
<location line="+46"/>
<source>Warning: Please check that your computer's date and time are correct! If your clock is wrong ChavezCoin will not work properly.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-19"/>
<source>Warning: error reading wallet.dat! All keys read correctly, but transaction data or address book entries might be missing or incorrect.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-16"/>
<source>Warning: wallet.dat corrupt, data salvaged! Original wallet.dat saved as wallet.{timestamp}.bak in %s; if your balance or transactions are incorrect you should restore from a backup.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-31"/>
<source>Attempt to recover private keys from a corrupt wallet.dat</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Block creation options:</source>
<translation>گزینه‌های ایجاد بلوک:</translation>
</message>
<message>
<location line="-66"/>
<source>Connect only to the specified node(s)</source>
<translation>تنها به گره(های) مشخص‌شده متصل شوید</translation>
</message>
<message>
<location line="+4"/>
<source>Discover own IP address (default: 1 when listening and no -externalip)</source>
<translation>کشف آدرس آی‌پی خود (پیش‌فرض: ۱ هنگام شنود و در صورت نبود -externalip)</translation>
</message>
<message>
<location line="+97"/>
<source>Failed to listen on any port. Use -listen=0 if you want this.</source>
<translation>گوش دادن روی هیچ درگاهی ممکن نیست. اگر چنین حالتی را می‌خواهید از -listen=0 استفاده کنید.</translation>
</message>
<message>
<location line="-2"/>
<source>Invalid -tor address: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Invalid amount for -reservebalance=<amount></source>
<translation type="unfinished"/>
</message>
<message>
<location line="-85"/>
<source>Maximum per-connection receive buffer, <n>*1000 bytes (default: 5000)</source>
<translation>حداکثر بافر دریافت به‌ازای هر اتصال، <n>*۱۰۰۰ بایت (پیش‌فرض: ۵۰۰۰)</translation>
</message>
<message>
<location line="+1"/>
<source>Maximum per-connection send buffer, <n>*1000 bytes (default: 1000)</source>
<translation>حداکثر بافر ارسال به‌ازای هر اتصال، <n>*۱۰۰۰ بایت (پیش‌فرض: ۱۰۰۰)</translation>
</message>
<message>
<location line="-16"/>
<source>Only connect to nodes in network <net> (IPv4, IPv6 or Tor)</source>
<translation>تنها به گره‌های شبکهٔ <net> متصل شوید (IPv4، IPv6 یا Tor)</translation>
</message>
<message>
<location line="+30"/>
<source>Prepend debug output with timestamp</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+36"/>
<source>SSL options: (see the Bitcoin Wiki for SSL setup instructions)</source>
<translation>گزینه‌های SSL: (برای راهنمای راه‌اندازی SSL به ویکی Bitcoin مراجعه کنید)</translation>
</message>
<message>
<location line="-34"/>
<source>Send trace/debug info to console instead of debug.log file</source>
<translation>اطلاعات ردگیری/اشکالزدایی را به جای فایل لاگ اشکالزدایی به کنسول بفرستید</translation>
</message>
<message>
<location line="+33"/>
<source>Set maximum block size in bytes (default: 250000)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-1"/>
<source>Set minimum block size in bytes (default: 0)</source>
<translation>حداقل سایز بلاک بر اساس بایت تنظیم شود (پیش فرض: 0)</translation>
</message>
<message>
<location line="-33"/>
<source>Shrink debug.log file on client startup (default: 1 when no -debug)</source>
<translation>کوچک کردن فایل debug.log هنگام راه‌اندازی کلاینت (پیش‌فرض: ۱ در صورت نبود -debug)</translation>
</message>
<message>
<location line="-41"/>
<source>Specify connection timeout in milliseconds (default: 5000)</source>
<translation>زمان انتظار اتصال را به میلی‌ثانیه مشخص کنید (پیش‌فرض: ۵۰۰۰)</translation>
</message>
<message>
<location line="+28"/>
<source>Use UPnP to map the listening port (default: 0)</source>
<translation>از UPnP برای شناسایی درگاه شنیداری استفاده کنید (پیش فرض:0)</translation>
</message>
<message>
<location line="-1"/>
<source>Use UPnP to map the listening port (default: 1 when listening)</source>
<translation>از UPnP برای شناسایی درگاه شنیداری استفاده کنید (پیش فرض:1 در زمان شنیدن)</translation>
</message>
<message>
<location line="-25"/>
<source>Use proxy to reach tor hidden services (default: same as -proxy)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+45"/>
<source>Username for JSON-RPC connections</source>
<translation>نام کاربری برای اتصالات JSON-RPC</translation>
</message>
<message>
<location line="+50"/>
<source>Verifying database integrity...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+43"/>
<source>Error: Wallet locked, unable to create transaction!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Error: This transaction requires a transaction fee of at least %s because of its amount, complexity, or use of recently received funds!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Error: Transaction creation failed!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Error: The transaction was rejected! This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-15"/>
<source>Warning</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Information</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-7"/>
<source>Warning: This version is obsolete, upgrade required!</source>
<translation>هشدار: این نسخه قدیمی است؛ روزآمدسازی لازم است!</translation>
</message>
<message>
<location line="-23"/>
<source>wallet.dat corrupt, salvage failed</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-55"/>
<source>Password for JSON-RPC connections</source>
<translation>گذرواژه برای اتصالات JSON-RPC</translation>
</message>
<message>
<location line="-47"/>
<source>Connect through SOCKS5 proxy</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+17"/>
<source>Sync time with other nodes. Disable if time on your system is precise e.g. syncing with NTP (default: 1)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+12"/>
<source>When creating transactions, ignore inputs with value less than this (default: 0.01)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>Output debugging information (default: 0, supplying <category> is optional)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>If <category> is not supplied, output all debugging information.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source><category> can be:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Enter regression test mode, which uses a special chain in which blocks can be solved instantly. This is intended for regression testing tools and app development.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>Allow JSON-RPC connections from specified IP address</source>
<translation>پذیرش اتصالات JSON-RPC از آدرس آی‌پی مشخص‌شده</translation>
</message>
<message>
<location line="+1"/>
<source>Send commands to node running on <ip> (default: 127.0.0.1)</source>
<translation>ارسال فرمان‌ها به گرهٔ در حال اجرا روی <ip> (پیش‌فرض: 127.0.0.1)</translation>
</message>
<message>
<location line="+1"/>
<source>Wait for RPC server to start</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Set the number of threads to service RPC calls (default: 4)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Execute command when the best block changes (%s in cmd is replaced by block hash)</source>
<translation>زمانی که بهترین بلاک تغییر کرد، دستور را اجرا کن (%s در cmd با block hash جایگزین شده است)</translation>
</message>
<message>
<location line="+3"/>
<source>Execute command when a wallet transaction changes (%s in cmd is replaced by TxID)</source>
<translation>هنگامی که یک تراکنش در کیف پولی رخ می دهد، دستور را اجرا کن(%s در دستورات بوسیله ی TxID جایگزین می شود)</translation>
</message>
<message>
<location line="+3"/>
<source>Require a confirmations for change (default: 0)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Execute command when a relevant alert is received (%s in cmd is replaced by message)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Upgrade wallet to latest format</source>
<translation>کیف پول را به جدیدترین قالب روزآمد کنید</translation>
</message>
<message>
<location line="+1"/>
<source>Set key pool size to <n> (default: 100)</source>
<translation>اندازهٔ استخر کلیدها را روی <n> تنظیم کن (پیش‌فرض: ۱۰۰)</translation>
</message>
<message>
<location line="+1"/>
<source>Rescan the block chain for missing wallet transactions</source>
<translation>پویش مجدد زنجیرهٔ بلوک‌ها برای یافتن تراکنش‌های گم‌شدهٔ کیف پول</translation>
</message>
<message>
<location line="+3"/>
<source>How thorough the block verification is (0-6, default: 1)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Imports blocks from external blk000?.dat file</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Keep at most <n> MiB of unconnectable blocks in memory (default: %u)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Use OpenSSL (https) for JSON-RPC connections</source>
<translation>برای اتصالات JSON-RPC از OpenSSL (https) استفاده کنید</translation>
</message>
<message>
<location line="+1"/>
<source>Server certificate file (default: server.cert)</source>
<translation>فایل گواهی‌نامهٔ سرور (پیش‌فرض: server.cert)</translation>
</message>
<message>
<location line="+1"/>
<source>Server private key (default: server.pem)</source>
<translation>کلید خصوصی سرور (پیش‌فرض: server.pem)</translation>
</message>
<message>
<location line="+5"/>
<source>Error: Unsupported argument -socks found. Setting SOCKS version isn't possible anymore, only SOCKS5 proxies are supported.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>Initialization sanity check failed. ChavezCoin is shutting down.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+20"/>
<source>Error loading block database</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+29"/>
<source>Error: Wallet unlocked for staking only, unable to create transaction.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-14"/>
<source>Error: Disk space is low!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>This is a pre-release test build - use at your own risk - do not use for mining or merchant applications</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-135"/>
<source>This help message</source>
<translation>این پیام راهنما</translation>
</message>
<message>
<location line="+100"/>
<source>Wallet %s resides outside data directory %s.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+46"/>
<source>Unable to bind to %s on this computer (bind returned error %d, %s)</source>
<translation>امکان اتصال به %s از این رایانه وجود ندارد ( bind returned error %d, %s)</translation>
</message>
<message>
<location line="-136"/>
<source>Allow DNS lookups for -addnode, -seednode and -connect</source>
<translation>اجازهٔ جست‌وجوی DNS برای -addnode، -seednode و -connect</translation>
</message>
<message>
<location line="+121"/>
<source>Loading addresses...</source>
<translation>بارگیری نشانی‌ها...</translation>
</message>
<message>
<location line="-10"/>
<source>Error loading wallet.dat: Wallet corrupted</source>
<translation>خطا در بارگیری wallet.dat: کیف پول خراب شده است</translation>
</message>
<message>
<location line="+4"/>
<source>Error loading wallet.dat: Wallet requires newer version of ChavezCoin</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Wallet needed to be rewritten: restart ChavezCoin to complete</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Error loading wallet.dat</source>
<translation>خطا در بارگیری wallet.dat</translation>
</message>
<message>
<location line="-15"/>
<source>Invalid -proxy address: '%s'</source>
<translation>نشانی پراکسی نامعتبر: '%s'</translation>
</message>
<message>
<location line="-1"/>
<source>Unknown network specified in -onlynet: '%s'</source>
<translation>شبکهٔ ناشناخته‌ای در -onlynet مشخص شده است: '%s'</translation>
</message>
<message>
<location line="+3"/>
<source>Cannot resolve -bind address: '%s'</source>
<translation>امکان تبدیل نشانی -bind وجود ندارد: '%s'</translation>
</message>
<message>
<location line="+2"/>
<source>Cannot resolve -externalip address: '%s'</source>
<translation>امکان تبدیل نشانی -externalip وجود ندارد: '%s'</translation>
</message>
<message>
<location line="-22"/>
<source>Invalid amount for -paytxfee=<amount>: '%s'</source>
<translation>مبلغ نامعتبر برای -paytxfee=<amount>: '%s'</translation>
</message>
<message>
<location line="+59"/>
<source>Sending...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Invalid amount</source>
<translation>میزان وجه اشتباه</translation>
</message>
<message>
<location line="+1"/>
<source>Insufficient funds</source>
<translation>موجودی ناکافی</translation>
</message>
<message>
<location line="-41"/>
<source>Loading block index...</source>
<translation>بارگیری شاخص بلوک‌ها...</translation>
</message>
<message>
<location line="-105"/>
<source>Add a node to connect to and attempt to keep the connection open</source>
<translation>افزودن یک گره برای اتصال و تلاش برای باز نگه داشتن اتصال</translation>
</message>
<message>
<location line="+131"/>
<source>Unable to bind to %s on this computer. ChavezCoin is probably already running.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-108"/>
<source>Fee per KB to add to transactions you send</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+40"/>
<source>How many blocks to check at startup (default: 500, 0 = all)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+11"/>
<source>Acceptable ciphers (default: TLSv1.2+HIGH:TLSv1+HIGH:!SSLv2:!aNULL:!eNULL:!3DES:@STRENGTH)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Warning: Deprecated argument -debugnet ignored, use -debug=net</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>Invalid amount for -mininput=<amount>: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Cannot obtain a lock on data directory %s. ChavezCoin is probably already running.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Error initializing wallet database environment %s!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+15"/>
<source>Loading wallet...</source>
<translation>بارگیری کیف پول...</translation>
</message>
<message>
<location line="+8"/>
<source>Cannot downgrade wallet</source>
<translation>امکان تنزل نسخهٔ کیف پول وجود ندارد</translation>
</message>
<message>
<location line="+1"/>
<source>Cannot write default address</source>
<translation>نوشتن نشانی پیش‌فرض ممکن نیست</translation>
</message>
<message>
<location line="+1"/>
<source>Rescanning...</source>
<translation>پویش مجدد...</translation>
</message>
<message>
<location line="+2"/>
<source>Done loading</source>
<translation>بارگیری انجام شد</translation>
</message>
<message>
<location line="+33"/>
<source>To use the %s option</source>
<translation>برای استفاده از گزینهٔ %s</translation>
</message>
<message>
<location line="-27"/>
<source>Error</source>
<translation>خطا</translation>
</message>
<message>
<location line="+22"/>
<source>You must set rpcpassword=<password> in the configuration file:
%s
If the file does not exist, create it with owner-readable-only file permissions.</source>
<translation>شما باید rpcpassword=<password> را در فایل پیکربندی تنظیم کنید:
%s
اگر این فایل وجود ندارد، آن را با مجوز خواندن فقط برای مالک ایجاد کنید.</translation>
</message>
</context>
</TS>
| |
generate-file-list.ts
|
import fs from 'fs';
import path, { join } from 'path';
/*
This is required as Firefox can't access filesystem like chrome does via browser.runtime.getPackageDirectoryEntry.
Extension will fetch the file list for plugins and scenarios and afterwards the actual files.
*/
// 1. plugin list
const pluginDir = path.join(process.cwd(), 'dist/background/plugins');
const pluginFiles = fs.readdirSync(pluginDir, { withFileTypes: true }).filter(pluginFile => pluginFile.isFile());
fs.writeFileSync(
join(process.cwd(), 'dist/background/plugins/plugin-list.json'),
JSON.stringify(
pluginFiles.map(pluginFile => pluginFile.name.match(/^(\w+)\.js$/)![1]), // escape the dot so only real ".js" suffixes match
),
);
|
// 2. scenario list
const scenarioRootDir = path.join(process.cwd(), 'dist/scenarios');
const scenarioDirs = fs.readdirSync(scenarioRootDir, { withFileTypes: true }).filter(scenarioDir => scenarioDir.isDirectory());
fs.writeFileSync(
join(process.cwd(), 'dist/scenarios/scenario-list.json'),
JSON.stringify(
scenarioDirs.map(scenarioDir => scenarioDir.name),
),
);
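// --- Illustrative consumer sketch (an assumption, not part of this build step) ---
// The header comment says the extension fetches these lists before fetching the
// actual files. A minimal runtime-side sketch; `baseUrl` (for example the result
// of browser.runtime.getURL('')) and the helper name are hypothetical:
async function fetchFileList(baseUrl: string, listPath: string): Promise<string[]> {
    // plugin-list.json / scenario-list.json are plain JSON arrays of names.
    const response = await fetch(new URL(listPath, baseUrl).toString());
    if (!response.ok) {
        throw new Error(`Failed to fetch ${listPath}: ${response.status}`);
    }
    return response.json();
}
// Example: fetchFileList(baseUrl, 'background/plugins/plugin-list.json')
//     .then(names => names.map(name => `background/plugins/${name}.js`));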
| |
mod.rs
|
// Unless explicitly stated otherwise all files in this repository are licensed under the
// MIT/Apache-2.0 License, at your convenience
//
// This product includes software developed at Datadog (https://www.datadoghq.com/). Copyright 2020 Datadog, Inc.
//
//! glommio::timer is a module that provides timing related primitives.
mod timer_impl;
pub use timer_impl::{Timer, TimerActionOnce, TimerActionRepeat};
/// Sleep for some time.
///
/// ```
/// use glommio::LocalExecutor;
/// use glommio::timer::sleep;
/// use std::time::Duration;
///
/// let ex = LocalExecutor::default();
///
/// ex.run(async {
/// sleep(Duration::from_millis(100)).await;
/// });
/// ```
pub async fn sleep(wait: std::time::Duration)
|
{
Timer::new(wait).await;
}
|
|
NeuronTest.py
|
from __future__ import print_function
from __future__ import absolute_import
from .DataTestTemplate import _DataTest
from PyOpenWorm.neuron import Neuron
from PyOpenWorm.cell import Cell
from PyOpenWorm.connection import Connection
from PyOpenWorm.context import Context
class NeuronTest(_DataTest):
ctx_classes = (Neuron, Connection)
def setUp(self):
_DataTest.setUp(self)
self.neur = lambda x: self.ctx.Neuron(name=x)
def test_Cell(self):
do = self.neur('BDUL')
self.assertTrue(isinstance(do, Cell))
def test_receptors(self):
n = self.neur('AVAL')
n.receptor('GLR-2')
self.save()
self.assertIn('GLR-2', list(self.neur('AVAL').receptors()))
def test_same_name_same_id(self):
"""
Test that two Neuron objects with the same name have the same
identifier. Saves us from having too many inserts of the same object.
"""
c = Neuron(name="boots")
c1 = Neuron(name="boots")
self.assertEqual(c.identifier, c1.identifier)
def test_type(self):
n = self.neur('AVAL')
n.type('interneuron')
self.save()
self.assertEqual('interneuron', self.neur('AVAL').type.one())
def test_name(self):
"""
Test that the name property is set when the neuron is initialized
with it
"""
self.assertEqual('AVAL', self.neur('AVAL').name())
self.assertEqual('AVAR', self.neur('AVAR').name())
def test_neighbor(self):
n = self.neur('AVAL')
n.neighbor(self.neur('PVCL'), syntype='send')
neighbors = list(n.neighbor())
self.assertIn(self.neur('PVCL'), neighbors)
self.save()
self.assertIn(self.neur('PVCL'), list(self.neur('AVAL').neighbor()))
def test_neighbor_count(self):
n = self.neur('AVAL')
n.neighbor(self.neur('PVCL'), syntype='send')
self.save()
p = self.ctx.Neuron()
self.neur('AVAL').neighbor(p)
self.assertEqual(1, p.count())
def test_neighbor_count_staged(self):
n = self.neur('AVAL')
n.neighbor(self.neur('PVCL'), syntype='send')
self.assertEqual(1, n.neighbor.count())
def test_neighbor_count_context_staged(self):
n = self.neur('AVAL')
n.neighbor(self.neur('PVCL'), syntype='send')
ctx1 = Context(ident='http://example.org/ctx1')
self.assertEqual(0, ctx1(n).neighbor.count())
def test_connection_count(self):
n = self.neur('AVAL')
n.connection(self.ctx.Connection(n, self.neur('PVCL'), syntype='send'))
self.save()
self.assertEqual(1, self.neur('AVAL').connection.count())
def test_connection_count_staged(self):
n = self.neur('AVAL')
n.connection(self.ctx.Connection(n, self.neur('PVCL'), syntype='send'))
self.assertEqual(1, n.connection.count())
def test_neighbor_context(self):
n0 = self.ctx.Neuron(name='NEURON0')
n1 = self.ctx.Neuron(name='NEURON1')
ctx1 = Context(ident='http://example.org/ctx1')
n0.neighbor(n1)
self.assertEqual(set(), set(ctx1(n0).neighbor()))
def test_connection_get_staged(self):
n0 = self.ctx.Neuron(name='NEURON0')
n1 = self.ctx.Neuron(name='NEURON1')
n0.connection(self.ctx.Connection(pre_cell=n0, post_cell=n1, syntype='send'))
self.assertEqual(1, len(n0.connection()))
def test_connection_only_defined(self):
n0 = self.ctx.Neuron(name='NEURON0')
n0.connection(self.ctx.Connection())
self.assertEqual(0, len(n0.connection()))
def test_connection_context(self):
n0 = self.ctx.Neuron(name='NEURON0')
n1 = self.ctx.Neuron(name='NEURON1')
|
def test_init_from_lineage_name(self):
c = self.ctx.Neuron(lineageName="AB plapaaaap", name="ADAL")
self.save()
for x in self.TestConfig['rdf.graph'].quads((None, None, None, None)):
print(' '.join(y.n3() for y in x))
c = self.context.stored(Neuron)(lineageName="AB plapaaaap")
print(c.context)
self.assertEqual(c.name(), 'ADAL')
|
ctx1 = Context(ident='http://example.org/ctx1')
n0.connection(self.ctx.Connection(pre_cell=n0, post_cell=n1, syntype='send'))
self.assertEqual(set(), set(ctx1(n0).connection()))
|
form.tests.js
|
import $ from "jquery";
import resizeCallbacks from "core/utils/resize_callbacks";
import responsiveBoxScreenMock from "../../helpers/responsiveBoxScreenMock.js";
import keyboardMock from "../../helpers/keyboardMock.js";
import typeUtils from "core/utils/type";
import browser from "core/utils/browser";
import domUtils from "core/utils/dom";
import { __internals as internals } from "ui/form/ui.form";
import themes from "ui/themes";
import device from "core/devices";
import domAdapter from "core/dom_adapter";
import "ui/text_area";
import "common.css!";
import "generic_light.css!";
var INVALID_CLASS = "dx-invalid";
QUnit.testStart(function() {
var markup =
'<div id="form"></div>\
<div id="form2"></div>';
$("#qunit-fixture").html(markup);
});
QUnit.module("Form");
QUnit.test("Check that registerKeyHandler proxy works well", function(assert) {
// arrange, act
var $formContainer = $("#form").dxForm({
items:
[
{
dataField: "name",
editorType: "dxTextBox"
},
{
dataField: "age",
editorType: "dxNumberBox"
}
]
}),
$inputs = $formContainer.find(".dx-texteditor-input"),
counter = 0,
handler = function() { counter++; };
$formContainer.dxForm("instance").registerKeyHandler("tab", handler);
keyboardMock($inputs.eq(0)).keyDown("tab");
// assert
assert.equal(counter, 1, "Custom key handler for the first editor");
keyboardMock($inputs.eq(1)).keyDown("tab");
// assert
assert.equal(counter, 2, "Custom key handler for the second editor");
});
QUnit.testInActiveWindow("Form's inputs saves value on refresh", function(assert) {
// arrange, act
var screen = "md",
$formContainer = $("#form").dxForm({
screenByWidth: function() {
return screen;
},
colCountByScreen: {
sm: 1,
md: 2
},
items: [
{
dataField: "name",
editorType: "dxTextBox"
}
]
});
$("#form input")
.first()
.focus()
.val("test");
screen = "sm";
resizeCallbacks.fire();
// assert
var formData = $formContainer.dxForm("instance").option("formData");
assert.deepEqual(formData, { name: "test" }, "value updates");
});
QUnit.test("Check field wodth on render form with colspan", function(assert) {
// arrange, act
var $testContainer = $("#form");
$testContainer.dxForm({
formData: { ID: 0, FirstName: "John", LastName: "Dow", HireDate: "01/01/1970" },
colCount: 2,
colCountByScreen: { xs: 2 },
items: [{
itemType: "group",
caption: "Employee",
colCount: 2,
items: [
{ dataField: "ID", colSpan: 2 },
{ dataField: "FirstName", visible: true },
{ dataField: "LastName", visible: true },
{ dataField: "HireDate", colSpan: 2, visible: true }
]
}]
});
var $fieldItems = $testContainer.find("." + internals.FIELD_ITEM_CLASS),
fieldWidths = {
ID: $fieldItems.eq(1).width(),
FirstName: $fieldItems.eq(2).width(),
LastName: $fieldItems.eq(3).width(),
HireDate: $fieldItems.eq(4).width()
};
// assert
assert.equal($fieldItems.length, 5, "4 simple items + 1 group item");
assert.equal(fieldWidths.ID, fieldWidths.HireDate, "fields with colspan 2 have the same width");
assert.equal(fieldWidths.FirstName, fieldWidths.LastName, "fields without colspan have the same width");
assert.ok(fieldWidths.ID > fieldWidths.FirstName, "field with colspan 2 is wider than field without colspan");
});
QUnit.test("Change of the formData field change value of the editor", function(assert) {
// arrange
var $testContainer = $("#form");
$testContainer.dxForm({
formData: { FamousPirate: "John Morgan" }
});
var formInstance = $testContainer.dxForm("instance");
// act
formInstance.option("formData.FamousPirate", "Cpt. Jack Sparrow");
// assert
assert.equal(formInstance.getEditor("FamousPirate").option("value"), "Cpt. Jack Sparrow", "Correct value");
});
QUnit.test("Change editor value after formOption is changed and items is defined", function(assert) {
// arrange
var $testContainer = $("#form"),
form;
form = $testContainer.dxForm({
formData: { pirateName: "Blackbeard", type: "captain", isSought: true },
items: ["pirateName", "type", "isSought"]
}).dxForm("instance");
// act
form.option("formData", {
pirateName: "John Morgan",
type: "captain",
isSought: true
});
form.getEditor("isSought").option("value", false);
// assert
assert.deepEqual(form.option("formData"), {
pirateName: "John Morgan",
type: "captain",
isSought: false
}, "FormData is up to date");
});
QUnit.test("Reset editor value after formData changing only if dataField is defined", function(assert) {
// arrange
var $testContainer = $("#form"),
form;
form = $testContainer.dxForm({
formData: { pirateName: "Blackbeard", type: "captain", isSought: "Test", gender: "Male" },
items: [{ dataField: "gender" }, { dataField: "pirateName" }, { dataField: "type" }, { name: "isSought", editorType: "dxTextBox" }]
}).dxForm("instance");
// act
form.getEditor("isSought").option("value", "Changed");
form.getEditor("gender").option("value", "Female");
form.option("formData", {
pirateName: "John Morgan",
type: "captain"
});
// assert
assert.equal(form.getEditor("isSought").option("value"), "Changed", "'isSought' editor wasn't reseted");
assert.equal(form.getEditor("gender").option("value"), "", "'gender' editor was reseted");
});
QUnit.test("Invalid field name when item is defined not as string and not as object", function(assert) {
// arrange, act
var form = $("#form").dxForm({
formData: { name: "Batman", lastName: "Klark" },
items: [1, "lastName"]
}).dxForm("instance");
// assert
assert.equal(form.$element().find("." + internals.FIELD_ITEM_CLASS).length, 1, "items count");
assert.equal(form.getEditor("name"), undefined, "editor by name field");
assert.equal(form.getEditor("lastName").option("value"), "Klark", "editor by lastName field");
});
QUnit.test("dxshown event fire when visible option changed to true", function(assert) {
// arrange
var form = $("#form").dxForm({
formData: { id: 1 }
}).dxForm("instance"),
dxShownEventCounter = 0;
$(form.$element())
.find(".dx-visibility-change-handler")
.first()
.on("dxshown", function() {
dxShownEventCounter++;
});
// act, assert
form.option("visible", false);
assert.equal(dxShownEventCounter, 0, "dxshown event does not fire");
form.option("visible", true);
assert.equal(dxShownEventCounter, 1, "dxshown event fired");
});
QUnit.test("Reset editor's value when the formData option is empty object", function(assert) {
// arrange
var values = [],
form = $("#form").dxForm({
formData: {
name: "User",
room: 1
},
items: ["name", "lastName", "sex", "room", "isDeveloper"],
onFieldDataChanged: function(e) {
values.push({
dataField: e.dataField,
value: e.value
});
}
}).dxForm("instance");
// act
form.option("formData", {});
// assert
assert.equal(form.getEditor("name").option("value"), "", "editor for the name dataField");
assert.equal(form.getEditor("room").option("value"), null, "editor for the room dataField");
assert.deepEqual(values[0], { dataField: "name", value: "" }, "value of name dataField");
assert.deepEqual(values[3], { dataField: "room", value: null }, "value of room dataField");
});
QUnit.test("Reset editor's value when the formData option is null", function(assert) {
// arrange
var form = $("#form").dxForm({
formData: {
name: "User",
room: 1
},
items: ["name", "room"]
}).dxForm("instance");
// act
form.option("formData", null);
// assert
assert.equal(form.getEditor("name").option("value"), "", "editor for the name dataField");
assert.equal(form.getEditor("room").option("value"), null, "editor for the room dataField");
});
QUnit.test("Reset editor's value when the formData option is undefined", function(assert) {
// arrange
var form = $("#form").dxForm({
formData: {
name: "User",
room: 1
},
items: ["name", "room"]
}).dxForm("instance");
// act
form.option("formData", undefined);
// assert
assert.equal(form.getEditor("name").option("value"), "", "editor for the name dataField");
assert.equal(form.getEditor("room").option("value"), null, "editor for the room dataField");
});
QUnit.test("Reset editor's value with validation", function(assert) {
// arrange
var form = $("#form").dxForm({
formData: {
name: "User",
lastName: "John"
},
items: ["name", { dataField: "lastName", isRequired: true }]
}).dxForm("instance");
// act
form.option("formData", undefined);
// assert
assert.equal(form.getEditor("name").option("value"), "", "editor for the name dataField");
assert.equal(form.getEditor("lastName").option("value"), "", "editor for the lastName dataField");
assert.ok(!form.getEditor("lastName").$element().hasClass(INVALID_CLASS), "not invalid css class");
assert.ok(form.getEditor("lastName").option("isValid"), "isValid");
});
QUnit.test("The 'dataField' option of a simple item should affect the editorOptions.name option", function(assert) {
var form = $("#form").dxForm({
formData: {
firstName: "Mike"
},
items: [{ dataField: "firstName" }]
}).dxForm("instance");
assert.equal(form.getEditor("firstName").option("name"), "firstName", "Editor name is OK");
});
QUnit.test("The 'dataField' option of a simple item should not affect existing editorOptions.name option", function(assert) {
var form = $("#form").dxForm({
formData: {
firstName: "Mike"
},
items: [{ dataField: "firstName", editorOptions: { name: "UserName" } }]
}).dxForm("instance");
assert.equal(form.getEditor("firstName").option("name"), "UserName", "Editor name is OK");
});
QUnit.test("Refresh form when visibility changed to 'true' in msie browser", function(assert) {
// arrange, act
var $testContainer = $("#form"),
expectedRefreshCount = browser.msie ? 1 : 0,
form;
form = $testContainer.dxForm({
formData: { name: "TestName" },
items: [{ dataField: "name" }]
}).dxForm("instance");
var refreshStub = sinon.stub(form, "_refresh");
domUtils.triggerHidingEvent($testContainer);
domUtils.triggerShownEvent($testContainer);
// assert
assert.equal(refreshStub.callCount, expectedRefreshCount, "Refresh on visibility changed to 'true' if browser is IE or Edge");
refreshStub.restore();
});
QUnit.test("Hide helper text when validation message shows for material theme", function(assert) {
var origIsMaterial = themes.isMaterial;
themes.isMaterial = function() { return true; };
var form = $("#form").dxForm({
formData: {
name: "User",
lastName: ""
},
items: [
{ dataField: "name", helpText: "First name field" },
{ dataField: "lastName", isRequired: true, helpText: "Last name field" }
]
}).dxForm("instance");
form.validate();
form.getEditor("lastName").focus();
assert.ok(form.getEditor("lastName").$element().parents(".dx-field-item-content-wrapper").hasClass(INVALID_CLASS), "invalid css class");
form.getEditor("name").focus();
assert.ok(!form.getEditor("lastName").$element().parents(".dx-field-item-content-wrapper").hasClass(INVALID_CLASS), "not invalid css class");
assert.ok(!form.getEditor("name").$element().parents(".dx-field-item-content-wrapper").hasClass(INVALID_CLASS), "not invalid css class");
themes.isMaterial = origIsMaterial;
});
QUnit.test("The formData is updated correctly when formData has 'undefined' value", function(assert) {
// arrange
var $testContainer = $("#form").dxForm({
formData: undefined,
items: [{ dataField: "City" }]
}),
form = $testContainer.dxForm("instance");
// act
var editor = form.getEditor("City");
editor.option("value", "New York");
// assert
var formData = form.option("formData");
assert.deepEqual(formData, { City: "New York" }, "updated formData");
assert.equal($testContainer.find(".dx-field-item").length, 1, "form item is rendered");
});
QUnit.test("The formData with composite object is updated correctly when formData has 'undefined' value", function(assert) {
// arrange
var $testContainer = $("#form").dxForm({
formData: undefined,
items: [{ dataField: "Employee.City" }]
}),
form = $testContainer.dxForm("instance");
// act
var editor = form.getEditor("Employee.City");
editor.option("value", "New York");
// assert
var formData = form.option("formData");
assert.deepEqual(formData, { Employee: { City: "New York" } }, "formData is updated");
assert.equal($testContainer.find(".dx-field-item").length, 1, "form item is rendered");
});
QUnit.test("From renders the right types of editors by default", function(assert) {
// arrange
var $testContainer = $("#form").dxForm({
formData: { id: 1, name: "Name" }
});
// assert
assert.ok($testContainer.find(".dx-field-item .dx-numberbox").hasClass("dx-editor-outlined"), "right class rendered");
assert.ok($testContainer.find(".dx-field-item .dx-textbox").hasClass("dx-editor-outlined"), "right class rendered");
});
QUnit.test("From renders the right types of editors according to stylingMode option", function(assert) {
// arrange
var $testContainer = $("#form").dxForm({
formData: { id: 1, name: "Name" },
stylingMode: "underlined"
});
// assert
assert.ok($testContainer.find(".dx-field-item .dx-numberbox").hasClass("dx-editor-underlined"), "right class rendered");
assert.ok($testContainer.find(".dx-field-item .dx-textbox").hasClass("dx-editor-underlined"), "right class rendered");
});
QUnit.module("Tabs", {
beforeEach: function() {
var that = this;
that.clock = sinon.useFakeTimers();
responsiveBoxScreenMock.setup.call(this, 1200);
},
afterEach: function() {
this.clock.restore();
responsiveBoxScreenMock.teardown.call(this);
}
});
QUnit.test("items aren't tiny", function(assert) {
// arrange, act
let testContainer = $("#form");
testContainer.dxForm({
formData: {
firstName: "John",
lastName: "Smith",
sex: true,
order: 101,
photo: "image.png",
address: {
city: "Test City",
room: 11,
house: 7,
street: "Test street"
}
},
items: [
{
itemType: "group",
colCount: 2,
items: ["firstName", "lastName"]
},
{
itemType: "tabbed",
tabPanelOptions: { animationEnabled: true },
tabs: [
{
title: "Address1",
items: ["address.city", "address.street"]
},
{
title: "Address2",
items: ["address.room", "address.house"]
}]
}]
});
// assert
assert.ok(testContainer.find(".dx-multiview-item .dx-textbox").first().width() / testContainer.width() > 0.5, "Editors are not tiny");
});
QUnit.test("Render tabs when formData is changed", function(assert) {
// arrange, act
var testContainer = $("#form"),
form = testContainer.dxForm({
formData: {
firstName: "John",
lastName: "Smith",
order: 101,
photo: "image.png",
address: {
city: "Test City",
room: 11,
house: 7,
street: "Test street"
}
},
items: [
{
itemType: "tabbed",
tabs: [
{
title: "Other1",
items: [{
itemType: "group",
colCount: 2,
items: ["firstName", "lastName"]
}, {
itemType: "group",
items: ["address.city", "address.street"]
}]
},
{
title: "Other2",
items: [{
itemType: "group",
colCount: 2,
items: ["address.room", "address.house"]
}]
}]
}]
}).dxForm("instance"),
$groups = testContainer.find(".dx-item-selected " + "." + internals.FORM_GROUP_CLASS);
// act
form.option("formData", {
firstName: "Test Name",
lastName: "Test Last Name",
order: 102,
photo: "image3.png",
address: {
city: "New City",
room: 1,
house: 3,
street: "New street"
} });
this.clock.tick();
// assert
$groups = testContainer.find(".dx-item-selected " + "." + internals.FORM_GROUP_CLASS);
assert.equal($groups.length, 2);
assert.equal($groups.eq(0).find("." + internals.FIELD_ITEM_CLASS).length, 2, "group 1");
assert.equal($groups.eq(1).find("." + internals.FIELD_ITEM_CLASS).length, 2, "group 2");
// act
testContainer.find(".dx-tabpanel").dxTabPanel("instance").option("selectedIndex", 1);
this.clock.tick();
$groups = testContainer.find(".dx-item-selected " + "." + internals.FORM_GROUP_CLASS);
// assert
assert.equal($groups.length, 1);
assert.equal($groups.eq(0).find("." + internals.FIELD_ITEM_CLASS).length, 2, "group 1");
});
QUnit.test("Check align labels", function(assert) {
// arrange, act
var testContainer = $("#form"),
form = testContainer.dxForm({
formData: {
firstName: "John",
lastName: "Smith",
order: 101,
photo: "image.png",
address: {
city: "Test City",
room: 11,
house: 7,
street: "Test street"
}
},
items: [
"test order", "photo personal",
{
itemType: "tabbed",
tabs: [
{
title: "Address1",
items: [{
itemType: "group",
colCount: 2,
items: ["address.city", "address.street", "address.room", "address.house"]
}]
},
{
title: "Address2",
colCount: 2,
items: ["firstName", "lastName"]
}]
}]
}).dxForm("instance"),
$labelTexts,
labelWidth,
$layoutManager,
$layoutManagers = testContainer.find("." + internals.FORM_LAYOUT_MANAGER_CLASS);
// assert
$layoutManager = $layoutManagers.eq(0);
$labelTexts = findLabelTextsInColumn($layoutManager, 0);
assert.roughEqual($labelTexts.eq(0).width(), $labelTexts.eq(1).width(), 1, "col 1");
$layoutManager = $layoutManagers.eq(1);
$labelTexts = findLabelTextsInColumn($layoutManager, 0);
labelWidth = getLabelWidth($layoutManager, form, "Address room:");
assert.roughEqual($labelTexts.eq(0).width(), labelWidth, 1, "tab 1 col 1");
$labelTexts = findLabelTextsInColumn($layoutManager, 1);
labelWidth = getLabelWidth($layoutManager, form, "Address house:");
assert.roughEqual($labelTexts.eq(1).width(), labelWidth, 1, "tab 1 col 2");
// act
testContainer.find(".dx-tabpanel").dxTabPanel("instance").option("selectedIndex", 1);
this.clock.tick();
// assert
$layoutManagers = testContainer.find("." + internals.FORM_LAYOUT_MANAGER_CLASS);
$layoutManager = $layoutManagers.eq(3);
$labelTexts = findLabelTextsInColumn($layoutManager, 0);
labelWidth = getLabelWidth($layoutManager, form, "First Name:");
assert.roughEqual($labelTexts.eq(0).width(), labelWidth, 1, "tab 2 col 1");
$labelTexts = findLabelTextsInColumn($layoutManager, 1);
labelWidth = getLabelWidth($layoutManager, form, "Last Name:");
assert.roughEqual($labelTexts.eq(0).width(), labelWidth, 1, "tab 2 col 2");
});
QUnit.test("Check align labels when layout is changed by default_T306106", function(assert) {
// arrange, act
this.updateScreenSize(500);
var testContainer = $("#form"),
form = testContainer.dxForm({
formData: {
firstName: "John",
lastName: "Smith",
order: 101,
photo: "image.png",
address: {
city: "Test City",
room: 11,
house: 7,
street: "Test street"
}
},
items: [
"test order", "photo personal",
{
itemType: "tabbed",
tabs: [
{
title: "Address1",
items: [{
itemType: "group",
colCount: 2,
items: ["address.city", "address.street", "address.room", "address.house"]
}]
},
{
title: "Address2",
colCount: 2,
items: ["firstName", "lastName"]
}]
}]
}).dxForm("instance"),
labelWidth,
labelContentWidth,
$labelsContent,
$layoutManager,
$layoutManagers = testContainer.find("." + internals.FORM_LAYOUT_MANAGER_CLASS),
i;
// assert
$layoutManager = $layoutManagers.eq(1);
$labelsContent = $layoutManager.find("." + internals.FIELD_ITEM_LABEL_CONTENT_CLASS);
labelWidth = getLabelWidth($layoutManager, form, "Address house:");
for(i = 0; i < 4; i++) {
labelContentWidth = $labelsContent.eq(i).width();
assert.roughEqual(labelContentWidth, labelWidth, 1, "tab 1, item " + i);
}
// act
testContainer.find(".dx-tabpanel").dxTabPanel("instance").option("selectedIndex", 1);
this.clock.tick();
// assert
$layoutManagers = testContainer.find("." + internals.FORM_LAYOUT_MANAGER_CLASS);
$layoutManager = $layoutManagers.eq(3);
$labelsContent = $layoutManager.find("." + internals.FIELD_ITEM_LABEL_CONTENT_CLASS);
labelWidth = getLabelWidth($layoutManager, form, "First Name:");
for(i = 0; i < 2; i++) {
labelContentWidth = $labelsContent.eq(i).width();
assert.roughEqual(labelContentWidth, labelWidth, 1, "tab 2, item " + i);
}
});
QUnit.test("Check align labels when layout is changed_T306106", function(assert) {
// arrange
var testContainer = $("#form"),
form = testContainer.dxForm({
formData: {
firstName: "John",
lastName: "Smith",
order: 101,
photo: "image.png",
address: {
city: "Test City",
room: 11,
house: 7,
street: "Test street"
}
},
items: [
"test order", "photo personal",
{
itemType: "tabbed",
tabs: [
{
title: "Address1",
items: [{
itemType: "group",
colCount: 2,
items: ["address.city", "address.street", "address.room", "address.house"]
}]
},
{
title: "Address2",
colCount: 2,
items: ["firstName", "lastName"]
}]
}]
}).dxForm("instance"),
labelWidth,
labelContentWidth,
$labelsContent,
$layoutManager,
$layoutManagers = testContainer.find("." + internals.FORM_LAYOUT_MANAGER_CLASS),
i;
// act
this.updateScreenSize(500);
// assert
$layoutManager = $layoutManagers.eq(1);
$labelsContent = $layoutManager.find("." + internals.FIELD_ITEM_LABEL_CONTENT_CLASS);
labelWidth = getLabelWidth($layoutManager, form, "Address house:");
for(i = 0; i < 4; i++) {
labelContentWidth = $labelsContent.eq(i).width();
assert.roughEqual(labelContentWidth, labelWidth, 1, "tab 1, item " + i);
}
// act
testContainer.find(".dx-tabpanel").dxTabPanel("instance").option("selectedIndex", 1);
this.clock.tick();
// assert
$layoutManagers = testContainer.find("." + internals.FORM_LAYOUT_MANAGER_CLASS);
$layoutManager = $layoutManagers.eq(3);
$labelsContent = $layoutManager.find("." + internals.FIELD_ITEM_LABEL_CONTENT_CLASS);
labelWidth = getLabelWidth($layoutManager, form, "First Name:");
for(i = 0; i < 2; i++) {
labelContentWidth = $labelsContent.eq(i).width();
assert.roughEqual(labelContentWidth, labelWidth, 1, "tab 2, item " + i);
}
});
QUnit.test("Data is updated correctly_T353275", function(assert) {
// arrange
var testContainer = $("#form"),
form = testContainer.dxForm({
formData: {
firstName: ""
},
items: [
{
itemType: "tabbed",
tabs: [
{
items: ["firstName"]
}]
}]
}).dxForm("instance");
// act
form.updateData("firstName", "Test First Name");
// assert
assert.equal(form.getEditor("firstName").option("value"), "Test First Name", "value of editor by 'firstName' field");
});
QUnit.test("Update editorOptions of an editor inside the tab", function(assert) {
// arrange
var testContainer = $("#form"),
form = testContainer.dxForm({
formData: {
firstName: "Test name"
},
items: [{
itemType: "tabbed",
tabs: [{
items: [{
dataField: "firstName",
editorOptions: {
disabled: true
}
}]
}]
}]
}).dxForm("instance");
assert.equal(form.getEditor("firstName").option("disabled"), true, "initial state: editor is disabled");
// act
form.option("items[0].tabs[0].items[0].editorOptions.disabled", false);
// assert
assert.equal(form.getEditor("firstName").option("disabled"), false, "'disabled' option was successfully changed");
});
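// Tests for label alignment across columns, groups, and screen-size changes.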
QUnit.module("Align labels", {
beforeEach: function() {
var that = this;
that.testObject = {
"ID": 1,
"FirstName": "John",
"LastName": "Heart",
"Prefix": "Mr.",
"Position": "CEO",
"Picture": "images/employees/01.png",
"BirthDate": "1964/03/16",
"HireDate": "1995/01/15",
"Notes": "John has been in the Audio/Video industry since 1990. He has led DevAv as its CEO since 2003.\r\n\r\nWhen not working hard as the CEO, John loves to golf and bowl. He once bowled a perfect game of 300.",
"Address": "351 S Hill St.",
"StateID": 5
};
responsiveBoxScreenMock.setup.call(this, 1200);
},
afterEach: function() {
responsiveBoxScreenMock.teardown.call(this);
}
});
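// Renders a detached label with the given text via the root layout manager to measure its natural width.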
function getLabelWidth(container, form, text) {
var $label = form._rootLayoutManager._renderLabel({ text: text, location: "left" }).appendTo(container),
width = $label.children().first().width();
$label.remove();
return width;
}
function
|
($container, columnIndex) {
return $container.find("." + internals.FORM_FIELD_ITEM_COL_CLASS + columnIndex + " ." + internals.FIELD_ITEM_LABEL_CONTENT_CLASS);
}
QUnit.test("Align labels in column", function(assert) {
// arrange, act
var testContainer = $("#form"),
form = testContainer.dxForm({
formData: this.testObject,
colCount: 4,
customizeItem: function(item) {
switch(item.dataField) {
case "FirstName":
case "LastName":
item.colSpan = 2;
break;
case "Prefix":
item.colSpan = 4;
break;
case "Notes":
item.colSpan = 5;
break;
case "StateID":
item.colSpan = 3;
break;
default:
}
}
}).dxForm("instance");
var $col1 = $(".dx-col-0"),
$col2 = $(".dx-col-1"),
$col3 = $(".dx-col-2"),
$col4 = $(".dx-col-3"),
$maxLabelWidth = getLabelWidth(testContainer, form, "Position:"),
i,
labelWidth;
// assert
for(i = 0; i < 4; i++) {
labelWidth = $col1.eq(i).find("." + internals.FIELD_ITEM_LABEL_CONTENT_CLASS).first().width();
assert.roughEqual(labelWidth, $maxLabelWidth, 1, "col0 item " + i);
}
$maxLabelWidth = getLabelWidth(testContainer, form, "First Name:");
for(i = 0; i < 3; i++) {
labelWidth = $col2.eq(i).find("." + internals.FIELD_ITEM_LABEL_CONTENT_CLASS).first().width();
assert.roughEqual(labelWidth, $maxLabelWidth, 1, "col1 item " + i);
}
$maxLabelWidth = getLabelWidth(testContainer, form, "Birth Date:");
for(i = 0; i < 2; i++) {
labelWidth = $col3.eq(i).find("." + internals.FIELD_ITEM_LABEL_CONTENT_CLASS).first().width();
assert.roughEqual(labelWidth, $maxLabelWidth, 1, "col2 item " + i);
}
$maxLabelWidth = getLabelWidth(testContainer, form, "Last Name:");
for(i = 0; i < 2; i++) {
labelWidth = $col4.eq(i).find("." + internals.FIELD_ITEM_LABEL_CONTENT_CLASS).first().width();
assert.roughEqual(labelWidth, $maxLabelWidth, 1, "col3 item " + i);
}
assert.equal($("." + internals.HIDDEN_LABEL_CLASS).length, 0, "hidden labels count");
});
QUnit.test("Align labels in column when labels text is identical", function(assert) {
// arrange, act
var testContainer = $("#form"),
form = testContainer.dxForm({
formData: { TestBool: true, ShipName: "Test" }
}).dxForm("instance");
var $col1 = $(".dx-col-0"),
$maxLabelWidth = getLabelWidth(testContainer, form, "Ship Name:"),
i;
// assert
for(i = 0; i < 2; i++) {
var labelWidth = $col1.eq(i).find("." + internals.FIELD_ITEM_LABEL_CONTENT_CLASS).first().width();
assert.roughEqual(labelWidth, $maxLabelWidth, 1, "col0 item " + i);
}
});
QUnit.test("Disable alignItemLabels", function(assert) {
// arrange, act
var testContainer = $("#form");
testContainer.dxForm({
formData: { TestBool: true, ShipName: "Test" },
alignItemLabels: false
}).dxForm("instance");
var $labelTexts = $("." + internals.FIELD_ITEM_LABEL_CONTENT_CLASS);
// assert
assert.notEqual($labelTexts.eq(0).width(), $labelTexts.eq(1).width());
});
QUnit.test("Disable alignItemLabels in group", function(assert) {
// arrange, act
var testContainer = $("#form");
testContainer.dxForm({
formData: { TestBool: true, ShipName: "Test", Name: "John", LastName: "Smith" },
items: [
{
itemType: "group",
alignItemLabels: false,
items: ["TestBool", "ShipName"]
},
{
itemType: "group",
items: ["Name", "LastName"]
}
]
}).dxForm("instance");
var $groups = $("." + internals.FORM_GROUP_CLASS),
$labelTexts = $groups.eq(0).find("." + internals.FIELD_ITEM_LABEL_CONTENT_CLASS);
// assert
assert.notEqual($labelTexts.eq(0).width(), $labelTexts.eq(1).width(), "group 1");
$labelTexts = $groups.eq(1).find("." + internals.FIELD_ITEM_LABEL_CONTENT_CLASS);
assert.equal($labelTexts.eq(0).width(), $labelTexts.eq(1).width(), "group 2");
});
QUnit.test("Align labels in column when alignItemLabelsInAllGroups is enabled", function(assert) {
// arrange, act
var testContainer = $("#form"),
form = testContainer.dxForm({
colCount: 2,
formData: {
firstName: "John",
lastName: "Smith",
middleName: "Test Middle Name",
order: 101,
photo: "image.png",
address: {
city: "Test City",
room: 11,
house: 7,
street: "Test street"
}
},
items: [
{
itemType: "group",
colCount: 3,
items: ["firstName", "lastName", "middleName"]
},
{
itemType: "group",
colCount: 2,
items: ["photo", "order"]
},
{
itemType: "group",
colCount: 2,
items: ["address.city", "address.street"]
},
{
itemType: "group",
colCount: 2,
items: ["address.room", "address.house"]
}]
}).dxForm("instance"),
labelWidth,
textWidth,
$groups,
$texts,
i;
// assert
$groups = form._getGroupElementsInColumn(testContainer, 0);
$texts = findLabelTextsInColumn($groups, 0);
labelWidth = getLabelWidth(testContainer, form, "Address city:");
for(i = 0; i < 2; i++) {
textWidth = $texts.eq(i).width();
assert.roughEqual(textWidth, labelWidth, 1, "group col 1, col1 item " + i);
}
$texts = findLabelTextsInColumn($groups, 1);
assert.roughEqual($texts.eq(0).width(), getLabelWidth(testContainer, form, "Last Name:"), 1, "group col 1, col2 item 1");
assert.roughEqual($texts.eq(1).width(), getLabelWidth(testContainer, form, "Address street:"), 1, "group col 1, col2 item 2");
$texts = findLabelTextsInColumn($groups, 2);
labelWidth = getLabelWidth(testContainer, form, "Middle Name:");
assert.roughEqual($texts.eq(0).width(), labelWidth, 1, "group col 1, col3 item 1");
$groups = form._getGroupElementsInColumn(testContainer, 1);
$texts = findLabelTextsInColumn($groups, 0);
labelWidth = getLabelWidth(testContainer, form, "Address room:");
for(i = 0; i < 2; i++) {
textWidth = $texts.eq(i).width();
assert.roughEqual(textWidth, labelWidth, 1, "group col 2, col1 item " + i);
}
$texts = findLabelTextsInColumn($groups, 1);
labelWidth = getLabelWidth(testContainer, form, "Address house:");
for(i = 0; i < 2; i++) {
textWidth = $texts.eq(i).width();
assert.roughEqual(textWidth, labelWidth, 1, "group col , col2 item " + i);
}
});
QUnit.test("Align labels in column when alignItemLabelsInAllGroups is disabled", function(assert) {
// arrange, act
var testContainer = $("#form"),
form = testContainer.dxForm({
colCount: 2,
alignItemLabelsInAllGroups: false,
formData: {
firstName: "John",
lastName: "Smith",
order: 101,
photo: "image.png",
address: {
city: "Test City",
room: 11,
house: 7,
street: "Test street"
}
},
items: [
{
itemType: "group",
colCount: 2,
items: ["firstName", "lastName"]
},
{
itemType: "group",
colCount: 1,
items: ["photo", "order"]
},
{
itemType: "group",
colCount: 2,
items: ["address.city", "address.street"]
},
{
itemType: "group",
colCount: 2,
items: ["address.room", "address.house"]
}]
}).dxForm("instance"),
$groups;
// assert
$groups = form._getGroupElementsInColumn(testContainer, 0);
assert.notEqual(findLabelTextsInColumn($groups.eq(0), 0).eq(0).width(), findLabelTextsInColumn($groups.eq(1), 0).eq(0).width(), "compare group1 with group2");
$groups = form._getGroupElementsInColumn(testContainer, 1);
assert.notEqual(findLabelTextsInColumn($groups.eq(0), 0).eq(0).width(), findLabelTextsInColumn($groups.eq(1), 0).eq(0).width(), "compare group1 with group2");
});
QUnit.test("Align labels in columns when there are rows", function(assert) {
// arrange, act
var testContainer = $("#form"),
form = testContainer.dxForm({
formData: this.testObject,
colCount: 4,
items: [{
name: "fieldFirstValue",
colSpan: 2,
editorType: "dxTextBox",
label: {
text: "Field 1"
}
},
{
name: "fieldSecondValue",
colSpan: 2,
editorType: "dxTextBox",
label: {
text: "Field 2"
}
},
{
name: "fieldThirdValue",
colSpan: 2,
editorType: "dxTextBox",
label: {
text: "Field three"
}
},
{
name: "fieldFourthValue",
colSpan: 2,
editorType: "dxTextBox",
label: {
text: "Field four"
}
}
]
}).dxForm("instance");
var $col1 = $(".dx-col-0"),
$col2 = $(".dx-col-2"),
$maxLabelWidth = getLabelWidth(testContainer, form, "Field three:"),
i,
labelWidth;
// assert
for(i = 0; i < 2; i++) {
labelWidth = $col1.eq(i).find("." + internals.FIELD_ITEM_LABEL_CONTENT_CLASS).first().width();
assert.roughEqual(labelWidth, $maxLabelWidth, 1, "col0 item " + i);
}
$maxLabelWidth = getLabelWidth(testContainer, form, "Field four:");
for(i = 0; i < 2; i++) {
labelWidth = $col2.eq(i).find("." + internals.FIELD_ITEM_LABEL_CONTENT_CLASS).first().width();
assert.roughEqual(labelWidth, $maxLabelWidth, 1, "col2 item " + i);
}
});
QUnit.test("Change option after group rendered (check for cycling template render)", function(assert) {
// arrange
var $formContainer = $("#form").dxForm({
formData: {
firstName: "John",
lastName: "Rightman"
},
items: [
{
itemType: "group",
caption: "Personal",
items: [
{
dataField: "firstName"
},
{
dataField: "lastName"
}
]
}]
}),
$fieldItemWidgets;
// act
$formContainer.dxForm("instance").option("colCount", 4);
$fieldItemWidgets = $formContainer.find("." + internals.FIELD_ITEM_CONTENT_CLASS);
// assert
assert.equal($fieldItemWidgets.length, 3, "Correct number of widgets");
});
QUnit.test("Align labels when layout is changed in responsive box_T306106", function(assert) {
// arrange
var testContainer = $("#form"),
form = testContainer.dxForm({
formData: this.testObject,
colCount: 4,
customizeItem: function(item) {
switch(item.dataField) {
case "FirstName":
case "LastName":
item.colSpan = 2;
break;
case "Prefix":
item.colSpan = 4;
break;
case "Notes":
item.colSpan = 5;
break;
case "StateID":
item.colSpan = 3;
break;
default:
}
}
}).dxForm("instance");
var $labelsContent = testContainer.find("." + internals.FIELD_ITEM_LABEL_CONTENT_CLASS),
$maxLabelWidth = getLabelWidth(testContainer, form, "First Name:"),
i;
// act
this.updateScreenSize(500);
// assert
for(i = 0; i < 11; i++) {
var labelWidth = $labelsContent.eq(i).width();
assert.roughEqual(labelWidth, $maxLabelWidth, 1, "item " + i);
}
assert.equal($("." + internals.HIDDEN_LABEL_CLASS).length, 0, "hidden labels count");
});
QUnit.test("Align labels when layout is changed when small window size by default_T306106", function(assert) {
// arrange
this.updateScreenSize(500);
var testContainer = $("#form"),
form = testContainer.dxForm({
formData: this.testObject,
colCount: 4,
customizeItem: function(item) {
switch(item.dataField) {
case "FirstName":
case "LastName":
item.colSpan = 2;
break;
case "Prefix":
item.colSpan = 4;
break;
case "Notes":
item.colSpan = 5;
break;
case "StateID":
item.colSpan = 3;
break;
default:
}
}
}).dxForm("instance");
var $labelsContent = testContainer.find("." + internals.FIELD_ITEM_LABEL_CONTENT_CLASS),
$maxLabelWidth = getLabelWidth(testContainer, form, "First Name:"),
i;
// assert
for(i = 0; i < 11; i++) {
var labelWidth = $labelsContent.eq(i).width();
assert.roughEqual(labelWidth, $maxLabelWidth, 1, "item " + i);
}
assert.equal($("." + internals.HIDDEN_LABEL_CLASS).length, 0, "hidden labels count");
});
QUnit.test("required mark aligned", (assert) => {
let $testContainer = $("#form").dxForm({
requiredMark: "!",
items: [{
dataField: "name",
isRequired: true
}]
});
let $labelsContent = $testContainer.find(`.${internals.FIELD_ITEM_LABEL_CONTENT_CLASS}`),
$requiredLabel = $labelsContent.find(`.${internals.FIELD_ITEM_LABEL_TEXT_CLASS}`),
$requiredMark = $labelsContent.find(`.${internals.FIELD_ITEM_REQUIRED_MARK_CLASS}`);
$labelsContent.width(200);
assert.roughEqual($labelsContent.offset().left + $requiredLabel.width(), $requiredMark.offset().left, 0.5, "position of required mark is correct");
assert.ok($requiredLabel.position().left < $requiredMark.position().left, "required mark should be after the text");
});
QUnit.test("optional mark aligned", (assert) => {
let $testContainer = $("#form").dxForm({
optionalMark: "optMark",
showOptionalMark: true,
items: ["position"]
});
let $labelsContent = $testContainer.find(`.${internals.FIELD_ITEM_LABEL_CONTENT_CLASS}`),
$optionalLabel = $labelsContent.find(`.${internals.FIELD_ITEM_LABEL_TEXT_CLASS}`),
$optionalMark = $labelsContent.find(`.${internals.FIELD_ITEM_OPTIONAL_MARK_CLASS}`);
$labelsContent.width(200);
assert.roughEqual($labelsContent.offset().left + $optionalLabel.width(), $optionalMark.offset().left, 0.5, "position of optional mark is correct");
assert.ok($optionalLabel.position().left < $optionalMark.position().left, "optional mark should be after the text");
});
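// Tests for the public Form API: updateData, getEditor, getButton, itemOption, updateDimensions, and resetValues.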
QUnit.module("Public API", {
beforeEach: function() {
this.clock = sinon.useFakeTimers();
},
afterEach: function() {
this.clock.restore();
}
});
QUnit.test("UpdateData, simple case", function(assert) {
// arrange
var $testContainer = $("#form");
$testContainer.dxForm({
formData: { test1: "abc", test2: "xyz" }
});
// act
var form = $testContainer.dxForm("instance");
form.updateData("test2", "qwerty");
// assert
assert.equal(form.option("formData.test2"), "qwerty", "Correct data");
});
QUnit.test("UpdateData, update with object", function(assert) {
// arrange
var $testContainer = $("#form");
$testContainer.dxForm({
items: ["test1", "test2", { dataField: "test3.SuperMan" }, { dataField: "test3.Specialization.good" }],
formData: {
test1: "abc", test2: "xyz", test3: {
SuperMan: "Kent",
Specialization: {
good: true
}
}
}
});
// act
var form = $testContainer.dxForm("instance");
form.updateData({
test1: "xyz", test2: "qwerty", test3: {
SuperMan: "KAndrew",
Specialization: {
good: false
}
}
});
// assert
assert.deepEqual(form.option("formData"), {
test1: "xyz", test2: "qwerty", test3: {
SuperMan: "KAndrew",
Specialization: {
good: false
}
}
}, "updated data");
assert.equal(form.getEditor("test1").option("value"), "xyz", "editor's value of 'test1' data field");
assert.equal(form.getEditor("test2").option("value"), "qwerty", "editor's value of 'test2' data field");
assert.equal(form.getEditor("test3.SuperMan").option("value"), "KAndrew", "editor's value of 'test3.SuperMan' data field");
assert.ok(!form.getEditor("test3.Specialization.good").option("value"), "editor's value of 'test3.Specialization.good' data field");
});
QUnit.test("Get button instance", function(assert) {
var form = $("#form").dxForm({
items: [{
itemType: "button",
name: "button1",
buttonOptions: { text: "button1" }
}, {
itemType: "group",
items: [{
itemType: "button",
name: "button2",
buttonOptions: { text: "button2" }
}]
}, {
itemType: "button",
buttonOptions: { text: "button3" }
}]
}).dxForm("instance");
var formInvalidateSpy = sinon.spy(form, "_invalidate");
assert.strictEqual(form.getButton("button1").option("text"), "button1");
assert.strictEqual(form.getButton("button2").option("text"), "button2");
assert.strictEqual(form.getButton("button3"), undefined);
form.option("items[1].items[0].buttonOptions.text", "changed_button_text");
assert.strictEqual(form.getButton("button2").option("text"), "changed_button_text");
assert.strictEqual(formInvalidateSpy.callCount, 0, "Invalidate is not called");
});
QUnit.testInActiveWindow("Change 'Button.icon'", function(assert) {
["option", "itemOption", "editor.option"].forEach(function(setOptionWay) {
var form = $("#form").dxForm({
items: [{
itemType: "button",
name: "button1",
buttonOptions: { icon: "icon1" }
}]
}).dxForm("instance");
if(device.real().deviceType === "desktop") {
$("#form").find(".dx-button").focus();
assert.ok($("#form").find(".dx-button").is(":focus"), "initial focus");
}
switch(setOptionWay) {
case "option":
form.option("items[0].buttonOptions.icon", "icon2");
break;
case "itemOption": {
const buttonOptions = form.itemOption("button1").buttonOptions;
buttonOptions.icon = "icon2";
form.itemOption("button1", "buttonOptions", buttonOptions);
break;
}
case "editor.option":
form.getButton("button1").option("icon", "icon2");
break;
}
assert.strictEqual(form.getButton("button1").option("icon"), "icon2");
if(device.real().deviceType === "desktop") {
assert.ok($("#form").find(".dx-button").is(":focus") === (setOptionWay !== "itemOption"), "final focus");
}
});
});
QUnit.test("Get editor instance", function(assert) {
// arrange
var $testContainer = $("#form");
$testContainer.dxForm({
formData: { test1: "abc", test2: "xyz" },
items: ["test1", { name: "test3", editorType: "dxNumberBox" }]
});
// act
var form = $testContainer.dxForm("instance");
// assert
assert.ok(!typeUtils.isDefined(form.getEditor("test2")), "We hasn't instance for 'test2' field");
assert.ok(typeUtils.isDefined(form.getEditor("test1")), "We have instance for 'test1' field");
assert.ok(typeUtils.isDefined(form.getEditor("test3")), "We have instance for 'test3' field");
assert.equal(form.getEditor("test1").NAME, "dxTextBox", "It's textbox");
assert.equal(form.getEditor("test3").NAME, "dxNumberBox", "It's numberBox");
});
QUnit.test("Get editor instance with group config", function(assert) {
// arrange
var $testContainer = $("#form");
$testContainer.dxForm({
formData: { test1: "abc", test2: "xyz" },
items: [
"test1",
{
itemType: "group",
items: [{ dataField: "test2", editorType: "dxTextArea" }, { name: "test3", editorType: "dxTextBox" }]
}
]
});
// act
var form = $testContainer.dxForm("instance");
// assert
assert.ok(typeUtils.isDefined(form.getEditor("test1")), "We have instance for 'test1' field");
assert.ok(typeUtils.isDefined(form.getEditor("test2")), "We have instance for 'test2' field");
assert.ok(typeUtils.isDefined(form.getEditor("test3")), "We have instance for 'test3' field");
assert.equal(form.getEditor("test2").NAME, "dxTextArea", "It's textArea");
assert.equal(form.getEditor("test3").NAME, "dxTextBox", "It's textBox");
});
QUnit.test("UpdateDimensions", function(assert) {
// arrange
var $testContainer = $("#form");
$testContainer.dxForm({
height: 200,
formData: { test1: "abc", test2: "xyz", test3: "123" },
items: ["test1", "test2", "test3", {
template: function() {
return $("<div/>")
.attr("id", "testBlock")
.css({ height: 300, "backgroundColor": "red" });
}
}]
});
// act
var form = $testContainer.dxForm("instance"),
isSizeUpdated;
$("#testBlock").hide();
form.updateDimensions().done(function() {
isSizeUpdated = true;
});
this.clock.tick();
// assert
assert.ok(isSizeUpdated);
});
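// Fires a keyup event with the given key on the first input inside $element.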
function triggerKeyUp($element, key) {
var e = $.Event("keyup");
e.key = key;
$($element.find("input").first()).trigger(e);
}
QUnit.test("Check component instance onEditorEnterKey", function(assert) {
// arrange
var testArgs,
editor,
form;
form = $("#form").dxForm({
formData: {
name: "Kyle",
work: "MexCo"
},
onEditorEnterKey: function(args) {
testArgs = args;
}
}).dxForm("instance");
// act
editor = form.getEditor("work");
triggerKeyUp(editor.$element(), "Enter");
// assert
assert.notEqual(testArgs.component, undefined, "component");
assert.notEqual(testArgs.element, undefined, "element");
assert.notEqual(testArgs.event, undefined, "Event");
assert.equal(testArgs.dataField, "work", "dataField");
assert.equal(testArgs.component.NAME, "dxForm", "correct component");
});
QUnit.test("Use 'itemOption' with no items", function(assert) {
// arrange
var $testContainer = $("#form").dxForm({
height: 200,
formData: { test1: "abc", test2: "xyz", test3: "123" }
}),
form = $testContainer.dxForm("instance");
// act
var testItem = form.itemOption("test2");
form.itemOption("test3", "label", { text: "NEWLABEL" });
// assert
assert.deepEqual(testItem, { dataField: "test2" }, "corrected item received");
assert.equal($testContainer.find("." + internals.FIELD_ITEM_LABEL_CLASS).last().text(), "NEWLABEL:", "new label rendered");
});
QUnit.test("Use 'itemOption' do not change the order of an items", function(assert) {
// arrange
var $testContainer = $("#form").dxForm({
height: 200,
formData: { ID: 1, FirstName: "Alex", LastName: "Johnson", Address: "Alabama" },
items: [
"ID",
{ dataField: "FirstName" },
{ dataField: "LastName" },
"Address"
]
}),
form = $testContainer.dxForm("instance");
// act
form.itemOption("FirstName", {
visible: true,
editorOptions: {
value: "",
useMaskedValue: true,
placeholder: "CNPJ",
mask: "000.000.000-00"
}
});
// assert
assert.deepEqual(
form.option("items"),
[
{ dataField: "ID" },
{
dataField: "FirstName",
visible: true,
editorOptions: {
value: "",
useMaskedValue: true,
placeholder: "CNPJ",
mask: "000.000.000-00"
}
},
{ dataField: "LastName" },
{ dataField: "Address" }
],
"correct items order");
});
QUnit.test("Use 'itemOption' with groups", function(assert) {
// arrange
var $testContainer = $("#form").dxForm({
height: 200,
formData: { EmployeeID: 1, LastName: "John", FirstName: "Dow", BirthData: "01/01/1970", HireDate: "12/11/1995" },
items: [
{
itemType: "group",
items: [
{
itemType: "group",
caption: "Personal",
items: [{
itemType: "group",
caption: "Full Name",
colCount: 3,
items: ["EmployeeID", "LastName", "FirstName"]
}, {
itemType: "group",
caption: "Dates",
items: ["BirthDate", "HireDate"]
}]
}
]
}
]
}
),
form = $testContainer.dxForm("instance");
// act
var unknownField = form.itemOption("FirstName"),
firstGroup = form.itemOption("Personal"),
secondGroup = form.itemOption("Personal.FullName"),
innerOption = form.itemOption("Personal.FullName.FirstName");
form.itemOption("Personal.Dates.HireDate", "label", { text: "NEWLABEL" });
// assert
assert.equal(unknownField, undefined, "unknown item is undefined");
assert.deepEqual({ itemType: firstGroup.itemType, caption: firstGroup.caption }, { itemType: "group", caption: "Personal" }, "correct item received");
assert.deepEqual({ itemType: secondGroup.itemType, caption: secondGroup.caption }, { itemType: "group", caption: "Full Name" }, "correct item received");
assert.equal(innerOption.dataField, "FirstName", "correct item received");
assert.equal($testContainer.find("." + internals.FIELD_ITEM_LABEL_CLASS).last().text(), "NEWLABEL:", "new label rendered");
});
QUnit.test("Use 'itemOption' with groups and one group has empty caption (T359214)", function(assert) {
// arrange
var $testContainer = $("#form").dxForm({
height: 200,
items: [
{
itemType: 'group',
caption: '',
items: [
{
itemType: 'simple',
dataField: 'Sequence',
editorType: 'dxTextBox'
},
{
itemType: 'simple',
dataField: 'AgentID',
editorType: 'dxTextBox'
}
]
},
{
itemType: 'group',
caption: 'TestGroup1',
items: [
{
itemType: 'group',
caption: 'Tax',
items: [
{
itemType: 'simple',
dataField: 'IsResident',
editorType: 'dxTextBox'
},
{
itemType: 'simple',
dataField: 'Minor',
editorType: 'dxTextBox'
}
]
},
{
itemType: 'group',
caption: 'TestGroup2',
items: [
{
itemType: 'simple',
dataField: 'DIN',
editorType: 'dxTextBox'
}
],
}
]
}
]
}
),
form = $testContainer.dxForm("instance");
// act
form.itemOption("TestGroup1.TestGroup2", "caption", "custom");
// assert
assert.equal($testContainer.find("." + internals.FORM_GROUP_CAPTION_CLASS).last().text(), "custom", "new caption rendered");
});
QUnit.test("Use 'itemOption' with tabs", function(assert) {
// arrange
var $testContainer = $("#form").dxForm({
formData: { EmployeeID: 1, LastName: "John", FirstName: "Dow", BirthData: "01/01/1970", HireDate: "12/11/1995", Country: "USA", City: "Phoenix", Region: "Arizona", Title: "Ms" },
items: [
"EmployeeID", "FirstName", "LastName",
{
itemType: "tabbed",
tabs: [
{
title: "Dates",
items: ["BirthDate", "HireDate"]
},
{
title: "Address",
colCount: 2,
items: ["Country", "City", "Region"]
},
{
title: "Title",
items: ["Title"]
}
]
}
] }
),
form = $testContainer.dxForm("instance");
// act
var tabItem = form.itemOption("Address"),
innerTabItem = form.itemOption("Address.Country");
form.itemOption("Dates.HireDate", "label", { text: "NEWLABEL" });
// assert
assert.deepEqual(tabItem, {
title: "Address",
colCount: 2,
items: [{ dataField: "Country" }, { dataField: "City" }, { dataField: "Region" }]
}, "Correct tab's item");
assert.equal(innerTabItem.dataField, "Country", "corrected item received");
assert.equal($testContainer.find("." + internals.FIELD_ITEM_LABEL_CLASS).eq(4).text(), "NEWLABEL:", "new label rendered");
});
QUnit.test("'itemOption' should get an item with several spaces in the caption", function(assert) {
// arrange
var $testContainer = $("#form").dxForm({
formData: { EmployeeID: 1, LastName: "John", FirstName: "Dow" },
items: [
"EmployeeID",
{
itemType: "group",
caption: "Test group item",
items: [
"FirstName", "LastName"
]
}
] }
),
form = $testContainer.dxForm("instance");
// act
var groupItem = form.itemOption("Testgroupitem"),
innerGroupItem = form.itemOption("Testgroupitem.FirstName");
assert.deepEqual(groupItem, {
itemType: "group",
caption: "Test group item",
items: [ { dataField: "FirstName" }, { dataField: "LastName" }]
}, "Correct group item");
form.itemOption("Testgroupitem.LastName", "label", { text: "NEWLABEL" });
// assert
assert.equal(innerGroupItem.dataField, "FirstName", "corrected item received");
assert.equal($testContainer.find("." + internals.FIELD_ITEM_LABEL_CLASS).last().text(), "NEWLABEL:", "new label rendered");
});
QUnit.test("'itemOption' should get an item with several spaces in the caption and long path", function(assert) {
// arrange
var $testContainer = $("#form").dxForm({
formData: { EmployeeID: 1, LastName: "John", FirstName: "Dow" },
items: [
"EmployeeID",
{
itemType: "group",
caption: "Test group 1",
items: [{
itemType: "group",
caption: "Test group 2",
items: ["FirstName", "LastName"]
}]
}
] }
),
form = $testContainer.dxForm("instance");
// act
var innerGroupItem = form.itemOption("Testgroup1.Testgroup2.FirstName");
// assert
assert.deepEqual(innerGroupItem, { dataField: "FirstName" }, "corrected item received");
});
QUnit.test("'itemOption' should get an group inner item located into tabbed item", function(assert) {
// arrange
var $testContainer = $("#form").dxForm({
formData: { EmployeeID: 1, LastName: "John", FirstName: "Dow" },
items: [
{
itemType: "tabbed",
tabs: [{
title: "Test Tab 1",
items: ["EmployeeID"]
}, {
title: "Test Tab 2",
items: [{
itemType: "group",
caption: "Test Group 1",
items: ["FirstName", "LastName"]
}]
}]
}]
}),
form = $testContainer.dxForm("instance");
// act
var innerGroupItem = form.itemOption("TestTab2.TestGroup1.FirstName");
// assert
assert.deepEqual(innerGroupItem, { dataField: "FirstName" }, "corrected item received");
});
QUnit.test("'itemOption' should get item by composite path use the name option", function(assert) {
// arrange
var $testContainer = $("#form").dxForm({
formData: {
LastName: "Last Name"
},
items: [{
itemType: "group",
caption: "My Custom Group",
name: "testGroup",
items: [{
itemType: "tabbed",
tabs: [{
title: "My Custom Tab",
name: "tab1",
items: [{
name: "simpleItem",
dataField: "LastName"
}]
}]
}]
}]
}),
form = $testContainer.dxForm("instance");
// act
var item = form.itemOption("testGroup.tab1.simpleItem");
// assert
assert.deepEqual(item.dataField, "LastName", "data field of item");
});
QUnit.test("'itemOption' should get a group item by the name option", function(assert) {
// arrange
var $testContainer = $("#form").dxForm({
formData: {
LastName: "Last Name"
},
items: [{
itemType: "group",
name: "testGroup",
items: [{
name: "simpleItem",
dataField: "LastName"
}]
}]
});
// act
var item = $testContainer.dxForm("instance").itemOption("testGroup");
// assert
assert.ok(!!item, "get a group item");
assert.equal(item.itemType, "group", "It's a group item");
assert.deepEqual(item.items, [{
name: "simpleItem",
dataField: "LastName"
}], "has correct items");
});
QUnit.test("Changing an editor/button options of an any item does not invalidate whole form (T311892, T681241)", function(assert) {
// arrange
var form = $("#form").dxForm({
formData: {
lastName: "Kyle",
firstName: "John"
},
items: [
{ dataField: "firstName", editorType: "dxTextBox", editorOption: { width: 100, height: 20 } },
{ dataField: "lastName", editorType: "dxTextBox", editorOption: { width: 100, height: 20 } },
{ itemType: "button", buttonOptions: { width: 100, height: 20 } }
]
}).dxForm("instance"),
formInvalidateSpy = sinon.spy(form, "_invalidate");
// act
form.option("items[1].editorOptions", { width: 80, height: 40 });
form.option("items[2].buttonOptions", { width: 10, height: 20 });
// assert
var editor = $("#form .dx-textbox").last().dxTextBox("instance"),
button = $("#form .dx-button").last().dxButton("instance");
assert.deepEqual(form.option("items[1].editorOptions"), { width: 80, height: 40 }, "correct editor options");
assert.deepEqual(form.option("items[2].buttonOptions"), { width: 10, height: 20 }, "correct button options");
assert.equal(formInvalidateSpy.callCount, 0, "Invalidate is not called");
assert.equal(editor.option("width"), 80, "Correct editor width");
assert.equal(editor.option("height"), 40, "Correct editor height");
assert.equal(button.option("width"), 10, "Correct button width");
assert.equal(button.option("height"), 20, "Correct button height");
});
QUnit.test("Changing editorOptions of subitem change editor options (T316522)", function(assert) {
// arrange
var form = $("#form").dxForm({
formData: {
lastName: "Kyle",
firstName: "John"
},
items: [
{
itemType: "group", items: [
{
itemType: "group", items: [
{ dataField: "firstName", editorType: "dxTextBox", editorOptions: { width: 100, height: 20 } },
{ dataField: "lastName", editorType: "dxTextBox", editorOptions: { width: 100, height: 20 } }
]
}
]
}
]
}).dxForm("instance");
// act
form.option("items[0].items[0].items[1].editorOptions", { width: 80, height: 40 });
// assert
var secondEditor = $("#form .dx-textbox").last().dxTextBox("instance");
assert.equal(secondEditor.option("width"), 80, "Correct width");
assert.equal(secondEditor.option("height"), 40, "Correct height");
});
QUnit.test("editorOptions correctly updates in case when only item name is defined", function(assert) {
// arrange
var form = $("#form").dxForm({
items: [
{
itemType: "group", items: [
{
itemType: "group", items: [
{ name: "firstName", editorType: "dxTextBox", editorOptions: { width: 100, height: 20 } },
{ name: "lastName", editorType: "dxTextBox", editorOptions: { width: 100, height: 20 } }
]
}
]
}
]
}).dxForm("instance");
var invalidateSpy = sinon.spy(form, "_invalidate");
// act
form.option("items[0].items[0].items[1].editorOptions", { width: 80, height: 40 });
// assert
var secondEditor = $("#form .dx-textbox").last().dxTextBox("instance");
assert.equal(invalidateSpy.callCount, 0, "dxForm wasn't invalidated");
assert.equal(secondEditor.option("width"), 80, "Correct width");
assert.equal(secondEditor.option("height"), 40, "Correct height");
});
QUnit.test("Reset editor's value", function(assert) {
// arrange
var form = $("#form").dxForm({
formData: {
name: "User",
lastName: "Test Last Name",
room: 1,
isDeveloper: true
},
items: ["name", "lastName", "room", "isDeveloper"]
}).dxForm("instance");
// act
form.resetValues();
// assert
assert.strictEqual(form.getEditor("name").option("value"), "", "editor for the name dataField");
assert.strictEqual(form.getEditor("lastName").option("value"), "", "editor for the lastName dataField");
assert.strictEqual(form.getEditor("room").option("value"), null, "editor for the room dataField");
assert.strictEqual(form.getEditor("isDeveloper").option("value"), false, "editor for the isDeveloper dataField");
});
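// Tests for adaptive layout: screenByWidth, colCountByScreen, and colCount: "auto".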
QUnit.module("Adaptivity");
QUnit.test("One column screen should be customizable with screenByWidth option on init", function(assert) {
// arrange
var $form = $("#form");
$form.dxForm({
formData: {
name: "User",
lastName: "Test Last Name",
room: 1,
isDeveloper: true
},
colCount: 2,
screenByWidth: function() { return "xs"; },
items: ["name", "lastName", "room", "isDeveloper"]
});
// assert
assert.equal($form.find(".dx-layout-manager-one-col").length, 1, "single column screen was changed");
assert.equal($form.find(".dx-single-column-item-content").length, 4, "There are 4 items in the column");
});
QUnit.test("One column screen should be customizable with screenByWidth option on option change", function(assert) {
// arrange
var $form = $("#form"),
form = $form.dxForm({
formData: {
name: "User",
lastName: "Test Last Name",
room: 1,
isDeveloper: true
},
colCount: 2,
screenByWidth: function() { return "md"; },
items: ["name", "lastName", "room", "isDeveloper"]
}).dxForm("instance");
assert.equal($form.find(".dx-single-column-item-content").length, 0, "There are no single column items");
// act
form.option("screenByWidth", function() { return "xs"; });
// assert
assert.equal($form.find(".dx-single-column-item-content").length, 4, "There are 4 items in the column");
assert.equal($form.find(".dx-layout-manager-one-col").length, 1, "single column screen was changed");
});
QUnit.test("Column count may depend on screen factor", function(assert) {
// arrange
var $form = $("#form"),
screen = "md";
$form.dxForm({
formData: {
name: "User",
lastName: "Test Last Name",
room: 1,
isDeveloper: true
},
colCountByScreen: {
sm: 1,
md: 2
},
screenByWidth: function() { return screen; },
items: ["name", "lastName", "room", "isDeveloper"]
});
assert.equal($form.find(".dx-first-col.dx-last-col").length, 0, "more than one column exists");
// act
screen = "sm";
resizeCallbacks.fire();
// assert
assert.equal($form.find(".dx-first-col.dx-last-col").length, 4, "only one column exists");
});
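// Stubs domAdapter.getDocumentElement so a scrollbar appearing during rerender doesn't flip the screen factor back.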
QUnit.test("Column count ignores hide/show scroller when rerendering if screen factor changed", function(assert) {
var originalGetDocumentElement = domAdapter.getDocumentElement;
try {
var largeScreenWidth = 1200,
mediumScreenWidth = 1199,
width = largeScreenWidth,
height = 300,
scrollerWidth = 17;
domAdapter.getDocumentElement = function() {
return {
clientWidth: width,
clientHeight: height
};
};
var $form = $("#form");
$form.dxForm({
labelLocation: "left",
colCountByScreen: {
lg: 2,
md: 1
},
items: [
{
name: "f1", editorType: "dxTextBox",
editorOptions: {
onDisposing: function() {
width = mediumScreenWidth + scrollerWidth;
}
}
},
"f2"
]
});
assert.equal($form.find(".dx-col-0").length, 1, "(.dx-col-0).length initial");
assert.equal($form.find(".dx-col-1").length, 1, "(.dx-col-1).length initial");
width = mediumScreenWidth;
resizeCallbacks.fire();
assert.equal($form.find(".dx-col-0").length, 2, "(.dx-col-0).length current");
assert.equal($form.find(".dx-col-1").length, 0, "(.dx-col-1).length current");
} finally {
domAdapter.getDocumentElement = originalGetDocumentElement;
}
});
QUnit.test("Form should repaint once when screen factor changed", function(assert) {
// arrange
var $form = $("#form"),
screen = "md",
form = $form.dxForm({
formData: {
name: "User",
lastName: "Test Last Name",
room: 1,
isDeveloper: true
},
colCountByScreen: {
sm: 1,
md: 2
},
screenByWidth: function() { return screen; },
items: ["name", "lastName", "sex", "room", "isDeveloper"]
}).dxForm("instance"),
refreshStub = sinon.stub(form, "_refresh");
// act
screen = "sm";
resizeCallbacks.fire();
resizeCallbacks.fire();
// assert
assert.equal(refreshStub.callCount, 1, "refresh called once");
});
QUnit.test("Form doesn't redraw layout when colCount doesn't changed", function(assert) {
// arrange
var $form = $("#form"),
screen = "md",
form = $form.dxForm({
screenByWidth: function() {
return screen;
},
items: [{
name: "test",
editorType: "dxTextBox",
editorOptions: {
value: "Test"
}
}]
}).dxForm("instance");
// act
form.getEditor("test").option("value", "Changed");
screen = "sm";
resizeCallbacks.fire();
// assert
assert.equal(form.getEditor("test").option("value"), "Changed", "Editor keeps old value");
});
QUnit.test("Form doesn't redraw layout when colCount doesn't changed and colCountByScreen option defined", function(assert) {
// arrange
var $form = $("#form"),
screen = "md",
form = $form.dxForm({
screenByWidth: function() {
return screen;
},
colCountByScreen: {
sm: 2,
md: 2
},
items: [{
name: "test",
editorType: "dxTextBox",
editorOptions: {
value: "Test"
}
}]
}).dxForm("instance");
// act
form.getEditor("test").option("value", "Changed");
screen = "sm";
resizeCallbacks.fire();
// assert
assert.equal(form.getEditor("test").option("value"), "Changed", "Editor keeps old value");
});
QUnit.test("Form is not redrawn when colCount doesn't change ('colCount' and 'colCountByScreen' options are defined)", function(assert) {
// arrange
var $form = $("#form"),
screen = "md",
initCount = 0;
$form.dxForm({
screenByWidth: function() {
return screen;
},
colCount: 1, // xs and lg screens have an equal colCount
colCountByScreen: {
sm: 2,
md: 2
},
items: [{
name: "test",
editorType: "dxTextBox",
editorOptions: {
onInitialized: function() {
initCount++;
}
}
}]
});
// act, assert
assert.equal(initCount, 1, "Editor is initialized");
screen = "sm";
resizeCallbacks.fire();
assert.equal(initCount, 1, "colCount doesn't changed, editor doesn't rerender");
screen = "lg";
resizeCallbacks.fire();
assert.equal(initCount, 2, "colCount is changed, editor is rerender");
screen = "xs";
resizeCallbacks.fire();
assert.equal(initCount, 2, "colCount doesn't changed, editor doesn't rerender");
});
QUnit.test("Column count for group may depend on screen factor", function(assert) {
// arrange
var $form = $("#form"),
screen = "md";
$form.dxForm({
formData: {
name: "User",
lastName: "Test Last Name",
gender: "Male",
room: 1,
isDeveloper: true
},
screenByWidth: function() { return screen; },
items: [{
itemType: "group",
caption: "Group 1",
colCount: 1,
colCountByScreen: {
sm: 2,
md: 3
},
items: ["name", "lastName"]
},
{
itemType: "group",
caption: "Group 2",
colCount: 2,
colCountByScreen: {
sm: 4,
md: 1
},
items: ["sex", "room", "isDeveloper"]
}]
});
assert.equal($form.find(".dx-group-colcount-3").length, 1, "first group should have 3 columns");
assert.equal($form.find(".dx-group-colcount-1").length, 1, "second group should have 1 column");
// act
screen = "sm";
resizeCallbacks.fire();
// assert
assert.equal($form.find(".dx-group-colcount-2").length, 1, "first group should have 2 columns");
assert.equal($form.find(".dx-group-colcount-4").length, 1, "second group should have 4 columns");
});
QUnit.test("Column count for tabs may depend on screen factor", function(assert) {
// arrange
var $form = $("#form"),
screen = "md";
$form.dxForm({
formData: {
name: "User",
lastName: "Test",
gender: "Male",
room: 1,
isDeveloper: true
},
screenByWidth: function() { return screen; },
items: [{
itemType: "tabbed",
caption: "Group 1",
colCount: 1,
tabs: [{
colCountByScreen: { sm: 2, md: 3 },
items: ["name", "lastName", "gender", "room", "isDeveloper"]
}]
}]
});
assert.equal($form.find(".dx-field-item-tab.dx-col-2").length, 1, "tab has 3 groups on md screen");
// act
screen = "sm";
resizeCallbacks.fire();
// assert
assert.notOk($form.find(".dx-field-item-tab.dx-col-2").length, "tab has not 3 groups on sm screen");
assert.ok($form.find(".dx-field-item-tab.dx-col-1").length, "tab has 2 groups on sm screen");
});
QUnit.test("Cached colCount options doesn't leak", function(assert) {
// arrange
var $form = $("#form"),
instance;
instance = $form.dxForm({
formData: {
name: "User",
lastName: "Test Last Name"
},
colCount: 2,
items: [{
itemType: "group",
caption: "Group 1",
colCount: 1,
colCountByScreen: {
sm: 2,
md: 3
},
items: ["name", "lastName"]
}]
}).dxForm("instance");
assert.equal(instance._cachedColCountOptions.length, 2, "root + group item colCount options cached");
// act
instance.option("items", ["name"]);
// assert
assert.equal(instance._cachedColCountOptions.length, 1, "only root colCount options cached");
});
QUnit.test("Form refreshes only one time on dimension changed with group layout", function(assert) {
// arrange
var $form = $("#form").width(300),
screen = "md",
form = $form.dxForm({
screenByWidth: function() {
return screen;
},
colCount: "auto",
minColWidth: 100,
items: [{
name: "test1",
editorType: "dxTextBox"
}, {
itemType: "group",
caption: "Test group",
colCount: "auto",
minColWidth: 200,
items: [
{ name: "test2", editorType: "dxTextBox" },
{ name: "test3", editorType: "dxTextBox" }
]
}]
}).dxForm("instance");
var refreshSpy = sinon.spy(form, "_refresh");
// act
$form.width(100);
resizeCallbacks.fire();
// assert
assert.equal(refreshSpy.callCount, 1, "form has been redraw layout one time");
});
QUnit.test("Form redraw layout when colCount is 'auto' and an calculated colCount changed", function(assert) {
// arrange
var $form = $("#form").width(300),
screen = "md",
form = $form.dxForm({
screenByWidth: function() {
return screen;
},
colCount: "auto",
minColWidth: 100,
items: [{
name: "test1",
editorType: "dxTextBox"
}, {
name: "test2",
editorType: "dxTextBox"
}]
}).dxForm("instance");
var refreshSpy = sinon.spy(form, "_refresh");
// act
$form.width(100);
resizeCallbacks.fire();
// assert
assert.equal(refreshSpy.callCount, 1, "form has been redraw layout");
});
QUnit.module("Form when rtlEnabled is true");
QUnit.test("required mark aligned when rtlEnabled option is set to true", (assert) => {
let $testContainer = $("#form").dxForm({
requiredMark: "!",
rtlEnabled: true,
items: [{
dataField: "name",
isRequired: true
}]
});
let $labelsContent = $testContainer.find(`.${internals.FIELD_ITEM_LABEL_CONTENT_CLASS}`),
$requiredLabel = $labelsContent.find(`.${internals.FIELD_ITEM_LABEL_TEXT_CLASS}`),
$requiredMark = $labelsContent.find(`.${internals.FIELD_ITEM_REQUIRED_MARK_CLASS}`);
$labelsContent.width(200);
assert.notEqual($labelsContent.offset().left, $requiredMark.offset().left, "position of required mark is correct");
assert.ok($requiredLabel.position().left > $requiredMark.position().left, "required mark should be before the text");
});
QUnit.test("optional mark aligned when rtlEnabled option is set to true", (assert) => {
let $testContainer = $("#form").dxForm({
optionalMark: "optMark",
showOptionalMark: true,
rtlEnabled: true,
items: ["position"]
});
let $labelsContent = $testContainer.find(`.${internals.FIELD_ITEM_LABEL_CONTENT_CLASS}`),
$optionalLabel = $labelsContent.find(`.${internals.FIELD_ITEM_LABEL_TEXT_CLASS}`),
$optionalMark = $labelsContent.find(`.${internals.FIELD_ITEM_OPTIONAL_MARK_CLASS}`);
$labelsContent.width(200);
assert.notEqual($labelsContent.offset().left, $optionalMark.offset().left, "position of optional mark is correct");
assert.ok($optionalLabel.position().left > $optionalMark.position().left, "optional mark should be before the text");
});
QUnit.module("Events");
QUnit.test("Should not skip `optionChanged` event handler that has been added on the `onInitialized` event handler", function(assert) {
var eventCalls = [];
var form = $("#form").dxForm({
formData: { firstName: "John" },
onOptionChanged: function() {
eventCalls.push("onOptionChanged");
},
onContentReady: function(e) {
e.component.on("optionChanged", function() {
eventCalls.push("optionChanged from `onContentReady`");
});
},
onInitialized: function(e) {
e.component.on('optionChanged', function() {
eventCalls.push("optionChanged from `onInitialized`");
});
}
}).dxForm("instance");
form.option("formData", { lastName: "John" });
assert.deepEqual(eventCalls, [
"optionChanged from `onInitialized`",
"optionChanged from `onContentReady`",
"onOptionChanged"
]);
});
|
findLabelTextsInColumn
|
test_cargo_new.rs
|
use std::io::{fs, USER_RWX, File, TempDir};
use std::io::fs::PathExtensions;
use std::os;
use support::{execs, paths, cargo_dir, ResultTest};
use hamcrest::{assert_that, existing_file, existing_dir, is_not};
use cargo::util::{process, ProcessBuilder};
fn setup() {
}
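// Helpers that run a command from the test sandbox root with HOME pointed at the fake home directory.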
fn my_process(s: &str) -> ProcessBuilder {
process(s)
.cwd(paths::root())
.env("HOME", Some(paths::home()))
}
fn cargo_process(s: &str) -> ProcessBuilder {
process(cargo_dir().join("cargo")).arg(s)
.cwd(paths::root())
.env("HOME", Some(paths::home()))
}
test!(simple_lib {
os::setenv("USER", "foo");
assert_that(cargo_process("new").arg("foo").arg("--no-git"),
execs().with_status(0));
assert_that(&paths::root().join("foo"), existing_dir());
assert_that(&paths::root().join("foo/Cargo.toml"), existing_file());
assert_that(&paths::root().join("foo/src/lib.rs"), existing_file());
assert_that(&paths::root().join("foo/.gitignore"), is_not(existing_file()));
assert_that(cargo_process("build").cwd(paths::root().join("foo")),
execs().with_status(0));
})
test!(simple_bin {
os::setenv("USER", "foo");
assert_that(cargo_process("new").arg("foo").arg("--bin"),
execs().with_status(0));
assert_that(&paths::root().join("foo"), existing_dir());
assert_that(&paths::root().join("foo/Cargo.toml"), existing_file());
assert_that(&paths::root().join("foo/src/main.rs"), existing_file());
assert_that(cargo_process("build").cwd(paths::root().join("foo")),
execs().with_status(0));
assert_that(&paths::root().join(format!("foo/target/foo{}",
os::consts::EXE_SUFFIX)),
existing_file());
})
test!(simple_git {
let td = TempDir::new("cargo").unwrap();
os::setenv("USER", "foo");
assert_that(cargo_process("new").arg("foo").cwd(td.path().clone()),
execs().with_status(0));
assert_that(td.path(), existing_dir());
assert_that(&td.path().join("foo/Cargo.toml"), existing_file());
assert_that(&td.path().join("foo/src/lib.rs"), existing_file());
assert_that(&td.path().join("foo/.git"), existing_dir());
assert_that(&td.path().join("foo/.gitignore"), existing_file());
assert_that(cargo_process("build").cwd(td.path().clone().join("foo")),
execs().with_status(0));
})
test!(simple_travis {
os::setenv("USER", "foo");
assert_that(cargo_process("new").arg("foo").arg("--travis"),
execs().with_status(0));
assert_that(&paths::root().join("foo"), existing_dir());
assert_that(&paths::root().join("foo/Cargo.toml"), existing_file());
assert_that(&paths::root().join("foo/src/lib.rs"), existing_file());
assert_that(&paths::root().join("foo/.travis.yml"), existing_file());
assert_that(cargo_process("build").cwd(paths::root().join("foo")),
execs().with_status(0));
})
test!(no_argument {
assert_that(cargo_process("new"),
execs().with_status(1)
.with_stderr("Invalid arguments.
Usage:
cargo new [options] <path>
cargo new -h | --help
"));
})
test!(existing {
let dst = paths::root().join("foo");
fs::mkdir(&dst, USER_RWX).assert();
assert_that(cargo_process("new").arg("foo"),
execs().with_status(101)
.with_stderr(format!("Destination `{}` already exists\n",
dst.display())));
})
test!(invalid_characters {
assert_that(cargo_process("new").arg("foo.rs"),
execs().with_status(101)
.with_stderr("Invalid character `.` in crate name: `foo.rs`"));
})
test!(finds_author_user {
// Use a temp dir to make sure we don't pick up .cargo/config somewhere in
// the hierarchy
let td = TempDir::new("cargo").unwrap();
assert_that(cargo_process("new").arg("foo").env("USER", Some("foo"))
.cwd(td.path().clone()),
execs().with_status(0));
let toml = td.path().join("foo/Cargo.toml");
let toml = File::open(&toml).read_to_string().assert();
assert!(toml.as_slice().contains(r#"authors = ["foo"]"#));
})
test!(finds_author_username {
// Use a temp dir to make sure we don't pick up .cargo/config somewhere in
// the hierarchy
let td = TempDir::new("cargo").unwrap();
assert_that(cargo_process("new").arg("foo")
.env("USER", None::<&str>)
.env("USERNAME", Some("foo"))
.cwd(td.path().clone()),
execs().with_status(0));
let toml = td.path().join("foo/Cargo.toml");
let toml = File::open(&toml).read_to_string().assert();
assert!(toml.as_slice().contains(r#"authors = ["foo"]"#));
})
test!(finds_author_git {
my_process("git").args(["config", "--global", "user.name", "bar"])
.exec().assert();
my_process("git").args(["config", "--global", "user.email", "baz"])
.exec().assert();
assert_that(cargo_process("new").arg("foo").env("USER", Some("foo")),
execs().with_status(0));
let toml = paths::root().join("foo/Cargo.toml");
let toml = File::open(&toml).read_to_string().assert();
assert!(toml.as_slice().contains(r#"authors = ["bar <baz>"]"#));
})
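// A [cargo-new] section in .cargo/config should take precedence over git's user.name/user.email.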
test!(author_prefers_cargo {
my_process("git").args(["config", "--global", "user.name", "bar"])
.exec().assert();
my_process("git").args(["config", "--global", "user.email", "baz"])
.exec().assert();
let root = paths::root();
fs::mkdir(&root.join(".cargo"), USER_RWX).assert();
File::create(&root.join(".cargo/config")).write_str(r#"
[cargo-new]
name = "new-foo"
email = "new-bar"
git = false
"#).assert();
assert_that(cargo_process("new").arg("foo").env("USER", Some("foo")),
execs().with_status(0));
|
let toml = paths::root().join("foo/Cargo.toml");
let toml = File::open(&toml).read_to_string().assert();
assert!(toml.as_slice().contains(r#"authors = ["new-foo <new-bar>"]"#));
assert!(!root.join("foo/.gitignore").exists());
})
test!(git_prefers_command_line {
let root = paths::root();
let td = TempDir::new("cargo").unwrap();
fs::mkdir(&root.join(".cargo"), USER_RWX).assert();
File::create(&root.join(".cargo/config")).write_str(r#"
[cargo-new]
git = false
name = "foo"
email = "bar"
"#).assert();
assert_that(cargo_process("new").arg("foo").arg("--git").cwd(td.path().clone())
.env("USER", Some("foo")),
execs().with_status(0));
assert!(td.path().join("foo/.gitignore").exists());
})
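// A package generated inside an existing cargo project should not get its own git repo or .gitignore.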
test!(subpackage_no_git {
os::setenv("USER", "foo");
assert_that(cargo_process("new").arg("foo"), execs().with_status(0));
let subpackage = paths::root().join("foo").join("components");
fs::mkdir(&subpackage, USER_RWX).assert();
assert_that(cargo_process("new").arg("foo/components/subcomponent"),
execs().with_status(0));
assert_that(&paths::root().join("foo/components/subcomponent/.git"),
is_not(existing_file()));
assert_that(&paths::root().join("foo/components/subcomponent/.gitignore"),
is_not(existing_file()));
})
| |
k8s1210_openebs260_minio.go
|
package instances
import (
kurlv1beta1 "github.com/replicatedhq/kurl/kurlkinds/pkg/apis/cluster/v1beta1"
"github.com/replicatedhq/kurl/testgrid/tgrun/pkg/scheduler/types"
)
func
|
() {
RegisterAirgapAndOnlineInstance(
types.Instance{
InstallerSpec: types.InstallerSpec{
Kubernetes: &kurlv1beta1.Kubernetes{
Version: "1.20.5",
},
Weave: &kurlv1beta1.Weave{
Version: "2.8.1",
},
Docker: &kurlv1beta1.Docker{
Version: "20.10.5",
},
Registry: &kurlv1beta1.Registry{
Version: "2.7.1",
},
Kotsadm: &kurlv1beta1.Kotsadm{
Version: "latest",
},
OpenEBS: &kurlv1beta1.OpenEBS{
Version: "2.6.0",
Namespace: "openebs",
IsLocalPVEnabled: true,
LocalPVStorageClassName: "openebs",
IsCstorEnabled: true,
CstorStorageClassName: "default",
},
Ekco: &kurlv1beta1.Ekco{
Version: "0.10.1",
},
Minio: &kurlv1beta1.Minio{
Version: "latest",
Namespace: "minio",
},
},
},
)
}
|
init
|
edit-run-health-check.js
|
import React, { PureComponent } from "react";
import {
Form,
Select,
Radio,
Modal,
Input,
} from "antd";
import KVinput from "../../../components/KVinput";
import appProbeUtil from "../../../utils/appProbe-util";
const FormItem = Form.Item;
const Option = Select.Option;
const RadioGroup = Radio.Group;
// Configure / edit the runtime health check
@Form.create()
export default class EditRunHealthCheck extends PureComponent {
handleSubmit = (e) => {
e.preventDefault();
this.props.form.validateFields(
{
force: true,
},
(err, vals) => {
if (!err) {
this.props.onOk && this.props.onOk(vals);
}
},
);
};
checkPath = (rules, value, callback) => {
const visitType = this.props.form.getFieldValue("scheme");
if (visitType == "tcp") {
callback();
return;
}
if (visitType != "tcp" && value) {
callback();
return;
}
callback("请填写路径!");
};
render() {
const {
title, onCancel, ports,
} = this.props;
const data = this.props.data || {};
const formItemLayout = {
labelCol: {
xs: {
span: 24,
},
sm: {
span: 6,
},
},
wrapperCol: {
xs: {
span: 24,
},
sm: {
span: 16,
},
|
return (
<Modal width={700} title={title} onOk={this.handleSubmit} onCancel={onCancel} visible>
<Form onSubmit={this.handleSubmit}>
<FormItem {...formItemLayout} label="检测端口">
{getFieldDecorator("port", {
initialValue:
appProbeUtil.getPort(data) || (ports.length ? ports[0].container_port : ""),
})(<Select>
{ports.map(port => <Option key={port.container_port} value={port.container_port}>{port.container_port}</Option>)}
</Select>)}
</FormItem>
<FormItem {...formItemLayout} label="探针协议">
{getFieldDecorator("scheme", {
initialValue: data.scheme || "tcp",
})(<RadioGroup
options={[
{
label: "tcp",
value: "tcp",
},
{
label: "http",
value: "http",
},
]}
/>)}
</FormItem>
<FormItem
{...formItemLayout}
label="http请求头"
style={{
display: scheme === "tcp" ? "none" : "",
}}
>
{getFieldDecorator("http_header", {
initialValue: data.http_header || "",
})(<KVinput />)}
</FormItem>
<FormItem
{...formItemLayout}
label="路径"
style={{
display: scheme === "tcp" ? "none" : "",
}}
>
{getFieldDecorator("path", {
initialValue: data.path || "",
rules: [
{
validator: this.checkPath,
},
],
})(<Input placeholder="2xx and 3xx response codes are treated as healthy" />)}
</FormItem>
<FormItem {...formItemLayout} label="初始化等候时间">
{getFieldDecorator("initial_delay_second", {
initialValue: data.initial_delay_second || "20",
rules: [
{
required: true,
message: "请填写初始化等候时间",
},
],
})(<Input
type="number"
style={{
width: "80%",
}}
/>)}
<span
style={{
marginLeft: 8,
}}
>
seconds
</span>
</FormItem>
<FormItem {...formItemLayout} label="检测间隔时间">
{getFieldDecorator("period_second", {
initialValue: data.period_second || "3",
rules: [
{
required: true,
message: "请填写检测间隔时间",
},
],
})(<Input
type="number"
style={{
width: "80%",
}}
/>)}
<span
style={{
marginLeft: 8,
}}
>
seconds
</span>
</FormItem>
<FormItem {...formItemLayout} label="检测超时时间">
{getFieldDecorator("timeout_second", {
initialValue: data.timeout_second || "20",
rules: [
{
required: true,
message: "请填写检测超时时间",
},
],
})(<Input
type="number"
style={{
width: "80%",
}}
/>)}
<span
style={{
marginLeft: 8,
}}
>
seconds
</span>
</FormItem>
<FormItem {...formItemLayout} label="连续错误次数">
{getFieldDecorator("failure_threshold", {
initialValue: data.failure_threshold || "3",
rules: [
{
required: true,
message: "请填写连续错误次数",
},
],
})(<Input
type="number"
style={{
width: "80%",
}}
/>)}
</FormItem>
</Form>
</Modal>
);
}
}
|
},
};
const { getFieldDecorator, getFieldValue } = this.props.form;
const scheme = getFieldValue("scheme") || "tcp";
|
multi_map_message_type.py
|
MULTIMAP_PUT = 0x0201
MULTIMAP_GET = 0x0202
MULTIMAP_REMOVE = 0x0203
MULTIMAP_KEYSET = 0x0204
MULTIMAP_VALUES = 0x0205
MULTIMAP_ENTRYSET = 0x0206
MULTIMAP_CONTAINSKEY = 0x0207
MULTIMAP_CONTAINSVALUE = 0x0208
MULTIMAP_CONTAINSENTRY = 0x0209
MULTIMAP_SIZE = 0x020a
MULTIMAP_CLEAR = 0x020b
MULTIMAP_VALUECOUNT = 0x020c
MULTIMAP_ADDENTRYLISTENERTOKEY = 0x020d
MULTIMAP_ADDENTRYLISTENER = 0x020e
MULTIMAP_REMOVEENTRYLISTENER = 0x020f
MULTIMAP_LOCK = 0x0210
MULTIMAP_TRYLOCK = 0x0211
MULTIMAP_ISLOCKED = 0x0212
MULTIMAP_UNLOCK = 0x0213
|
MULTIMAP_FORCEUNLOCK = 0x0214
MULTIMAP_REMOVEENTRY = 0x0215
|
|
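The opcode constants above map one-to-one onto multimap operations, so a reverse lookup table is handy when tracing raw client traffic. A minimal sketch, assuming the file is importable as `multi_map_message_type`; the `describe` helper is illustrative and not part of the original module:

# Illustrative helper: build a code -> name table from the constants above
# so a raw message-type code can be logged symbolically while debugging.
import multi_map_message_type as mm

CODE_TO_NAME = {
    value: name
    for name, value in vars(mm).items()
    if name.startswith("MULTIMAP_") and isinstance(value, int)
}

def describe(message_type):
    """Return e.g. 'MULTIMAP_PUT (0x0201)' for a raw message-type code."""
    name = CODE_TO_NAME.get(message_type, "UNKNOWN")
    return "{0} (0x{1:04x})".format(name, message_type)

assert describe(0x0201) == "MULTIMAP_PUT (0x0201)"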
docfields.py
|
"""
sphinx.util.docfields
~~~~~~~~~~~~~~~~~~~~~
"Doc fields" are reST field lists in object descriptions that will
be domain-specifically transformed to a more appealing presentation.
:copyright: Copyright 2007-2021 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from typing import TYPE_CHECKING, Any, Dict, List, Tuple, Type, Union, cast
from docutils import nodes
from docutils.nodes import Node
from sphinx import addnodes
from sphinx.environment import BuildEnvironment
from sphinx.util.typing import TextlikeNode
if TYPE_CHECKING:
from sphinx.directive import ObjectDescription
def _is_single_paragraph(node: nodes.field_body) -> bool:
"""True if the node only contains one paragraph (and system messages)."""
if len(node) == 0:
return False
elif len(node) > 1:
for subnode in node[1:]: # type: Node
if not isinstance(subnode, nodes.system_message):
return False
if isinstance(node[0], nodes.paragraph):
return True
return False
class Field:
"""A doc field that is never grouped. It can have an argument or not, the
argument can be linked using a specified *rolename*. Field should be used
for doc fields that usually don't occur more than once.
The body can be linked using a specified *bodyrolename* if the content is
just a single inline or text node.
Example::
:returns: description of the return value
:rtype: description of the return type
"""
is_grouped = False
is_typed = False
def __init__(self, name: str, names: Tuple[str, ...] = (), label: str = None,
has_arg: bool = True, rolename: str = None, bodyrolename: str = None) -> None:
self.name = name
self.names = names
self.label = label
self.has_arg = has_arg
self.rolename = rolename
self.bodyrolename = bodyrolename
def make_xref(self, rolename: str, domain: str, target: str,
innernode: Type[TextlikeNode] = addnodes.literal_emphasis,
contnode: Node = None, env: BuildEnvironment = None) -> Node:
if not rolename:
return contnode or innernode(target, target)
refnode = addnodes.pending_xref('', refdomain=domain, refexplicit=False,
reftype=rolename, reftarget=target)
refnode += contnode or innernode(target, target)
if env:
env.get_domain(domain).process_field_xref(refnode)
return refnode
def make_xrefs(self, rolename: str, domain: str, target: str,
innernode: Type[TextlikeNode] = addnodes.literal_emphasis,
contnode: Node = None, env: BuildEnvironment = None) -> List[Node]:
return [self.make_xref(rolename, domain, target, innernode, contnode, env)]
def make_entry(self, fieldarg: str, content: List[Node]) -> Tuple[str, List[Node]]:
return (fieldarg, content)
def make_field(self, types: Dict[str, List[Node]], domain: str,
item: Tuple, env: BuildEnvironment = None) -> nodes.field:
fieldarg, content = item
fieldname = nodes.field_name('', self.label)
if fieldarg:
fieldname += nodes.Text(' ')
fieldname.extend(self.make_xrefs(self.rolename, domain,
fieldarg, nodes.Text, env=env))
if len(content) == 1 and (
isinstance(content[0], nodes.Text) or
(isinstance(content[0], nodes.inline) and len(content[0]) == 1 and
isinstance(content[0][0], nodes.Text))):
content = self.make_xrefs(self.bodyrolename, domain,
content[0].astext(), contnode=content[0], env=env)
fieldbody = nodes.field_body('', nodes.paragraph('', '', *content))
return nodes.field('', fieldname, fieldbody)
class GroupedField(Field):
"""
A doc field that is grouped; i.e., all fields of that type will be
transformed into one field with its body being a bulleted list. It always
has an argument. The argument can be linked using the given *rolename*.
GroupedField should be used for doc fields that can occur more than once.
If *can_collapse* is true, this field will revert to a Field if only used
once.
Example::
:raises ErrorClass: description when it is raised
"""
is_grouped = True
list_type = nodes.bullet_list
def
|
(self, name: str, names: Tuple[str, ...] = (), label: str = None,
rolename: str = None, can_collapse: bool = False) -> None:
super().__init__(name, names, label, True, rolename)
self.can_collapse = can_collapse
def make_field(self, types: Dict[str, List[Node]], domain: str,
items: Tuple, env: BuildEnvironment = None) -> nodes.field:
fieldname = nodes.field_name('', self.label)
listnode = self.list_type()
for fieldarg, content in items:
par = nodes.paragraph()
par.extend(self.make_xrefs(self.rolename, domain, fieldarg,
addnodes.literal_strong, env=env))
par += nodes.Text(' -- ')
par += content
listnode += nodes.list_item('', par)
if len(items) == 1 and self.can_collapse:
list_item = cast(nodes.list_item, listnode[0])
fieldbody = nodes.field_body('', list_item[0])
return nodes.field('', fieldname, fieldbody)
fieldbody = nodes.field_body('', listnode)
return nodes.field('', fieldname, fieldbody)
class TypedField(GroupedField):
"""
A doc field that is grouped and has type information for the arguments. It
always has an argument. The argument can be linked using the given
*rolename*, the type using the given *typerolename*.
Two uses are possible: either parameter and type description are given
separately, using a field from *names* and one from *typenames*,
respectively, or both are given using a field from *names*; see the example.
Example::
:param foo: description of parameter foo
:type foo: SomeClass
-- or --
:param SomeClass foo: description of parameter foo
"""
is_typed = True
def __init__(self, name: str, names: Tuple[str, ...] = (), typenames: Tuple[str, ...] = (),
label: str = None, rolename: str = None, typerolename: str = None,
can_collapse: bool = False) -> None:
super().__init__(name, names, label, rolename, can_collapse)
self.typenames = typenames
self.typerolename = typerolename
def make_field(self, types: Dict[str, List[Node]], domain: str,
items: Tuple, env: BuildEnvironment = None) -> nodes.field:
def handle_item(fieldarg: str, content: str) -> nodes.paragraph:
par = nodes.paragraph()
par.extend(self.make_xrefs(self.rolename, domain, fieldarg,
addnodes.literal_strong, env=env))
if fieldarg in types:
par += nodes.Text(' (')
# NOTE: using .pop() here to prevent a single type node to be
# inserted twice into the doctree, which leads to
# inconsistencies later when references are resolved
fieldtype = types.pop(fieldarg)
if len(fieldtype) == 1 and isinstance(fieldtype[0], nodes.Text):
typename = fieldtype[0].astext()
par.extend(self.make_xrefs(self.typerolename, domain, typename,
addnodes.literal_emphasis, env=env))
else:
par += fieldtype
par += nodes.Text(')')
par += nodes.Text(' -- ')
par += content
return par
fieldname = nodes.field_name('', self.label)
if len(items) == 1 and self.can_collapse:
fieldarg, content = items[0]
bodynode: Node = handle_item(fieldarg, content)
else:
bodynode = self.list_type()
for fieldarg, content in items:
bodynode += nodes.list_item('', handle_item(fieldarg, content))
fieldbody = nodes.field_body('', bodynode)
return nodes.field('', fieldname, fieldbody)
class DocFieldTransformer:
"""
Transforms field lists in "doc field" syntax into better-looking
equivalents, using the field type definitions given on a domain.
"""
typemap: Dict[str, Tuple[Field, bool]] = None
def __init__(self, directive: "ObjectDescription") -> None:
self.directive = directive
self.typemap = directive.get_field_type_map()
def transform_all(self, node: addnodes.desc_content) -> None:
"""Transform all field list children of a node."""
# don't traverse, only handle field lists that are immediate children
for child in node:
if isinstance(child, nodes.field_list):
self.transform(child)
def transform(self, node: nodes.field_list) -> None:
"""Transform a single field list *node*."""
typemap = self.typemap
entries: List[Union[nodes.field, Tuple[Field, Any]]] = []
groupindices: Dict[str, int] = {}
types: Dict[str, Dict] = {}
# step 1: traverse all fields and collect field types and content
for field in cast(List[nodes.field], node):
assert len(field) == 2
field_name = cast(nodes.field_name, field[0])
field_body = cast(nodes.field_body, field[1])
try:
# split into field type and argument
fieldtype_name, fieldarg = field_name.astext().split(None, 1)
except ValueError:
# maybe an argument-less field type?
fieldtype_name, fieldarg = field_name.astext(), ''
typedesc, is_typefield = typemap.get(fieldtype_name, (None, None))
# collect the content, trying not to keep unnecessary paragraphs
if _is_single_paragraph(field_body):
paragraph = cast(nodes.paragraph, field_body[0])
content = paragraph.children
else:
content = field_body.children
# sort out unknown fields
if typedesc is None or typedesc.has_arg != bool(fieldarg):
# either the field name is unknown, or the argument doesn't
# match the spec; capitalize field name and be done with it
new_fieldname = fieldtype_name[0:1].upper() + fieldtype_name[1:]
if fieldarg:
new_fieldname += ' ' + fieldarg
field_name[0] = nodes.Text(new_fieldname)
entries.append(field)
# but if this has a type then we can at least link it
if (typedesc and is_typefield and content and
len(content) == 1 and isinstance(content[0], nodes.Text)):
typed_field = cast(TypedField, typedesc)
target = content[0].astext()
xrefs = typed_field.make_xrefs(
typed_field.typerolename,
self.directive.domain,
target,
contnode=content[0],
env=self.directive.state.document.settings.env
)
if _is_single_paragraph(field_body):
paragraph = cast(nodes.paragraph, field_body[0])
paragraph.clear()
paragraph.extend(xrefs)
else:
field_body.clear()
field_body += nodes.paragraph('', '', *xrefs)
continue
typename = typedesc.name
# if the field specifies a type, put it in the types collection
if is_typefield:
# filter out only inline nodes; others will result in invalid
# markup being written out
content = [n for n in content if isinstance(n, nodes.Inline) or
isinstance(n, nodes.Text)]
if content:
types.setdefault(typename, {})[fieldarg] = content
continue
# also support syntax like ``:param type name:``
if typedesc.is_typed:
try:
argtype, argname = fieldarg.split(None, 1)
except ValueError:
pass
else:
types.setdefault(typename, {})[argname] = \
[nodes.Text(argtype)]
fieldarg = argname
translatable_content = nodes.inline(field_body.rawsource,
translatable=True)
translatable_content.document = field_body.parent.document
translatable_content.source = field_body.parent.source
translatable_content.line = field_body.parent.line
translatable_content += content
# grouped entries need to be collected in one entry, while others
# get one entry per field
if typedesc.is_grouped:
if typename in groupindices:
group = cast(Tuple[Field, List], entries[groupindices[typename]])
else:
groupindices[typename] = len(entries)
group = (typedesc, [])
entries.append(group)
new_entry = typedesc.make_entry(fieldarg, [translatable_content])
group[1].append(new_entry)
else:
new_entry = typedesc.make_entry(fieldarg, [translatable_content])
entries.append((typedesc, new_entry))
# step 2: all entries are collected, construct the new field list
new_list = nodes.field_list()
for entry in entries:
if isinstance(entry, nodes.field):
# pass-through old field
new_list += entry
else:
fieldtype, items = entry
fieldtypes = types.get(fieldtype.name, {})
env = self.directive.state.document.settings.env
new_list += fieldtype.make_field(fieldtypes, self.directive.domain,
items, env=env)
node.replace_self(new_list)
|
__init__
|
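For context on how the classes above are consumed: a domain directive lists Field/GroupedField/TypedField instances in its `doc_field_types`, and DocFieldTransformer (fed by `directive.get_field_type_map()`) rewrites the matching field lists. A minimal sketch of such a directive; the directive class and role names are illustrative, while the constructor keyword arguments follow the signatures defined above:

# Hypothetical directive wiring the doc-field classes together; only the
# Field/GroupedField/TypedField keyword arguments come from the file above.
from sphinx.directives import ObjectDescription
from sphinx.util.docfields import Field, GroupedField, TypedField

class ExampleCallableDirective(ObjectDescription):
    doc_field_types = [
        # handles ":param SomeClass foo:" as well as ":param foo:" + ":type foo:"
        TypedField('parameter', label='Parameters',
                   names=('param', 'arg'), typenames=('type',),
                   typerolename='class', can_collapse=True),
        # every ":raises X:" field is merged into one bulleted list
        GroupedField('exceptions', label='Raises',
                     names=('raises', 'raise'), rolename='class',
                     can_collapse=True),
        # ":returns:" occurs at most once, so an ungrouped Field suffices
        Field('returnvalue', label='Returns',
              names=('returns', 'return'), has_arg=False),
    ]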
cleaner.ts
|
// import { parse, HTMLElement } from 'node-html-parser';
export default function
|
(html: string): string {
html = html
.replace(/[\r\n]/g, '') // strip CR/LF characters
.replace(
/<iframe src="https:\/\/store\.steampowered\.com\/widget\/\d+\/" style="border:none;height:190px;width:100%;max-width:646px;"><\/iframe>/g,
''
) // remove the empty Steam store widget iframe
.replace(/(<br\s?\/>){2,}/g, '<br/><br/>') // collapse redundant line breaks
.replace(/src="forum\.php/g, 'src="https://steamcn.com/forum.php') //相对地址添加域名
.replace(/src="static/g, 'src="https://steamcn.com/static')
.replace(/href="forum\.php/g, 'href="https://steamcn.com/forum.php')
.replace(/font size="(7|6)"/g, 'font size="5"') // cap the maximum font size at 5
.replace(
/"https:\/\/steamcn\.com\/static\/image\/common\/none\.gif" zoomfile=/g,
''
) // drop the placeholder none.gif from default src attributes
.replace(
/<div class='showhide'><p>隐藏内容,<a href='javascript:;' onClick="var s=this\.parentNode\.parentNode\.getElementsByClassName\('spoiler'\)\[0\];'none'==s\.style\.display\?\(s\.style\.display='block',this\.innerHTML='点击隐藏'\) : \(s\.style\.display='none',this\.innerHTML='点击显示'\);">点击显示<\/a><\/p><div style='display: none;' class='spoiler'>/g,
"<div class='showhide'><div class='spoiler'>"
) // strip the spoiler toggle markup and show the hidden content directly
.replace(/width="\d+"/g, '') // strip image width/height attributes
.replace(/height="\d+"/g, '')
.replace(/<ignore_js_op><dl class="tattl attm"><dt><\/dt><dd>/g, '') // remove the left padding the dd adds to attachment lists not embedded in the post
.replace(/<\/div><\/dd><\/dl><\/ignore_js_op>/g, '')
.replace(
/<script type="text\/javascript">replyreload += ',' + \d+;<\/script><script type="text\/javascript">replyreload += ',' + \d+;<\/script>/g,
''
) // strip script tags
.replace(
/<script type='text\/javascript'> if \(typeof jQuery(.+)return false; }\); } <\/script>/g,
''
)
.replace(
/<script type="text\/javascript" reload="1">(.+)'true'\);<\/script><br\/><br\/>/g,
''
)
.replace(
/<i class="pstatus">(.+编辑) <\/i>(<br\/>)?/g,
'<div align="center"><font color="#808080"><i class="pstatus">$1</i></font></div>'
) // restyle the "last edited" notice
.replace(
/<div class="modact">.+(分类|移动|合并|关闭|提升|限时高亮|加入精华|审核通过)<\/a><\/div>/g,
''
); // strip moderation action notices (move/merge/close/etc.)
/*
const root = parse(html) as HTMLElement;
console.log(root);
*/
// console.log('HTML::', html);
return html;
}
|
contentCleaner
|
scene.js
|
class Scene extends Phaser.Scene{
constructor(){
super({ key: "Scene" });
this.player=null;
this.platforms=null;
this.heart=null;
this.lose=null;
this.enemy=null;
|
preload(){
this.load.audio('lose', 'music/lose.mp3');
this.load.audio('pick', 'music/pick.mp3');
this.load.image("platform",'assets/platform.png');
this.load.image("heart",'assets/heart.png');
this.load.image("lose",'assets/lose.png');
this.load.image("enemy","assets/c.png");
this.load.image("coin","assets/coin.png");
this.load.spritesheet('player','assets/rabbit3 - doux.png',{frameWidth:72 ,frameHeight:72 });
}
create(){
this.player=new Player(this,400,165).setInteractive();
this.lose=this.add.image(450,170,'lose').setInteractive();
this.input.setDraggable([this.lose,this.player]);
this.input.on('drag',function(pointer,gameObj,dragX,dragY)
{
gameObj.x=dragX;
gameObj.y=dragY;
})
this.input.on('dragstart', function (pointer, gameObject) {
gameObject.setTint(0xff69b4);
gameObject.setScale(2);
gameObject.setDepth(1);
})
this.input.on('dragend', function (pointer, gameObject) {
gameObject.setTint();
gameObject.setScale();
gameObject.setDepth();
})
this.score=0;
this.scoreText=this.add.text(10,10,"Score",{
fontSize:50,
fill:"white"
});
// this.player=new Player(this,400,165);
this.platforms=this.physics.add.staticGroup();
this.platforms.create(300,214,'platform');
this.platforms.create(500,214,'platform');
this.lose=this.physics.add.staticGroup();
this.lose.create(450,170,'lose');
this.heart=this.physics.add.staticGroup();
this.heart.create( 270,170 , 'heart');
this.enemy=new Enemy(this,100,570);
this.enemys = [this.enemy];
this.coins=this.physics.add.staticGroup();
this.coins.create(50,400,'coin');
this.coins.create(500,400,'coin');
this.physics.add.collider(this.player, this.platforms);
this.anims.create({
key:'left',
frames:this.anims.generateFrameNumbers('player',{start:14,end:16}),
frameRate:10,
repeat:1,
flipX:true,
});
this.anims.create({
key:'right',
frames:this.anims.generateFrameNumbers('player',{start:5,end: 7}),
frameRate: 10 ,
repeat:1
});
this.physics.add.overlap(
this.heart,
this.player,
this.player_touch_heart,
null,
this
);
this.physics.add.overlap(
this.coins,
this.enemy,
this.enemy_touch_coin,
null,
this
);
this.cursors = this.input.keyboard.createCursorKeys();
}
player_touch_heart(player,heart){
heart.disableBody(true, true);
this.sound.play('pick');
this.score++;
}
enemy_touch_coin(enemy,coins) {
let random = Math.floor(Math.random() * 1000) % 2; // coin flip: 0 or 1
switch (random) {
case 0:
enemy.setDirection("right");
break;
case 1:
enemy.setDirection("left");
break;
default:
break;
}
}
update(){
this.scoreText.setText("Score:"+this.score);
if(this.cursors.left.isDown){
this.player.setVelocityX(-200);
this.player.anims.play("left",true);
}else if(this.cursors.right.isDown){
this.player.setVelocityX(200);
this.player.anims.play("right",true);
}
else{
this.player.setVelocityX(0);
}
for (let enemy of this.enemys) {
if (enemy.getDirection() === "left") {
enemy.setVelocity(-50, 0);
} else if(enemy.getDirection() === "right"){
enemy.setVelocity(50, 0);
}
}
}
}
|
this.coins=null;
}
|
index.test.js
|
'use strict'
const src = '../../../../../../src'
const Test = require('tapes')(require('tape'))
const Sinon = require('sinon')
const DnsServer = require(`${src}/services/query/dns/server`)
const TcpServer = require(`${src}/services/query/dns/server/tcp`)
const UdpServer = require(`${src}/services/query/dns/server/udp`)
Test('DnsServer', dnsServerTest => {
let sandbox
dnsServerTest.beforeEach(t => {
sandbox = Sinon.createSandbox()
sandbox.stub(TcpServer, 'createServer')
sandbox.stub(UdpServer, 'createServer')
t.end()
})
dnsServerTest.afterEach(t => {
sandbox.restore()
t.end()
})
|
dnsServerTest.test('createServer should', createServerTest => {
createServerTest.test('create default server', test => {
let opts = {}
DnsServer.createServer(opts)
test.ok(UdpServer.createServer.calledOnce)
test.ok(UdpServer.createServer.calledWith(opts))
test.end()
})
createServerTest.end()
})
dnsServerTest.test('createTcpServer should', createTcpServerTest => {
createTcpServerTest.test('create TCP server', test => {
let opts = {}
DnsServer.createTcpServer(opts)
test.ok(TcpServer.createServer.calledOnce)
test.ok(TcpServer.createServer.calledWith(opts))
test.end()
})
createTcpServerTest.end()
})
dnsServerTest.test('createUdpServer should', createUdpServerTest => {
createUdpServerTest.test('create UDP server', test => {
let opts = {}
DnsServer.createUdpServer(opts)
test.ok(UdpServer.createServer.calledOnce)
test.ok(UdpServer.createServer.calledWith(opts))
test.end()
})
createUdpServerTest.end()
})
dnsServerTest.end()
})
| |
setup.py
|
import sys
from pathlib import Path
from setuptools import setup, find_packages
if __name__ == '__main__':
base_dir = Path(__file__).parent
src_dir = base_dir/'src'/'regmod'
sys.path.insert(0, src_dir.as_posix())
import __about__ as about
with (base_dir/'README.rst').open() as f:
long_description = f.read()
install_requirements = [
'numpy',
'scipy',
'pandas',
'xspline',
]
test_requirements = [
'pytest',
|
]
doc_requirements = []
setup(name=about.__title__,
version=about.__version__,
description=about.__summary__,
long_description=long_description,
license=about.__license__,
url=about.__uri__,
author=about.__author__,
author_email=about.__email__,
package_dir={'': 'src'},
packages=find_packages(where='src'),
include_package_data=True,
install_requires=install_requirements,
tests_require=test_requirements,
extras_require={
'docs': doc_requirements,
'test': test_requirements,
'dev': doc_requirements + test_requirements
},
zip_safe=False,)
|
'pytest-mock',
|
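setup.py above reads its metadata from `src/regmod/__about__.py` via `import __about__ as about`. A minimal sketch of the attribute names that import relies on; every value below is a placeholder, not taken from the real package:

# src/regmod/__about__.py -- placeholder values; only the attribute names
# matter, since setup.py reads them as about.__title__, about.__version__, etc.
__title__ = 'regmod'
__version__ = '0.0.0'
__summary__ = 'regression models (placeholder summary)'
__uri__ = 'https://example.com/regmod'
__author__ = 'Author Name'
__email__ = 'author@example.com'
__license__ = 'BSD (placeholder)'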
assetManager.js
|
var __webpack_exports__ = {};
function _createForOfIteratorHelper(o, allowArrayLike) { var it = typeof Symbol !== "undefined" && o[Symbol.iterator] || o["@@iterator"]; if (!it) { if (Array.isArray(o) || (it = _unsupportedIterableToArray(o)) || allowArrayLike && o && typeof o.length === "number") { if (it) o = it; var i = 0; var F = function F() {}; return { s: F, n: function n() { if (i >= o.length) return { done: true }; return { done: false, value: o[i++] }; }, e: function e(_e) { throw _e; }, f: F }; } throw new TypeError("Invalid attempt to iterate non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method."); } var normalCompletion = true, didErr = false, err; return { s: function s() { it = it.call(o); }, n: function n() { var step = it.next(); normalCompletion = step.done; return step; }, e: function e(_e2) { didErr = true; err = _e2; }, f: function f() { try { if (!normalCompletion && it.return != null) it.return(); } finally { if (didErr) throw err; } } }; }
function _unsupportedIterableToArray(o, minLen) { if (!o) return; if (typeof o === "string") return _arrayLikeToArray(o, minLen); var n = Object.prototype.toString.call(o).slice(8, -1); if (n === "Object" && o.constructor) n = o.constructor.name; if (n === "Map" || n === "Set") return Array.from(o); if (n === "Arguments" || /^(?:Ui|I)nt(?:8|16|32)(?:Clamped)?Array$/.test(n)) return _arrayLikeToArray(o, minLen); }
function _arrayLikeToArray(arr, len) { if (len == null || len > arr.length) len = arr.length; for (var i = 0, arr2 = new Array(len); i < len; i++) { arr2[i] = arr[i]; } return arr2; }
function _typeof(obj) { "@babel/helpers - typeof"; return _typeof = "function" == typeof Symbol && "symbol" == typeof Symbol.iterator ? function (obj) { return typeof obj; } : function (obj) { return obj && "function" == typeof Symbol && obj.constructor === Symbol && obj !== Symbol.prototype ? "symbol" : typeof obj; }, _typeof(obj); }
function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } }
function _defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } }
function _createClass(Constructor, protoProps, staticProps) { if (protoProps) _defineProperties(Constructor.prototype, protoProps); if (staticProps) _defineProperties(Constructor, staticProps); Object.defineProperty(Constructor, "prototype", { writable: false }); return Constructor; }
function _defineProperty(obj, key, value) { if (key in obj) { Object.defineProperty(obj, key, { value: value, enumerable: true, configurable: true, writable: true }); } else { obj[key] = value; } return obj; }
/**
* Asset loading helper
*/
var AssetManager = /*#__PURE__*/function () {
function AssetManager() {
_classCallCheck(this, AssetManager);
_defineProperty(this, "items", void 0);
_defineProperty(this, "assets", void 0);
this.items = new Map();
this.assets = [];
}
_createClass(AssetManager, [{
key: "setAssets",
value: function setAssets(assets) {
this.assets = assets;
}
}, {
key: "load",
value: function load() {
var _this = this;
var params = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : {};
// Filter assets
var assetsToLoad = this.assets.filter(function (asset) {
if (!params.name && !params.label) return true;
if (params.name === asset.name) return true;
if (typeof params.label === 'string' && asset.label === params.label) return true;
if (_typeof(params.label) === 'object' && Array.isArray(params.label) && params.label.includes(asset.label)) return true;
return false;
});
if (!assetsToLoad.length) {
if (params.onProgress) params.onProgress(100);
return undefined;
}
var promises = [];
var assetsLoaded = 0; // eslint-disable-next-line no-restricted-syntax
var _iterator = _createForOfIteratorHelper(assetsToLoad),
_step;
try {
var _loop = function _loop() {
var asset = _step.value;
promises.push( // eslint-disable-next-line @typescript-eslint/no-loop-func
new Promise(function (resolve, reject) {
if (asset.loadHandler) {
asset.loadHandler(_this, function () {
assetsLoaded += 1;
if (params.onProgress) params.onProgress(assetsLoaded / assetsToLoad.length * 100);
resolve();
}, function () {
reject(new Error("Cannot load asset \"".concat(asset.name, "\"")));
});
return;
}
asset.loader.load(asset.path, function (result) {
_this.setItem(asset.name, asset.postLoadHandler ? asset.postLoadHandler(result) : result);
assetsLoaded += 1;
if (params.onProgress) params.onProgress(assetsLoaded / assetsToLoad.length * 100);
resolve();
}, function () {
return false;
}, function () {
reject(new Error("Cannot load asset \"".concat(asset.name, "\"")));
});
}));
};
for (_iterator.s(); !(_step = _iterator.n()).done;) {
|
} finally {
_iterator.f();
}
return Promise.all(promises);
}
}, {
key: "setItem",
value: function setItem(name) {
var item = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : null;
this.items.set(name, item);
}
}, {
key: "getItem",
value: function getItem(name) {
var item = this.items.get(name);
if (!item) throw new Error("Item ".concat(name, " was not found."));
return item;
}
}]);
return AssetManager;
}();
var assetManager = new AssetManager();
/* harmony default export */ __webpack_exports__["Z"] = (assetManager);
var __webpack_exports__default = __webpack_exports__.Z;
export { __webpack_exports__default as default };
|
_loop();
}
} catch (err) {
_iterator.e(err);
|
schema.go
|
// Copyright (c) 2004-present Facebook All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Code generated (@generated) by entc, DO NOT EDIT.
package migrate
import (
"github.com/facebookincubator/ent/dialect/sql/schema"
"github.com/facebookincubator/ent/schema/field"
)
var (
// ActionsRulesColumns holds the columns for the "actions_rules" table.
ActionsRulesColumns = []*schema.Column{
{Name: "id", Type: field.TypeInt, Increment: true},
{Name: "create_time", Type: field.TypeTime},
{Name: "update_time", Type: field.TypeTime},
{Name: "name", Type: field.TypeString},
{Name: "trigger_id", Type: field.TypeString},
{Name: "rule_filters", Type: field.TypeJSON},
{Name: "rule_actions", Type: field.TypeJSON},
}
// ActionsRulesTable holds the schema information for the "actions_rules" table.
ActionsRulesTable = &schema.Table{
Name: "actions_rules",
Columns: ActionsRulesColumns,
PrimaryKey: []*schema.Column{ActionsRulesColumns[0]},
ForeignKeys: []*schema.ForeignKey{},
}
// ActivitiesColumns holds the columns for the "activities" table.
ActivitiesColumns = []*schema.Column{
{Name: "id", Type: field.TypeInt, Increment: true},
{Name: "create_time", Type: field.TypeTime},
{Name: "update_time", Type: field.TypeTime},
{Name: "changed_field", Type: field.TypeEnum, Enums: []string{"STATUS", "PRIORITY", "ASSIGNEE", "CREATION_DATE", "OWNER"}},
{Name: "is_create", Type: field.TypeBool},
{Name: "old_value", Type: field.TypeString, Nullable: true},
{Name: "new_value", Type: field.TypeString, Nullable: true},
{Name: "activity_author", Type: field.TypeInt, Nullable: true},
{Name: "work_order_activities", Type: field.TypeInt, Nullable: true},
}
// ActivitiesTable holds the schema information for the "activities" table.
ActivitiesTable = &schema.Table{
Name: "activities",
Columns: ActivitiesColumns,
PrimaryKey: []*schema.Column{ActivitiesColumns[0]},
ForeignKeys: []*schema.ForeignKey{
{
Symbol: "activities_users_author",
Columns: []*schema.Column{ActivitiesColumns[7]},
RefColumns: []*schema.Column{UsersColumns[0]},
OnDelete: schema.SetNull,
},
{
Symbol: "activities_work_orders_activities",
Columns: []*schema.Column{ActivitiesColumns[8]},
RefColumns: []*schema.Column{WorkOrdersColumns[0]},
OnDelete: schema.SetNull,
},
},
}
// CheckListCategoriesColumns holds the columns for the "check_list_categories" table.
CheckListCategoriesColumns = []*schema.Column{
{Name: "id", Type: field.TypeInt, Increment: true},
{Name: "create_time", Type: field.TypeTime},
{Name: "update_time", Type: field.TypeTime},
{Name: "title", Type: field.TypeString},
{Name: "description", Type: field.TypeString, Nullable: true},
{Name: "work_order_check_list_categories", Type: field.TypeInt, Nullable: true},
}
// CheckListCategoriesTable holds the schema information for the "check_list_categories" table.
CheckListCategoriesTable = &schema.Table{
Name: "check_list_categories",
Columns: CheckListCategoriesColumns,
PrimaryKey: []*schema.Column{CheckListCategoriesColumns[0]},
ForeignKeys: []*schema.ForeignKey{
{
Symbol: "check_list_categories_work_orders_check_list_categories",
Columns: []*schema.Column{CheckListCategoriesColumns[5]},
RefColumns: []*schema.Column{WorkOrdersColumns[0]},
OnDelete: schema.SetNull,
},
},
}
// CheckListCategoryDefinitionsColumns holds the columns for the "check_list_category_definitions" table.
CheckListCategoryDefinitionsColumns = []*schema.Column{
{Name: "id", Type: field.TypeInt, Increment: true},
{Name: "create_time", Type: field.TypeTime},
{Name: "update_time", Type: field.TypeTime},
{Name: "title", Type: field.TypeString},
{Name: "description", Type: field.TypeString, Nullable: true},
{Name: "work_order_template_check_list_category_definitions", Type: field.TypeInt, Nullable: true},
{Name: "work_order_type_check_list_category_definitions", Type: field.TypeInt, Nullable: true},
}
// CheckListCategoryDefinitionsTable holds the schema information for the "check_list_category_definitions" table.
CheckListCategoryDefinitionsTable = &schema.Table{
Name: "check_list_category_definitions",
Columns: CheckListCategoryDefinitionsColumns,
PrimaryKey: []*schema.Column{CheckListCategoryDefinitionsColumns[0]},
ForeignKeys: []*schema.ForeignKey{
{
Symbol: "check_list_category_definitions_work_order_templates_check_list_category_definitions",
Columns: []*schema.Column{CheckListCategoryDefinitionsColumns[5]},
RefColumns: []*schema.Column{WorkOrderTemplatesColumns[0]},
OnDelete: schema.SetNull,
},
{
Symbol: "check_list_category_definitions_work_order_types_check_list_category_definitions",
Columns: []*schema.Column{CheckListCategoryDefinitionsColumns[6]},
RefColumns: []*schema.Column{WorkOrderTypesColumns[0]},
OnDelete: schema.SetNull,
},
},
}
// CheckListItemsColumns holds the columns for the "check_list_items" table.
CheckListItemsColumns = []*schema.Column{
{Name: "id", Type: field.TypeInt, Increment: true},
{Name: "title", Type: field.TypeString},
{Name: "type", Type: field.TypeString},
{Name: "index", Type: field.TypeInt, Nullable: true},
{Name: "checked", Type: field.TypeBool, Nullable: true},
{Name: "string_val", Type: field.TypeString, Nullable: true},
{Name: "enum_values", Type: field.TypeString, Nullable: true},
{Name: "enum_selection_mode_value", Type: field.TypeEnum, Nullable: true, Enums: []string{"single", "multiple"}},
{Name: "selected_enum_values", Type: field.TypeString, Nullable: true},
{Name: "yes_no_val", Type: field.TypeEnum, Nullable: true, Enums: []string{"YES", "NO"}},
{Name: "help_text", Type: field.TypeString, Nullable: true},
{Name: "check_list_category_check_list_items", Type: field.TypeInt, Nullable: true},
}
// CheckListItemsTable holds the schema information for the "check_list_items" table.
CheckListItemsTable = &schema.Table{
Name: "check_list_items",
Columns: CheckListItemsColumns,
PrimaryKey: []*schema.Column{CheckListItemsColumns[0]},
ForeignKeys: []*schema.ForeignKey{
{
Symbol: "check_list_items_check_list_categories_check_list_items",
Columns: []*schema.Column{CheckListItemsColumns[11]},
RefColumns: []*schema.Column{CheckListCategoriesColumns[0]},
OnDelete: schema.SetNull,
},
},
}
// CheckListItemDefinitionsColumns holds the columns for the "check_list_item_definitions" table.
CheckListItemDefinitionsColumns = []*schema.Column{
{Name: "id", Type: field.TypeInt, Increment: true},
{Name: "create_time", Type: field.TypeTime},
{Name: "update_time", Type: field.TypeTime},
{Name: "title", Type: field.TypeString},
{Name: "type", Type: field.TypeString},
{Name: "index", Type: field.TypeInt, Nullable: true},
{Name: "enum_values", Type: field.TypeString, Nullable: true},
{Name: "enum_selection_mode_value", Type: field.TypeEnum, Nullable: true, Enums: []string{"single", "multiple"}},
{Name: "help_text", Type: field.TypeString, Nullable: true},
{Name: "check_list_category_definition_check_list_item_definitions", Type: field.TypeInt, Nullable: true},
}
// CheckListItemDefinitionsTable holds the schema information for the "check_list_item_definitions" table.
CheckListItemDefinitionsTable = &schema.Table{
Name: "check_list_item_definitions",
Columns: CheckListItemDefinitionsColumns,
PrimaryKey: []*schema.Column{CheckListItemDefinitionsColumns[0]},
ForeignKeys: []*schema.ForeignKey{
{
Symbol: "check_list_item_definitions_check_list_category_definitions_check_list_item_definitions",
Columns: []*schema.Column{CheckListItemDefinitionsColumns[9]},
RefColumns: []*schema.Column{CheckListCategoryDefinitionsColumns[0]},
OnDelete: schema.SetNull,
},
},
}
// CommentsColumns holds the columns for the "comments" table.
CommentsColumns = []*schema.Column{
{Name: "id", Type: field.TypeInt, Increment: true},
{Name: "create_time", Type: field.TypeTime},
{Name: "update_time", Type: field.TypeTime},
{Name: "text", Type: field.TypeString},
{Name: "comment_author", Type: field.TypeInt, Nullable: true},
{Name: "project_comments", Type: field.TypeInt, Nullable: true},
{Name: "work_order_comments", Type: field.TypeInt, Nullable: true},
}
// CommentsTable holds the schema information for the "comments" table.
CommentsTable = &schema.Table{
Name: "comments",
Columns: CommentsColumns,
PrimaryKey: []*schema.Column{CommentsColumns[0]},
ForeignKeys: []*schema.ForeignKey{
{
Symbol: "comments_users_author",
Columns: []*schema.Column{CommentsColumns[4]},
RefColumns: []*schema.Column{UsersColumns[0]},
OnDelete: schema.SetNull,
},
{
Symbol: "comments_projects_comments",
Columns: []*schema.Column{CommentsColumns[5]},
RefColumns: []*schema.Column{ProjectsColumns[0]},
OnDelete: schema.SetNull,
},
{
Symbol: "comments_work_orders_comments",
Columns: []*schema.Column{CommentsColumns[6]},
RefColumns: []*schema.Column{WorkOrdersColumns[0]},
OnDelete: schema.SetNull,
},
},
}
// CustomersColumns holds the columns for the "customers" table.
CustomersColumns = []*schema.Column{
{Name: "id", Type: field.TypeInt, Increment: true},
{Name: "create_time", Type: field.TypeTime},
{Name: "update_time", Type: field.TypeTime},
{Name: "name", Type: field.TypeString, Unique: true},
{Name: "external_id", Type: field.TypeString, Unique: true, Nullable: true},
}
// CustomersTable holds the schema information for the "customers" table.
CustomersTable = &schema.Table{
Name: "customers",
Columns: CustomersColumns,
PrimaryKey: []*schema.Column{CustomersColumns[0]},
ForeignKeys: []*schema.ForeignKey{},
}
// EquipmentColumns holds the columns for the "equipment" table.
EquipmentColumns = []*schema.Column{
{Name: "id", Type: field.TypeInt, Increment: true},
{Name: "create_time", Type: field.TypeTime},
{Name: "update_time", Type: field.TypeTime},
{Name: "name", Type: field.TypeString},
{Name: "future_state", Type: field.TypeString, Nullable: true},
{Name: "device_id", Type: field.TypeString, Nullable: true},
{Name: "external_id", Type: field.TypeString, Unique: true, Nullable: true},
{Name: "equipment_type", Type: field.TypeInt, Nullable: true},
{Name: "equipment_work_order", Type: field.TypeInt, Nullable: true},
{Name: "equipment_position_attachment", Type: field.TypeInt, Unique: true, Nullable: true},
{Name: "location_equipment", Type: field.TypeInt, Nullable: true},
}
// EquipmentTable holds the schema information for the "equipment" table.
EquipmentTable = &schema.Table{
Name: "equipment",
Columns: EquipmentColumns,
PrimaryKey: []*schema.Column{EquipmentColumns[0]},
ForeignKeys: []*schema.ForeignKey{
{
Symbol: "equipment_equipment_types_type",
Columns: []*schema.Column{EquipmentColumns[7]},
RefColumns: []*schema.Column{EquipmentTypesColumns[0]},
OnDelete: schema.SetNull,
},
{
Symbol: "equipment_work_orders_work_order",
Columns: []*schema.Column{EquipmentColumns[8]},
RefColumns: []*schema.Column{WorkOrdersColumns[0]},
OnDelete: schema.SetNull,
},
{
Symbol: "equipment_equipment_positions_attachment",
Columns: []*schema.Column{EquipmentColumns[9]},
RefColumns: []*schema.Column{EquipmentPositionsColumns[0]},
OnDelete: schema.SetNull,
},
{
Symbol: "equipment_locations_equipment",
Columns: []*schema.Column{EquipmentColumns[10]},
RefColumns: []*schema.Column{LocationsColumns[0]},
OnDelete: schema.SetNull,
},
},
}
// EquipmentCategoriesColumns holds the columns for the "equipment_categories" table.
EquipmentCategoriesColumns = []*schema.Column{
{Name: "id", Type: field.TypeInt, Increment: true},
{Name: "create_time", Type: field.TypeTime},
{Name: "update_time", Type: field.TypeTime},
{Name: "name", Type: field.TypeString, Unique: true},
}
// EquipmentCategoriesTable holds the schema information for the "equipment_categories" table.
EquipmentCategoriesTable = &schema.Table{
Name: "equipment_categories",
Columns: EquipmentCategoriesColumns,
PrimaryKey: []*schema.Column{EquipmentCategoriesColumns[0]},
ForeignKeys: []*schema.ForeignKey{},
}
// EquipmentPortsColumns holds the columns for the "equipment_ports" table.
EquipmentPortsColumns = []*schema.Column{
{Name: "id", Type: field.TypeInt, Increment: true},
{Name: "create_time", Type: field.TypeTime},
{Name: "update_time", Type: field.TypeTime},
{Name: "equipment_ports", Type: field.TypeInt, Nullable: true},
{Name: "equipment_port_definition", Type: field.TypeInt, Nullable: true},
{Name: "equipment_port_link", Type: field.TypeInt, Nullable: true},
}
// EquipmentPortsTable holds the schema information for the "equipment_ports" table.
EquipmentPortsTable = &schema.Table{
Name: "equipment_ports",
Columns: EquipmentPortsColumns,
PrimaryKey: []*schema.Column{EquipmentPortsColumns[0]},
ForeignKeys: []*schema.ForeignKey{
{
Symbol: "equipment_ports_equipment_ports",
Columns: []*schema.Column{EquipmentPortsColumns[3]},
RefColumns: []*schema.Column{EquipmentColumns[0]},
OnDelete: schema.SetNull,
},
{
Symbol: "equipment_ports_equipment_port_definitions_definition",
Columns: []*schema.Column{EquipmentPortsColumns[4]},
RefColumns: []*schema.Column{EquipmentPortDefinitionsColumns[0]},
OnDelete: schema.SetNull,
},
{
Symbol: "equipment_ports_links_link",
Columns: []*schema.Column{EquipmentPortsColumns[5]},
RefColumns: []*schema.Column{LinksColumns[0]},
OnDelete: schema.SetNull,
},
},
Indexes: []*schema.Index{
{
Name: "equipmentport_equipment_port_definition_equipment_ports",
Unique: true,
Columns: []*schema.Column{EquipmentPortsColumns[4], EquipmentPortsColumns[3]},
},
},
}
// EquipmentPortDefinitionsColumns holds the columns for the "equipment_port_definitions" table.
EquipmentPortDefinitionsColumns = []*schema.Column{
{Name: "id", Type: field.TypeInt, Increment: true},
{Name: "create_time", Type: field.TypeTime},
{Name: "update_time", Type: field.TypeTime},
{Name: "name", Type: field.TypeString},
{Name: "index", Type: field.TypeInt, Nullable: true},
{Name: "bandwidth", Type: field.TypeString, Nullable: true},
{Name: "visibility_label", Type: field.TypeString, Nullable: true},
{Name: "equipment_port_definition_equipment_port_type", Type: field.TypeInt, Nullable: true},
{Name: "equipment_type_port_definitions", Type: field.TypeInt, Nullable: true},
}
// EquipmentPortDefinitionsTable holds the schema information for the "equipment_port_definitions" table.
EquipmentPortDefinitionsTable = &schema.Table{
Name: "equipment_port_definitions",
Columns: EquipmentPortDefinitionsColumns,
PrimaryKey: []*schema.Column{EquipmentPortDefinitionsColumns[0]},
ForeignKeys: []*schema.ForeignKey{
{
Symbol: "equipment_port_definitions_equipment_port_types_equipment_port_type",
Columns: []*schema.Column{EquipmentPortDefinitionsColumns[7]},
RefColumns: []*schema.Column{EquipmentPortTypesColumns[0]},
OnDelete: schema.SetNull,
},
{
Symbol: "equipment_port_definitions_equipment_types_port_definitions",
Columns: []*schema.Column{EquipmentPortDefinitionsColumns[8]},
RefColumns: []*schema.Column{EquipmentTypesColumns[0]},
OnDelete: schema.SetNull,
},
},
}
// EquipmentPortTypesColumns holds the columns for the "equipment_port_types" table.
EquipmentPortTypesColumns = []*schema.Column{
{Name: "id", Type: field.TypeInt, Increment: true},
{Name: "create_time", Type: field.TypeTime},
{Name: "update_time", Type: field.TypeTime},
{Name: "name", Type: field.TypeString, Unique: true},
}
// EquipmentPortTypesTable holds the schema information for the "equipment_port_types" table.
EquipmentPortTypesTable = &schema.Table{
Name: "equipment_port_types",
Columns: EquipmentPortTypesColumns,
PrimaryKey: []*schema.Column{EquipmentPortTypesColumns[0]},
ForeignKeys: []*schema.ForeignKey{},
}
// EquipmentPositionsColumns holds the columns for the "equipment_positions" table.
EquipmentPositionsColumns = []*schema.Column{
{Name: "id", Type: field.TypeInt, Increment: true},
{Name: "create_time", Type: field.TypeTime},
{Name: "update_time", Type: field.TypeTime},
{Name: "equipment_positions", Type: field.TypeInt, Nullable: true},
{Name: "equipment_position_definition", Type: field.TypeInt, Nullable: true},
}
// EquipmentPositionsTable holds the schema information for the "equipment_positions" table.
EquipmentPositionsTable = &schema.Table{
Name: "equipment_positions",
Columns: EquipmentPositionsColumns,
PrimaryKey: []*schema.Column{EquipmentPositionsColumns[0]},
ForeignKeys: []*schema.ForeignKey{
{
Symbol: "equipment_positions_equipment_positions",
Columns: []*schema.Column{EquipmentPositionsColumns[3]},
RefColumns: []*schema.Column{EquipmentColumns[0]},
OnDelete: schema.SetNull,
},
{
Symbol: "equipment_positions_equipment_position_definitions_definition",
Columns: []*schema.Column{EquipmentPositionsColumns[4]},
RefColumns: []*schema.Column{EquipmentPositionDefinitionsColumns[0]},
OnDelete: schema.SetNull,
},
},
Indexes: []*schema.Index{
{
Name: "equipmentposition_equipment_position_definition_equipment_positions",
Unique: true,
Columns: []*schema.Column{EquipmentPositionsColumns[4], EquipmentPositionsColumns[3]},
},
},
}
// EquipmentPositionDefinitionsColumns holds the columns for the "equipment_position_definitions" table.
EquipmentPositionDefinitionsColumns = []*schema.Column{
{Name: "id", Type: field.TypeInt, Increment: true},
{Name: "create_time", Type: field.TypeTime},
{Name: "update_time", Type: field.TypeTime},
{Name: "name", Type: field.TypeString},
{Name: "index", Type: field.TypeInt, Nullable: true},
{Name: "visibility_label", Type: field.TypeString, Nullable: true},
{Name: "equipment_type_position_definitions", Type: field.TypeInt, Nullable: true},
}
// EquipmentPositionDefinitionsTable holds the schema information for the "equipment_position_definitions" table.
EquipmentPositionDefinitionsTable = &schema.Table{
Name: "equipment_position_definitions",
Columns: EquipmentPositionDefinitionsColumns,
PrimaryKey: []*schema.Column{EquipmentPositionDefinitionsColumns[0]},
ForeignKeys: []*schema.ForeignKey{
{
Symbol: "equipment_position_definitions_equipment_types_position_definitions",
Columns: []*schema.Column{EquipmentPositionDefinitionsColumns[6]},
RefColumns: []*schema.Column{EquipmentTypesColumns[0]},
OnDelete: schema.SetNull,
},
},
}
// EquipmentTypesColumns holds the columns for the "equipment_types" table.
EquipmentTypesColumns = []*schema.Column{
{Name: "id", Type: field.TypeInt, Increment: true},
{Name: "create_time", Type: field.TypeTime},
{Name: "update_time", Type: field.TypeTime},
{Name: "name", Type: field.TypeString, Unique: true},
{Name: "equipment_type_category", Type: field.TypeInt, Nullable: true},
}
// EquipmentTypesTable holds the schema information for the "equipment_types" table.
EquipmentTypesTable = &schema.Table{
Name: "equipment_types",
Columns: EquipmentTypesColumns,
PrimaryKey: []*schema.Column{EquipmentTypesColumns[0]},
ForeignKeys: []*schema.ForeignKey{
{
Symbol: "equipment_types_equipment_categories_category",
Columns: []*schema.Column{EquipmentTypesColumns[4]},
RefColumns: []*schema.Column{EquipmentCategoriesColumns[0]},
OnDelete: schema.SetNull,
},
},
}
// FilesColumns holds the columns for the "files" table.
FilesColumns = []*schema.Column{
{Name: "id", Type: field.TypeInt, Increment: true},
{Name: "create_time", Type: field.TypeTime},
{Name: "update_time", Type: field.TypeTime},
{Name: "type", Type: field.TypeEnum, Enums: []string{"IMAGE", "FILE"}},
{Name: "name", Type: field.TypeString},
{Name: "size", Type: field.TypeInt, Nullable: true},
{Name: "modified_at", Type: field.TypeTime, Nullable: true},
{Name: "uploaded_at", Type: field.TypeTime, Nullable: true},
{Name: "content_type", Type: field.TypeString},
{Name: "store_key", Type: field.TypeString},
{Name: "category", Type: field.TypeString, Nullable: true},
{Name: "annotation", Type: field.TypeString, Nullable: true},
{Name: "check_list_item_files", Type: field.TypeInt, Nullable: true},
{Name: "equipment_files", Type: field.TypeInt, Nullable: true},
{Name: "floor_plan_image", Type: field.TypeInt, Unique: true, Nullable: true},
{Name: "location_files", Type: field.TypeInt, Nullable: true},
{Name: "survey_source_file", Type: field.TypeInt, Unique: true, Nullable: true},
{Name: "survey_question_photo_data", Type: field.TypeInt, Nullable: true},
{Name: "survey_question_images", Type: field.TypeInt, Nullable: true},
{Name: "user_profile_photo", Type: field.TypeInt, Unique: true, Nullable: true},
{Name: "work_order_files", Type: field.TypeInt, Nullable: true},
}
// FilesTable holds the schema information for the "files" table.
FilesTable = &schema.Table{
Name: "files",
Columns: FilesColumns,
PrimaryKey: []*schema.Column{FilesColumns[0]},
ForeignKeys: []*schema.ForeignKey{
{
Symbol: "files_check_list_items_files",
Columns: []*schema.Column{FilesColumns[12]},
RefColumns: []*schema.Column{CheckListItemsColumns[0]},
OnDelete: schema.SetNull,
},
{
Symbol: "files_equipment_files",
Columns: []*schema.Column{FilesColumns[13]},
RefColumns: []*schema.Column{EquipmentColumns[0]},
OnDelete: schema.SetNull,
},
{
Symbol: "files_floor_plans_image",
Columns: []*schema.Column{FilesColumns[14]},
RefColumns: []*schema.Column{FloorPlansColumns[0]},
OnDelete: schema.SetNull,
},
{
Symbol: "files_locations_files",
Columns: []*schema.Column{FilesColumns[15]},
RefColumns: []*schema.Column{LocationsColumns[0]},
OnDelete: schema.SetNull,
},
{
Symbol: "files_surveys_source_file",
Columns: []*schema.Column{FilesColumns[16]},
RefColumns: []*schema.Column{SurveysColumns[0]},
OnDelete: schema.SetNull,
},
{
Symbol: "files_survey_questions_photo_data",
Columns: []*schema.Column{FilesColumns[17]},
RefColumns: []*schema.Column{SurveyQuestionsColumns[0]},
OnDelete: schema.SetNull,
},
{
Symbol: "files_survey_questions_images",
Columns: []*schema.Column{FilesColumns[18]},
RefColumns: []*schema.Column{SurveyQuestionsColumns[0]},
OnDelete: schema.SetNull,
},
{
Symbol: "files_users_profile_photo",
Columns: []*schema.Column{FilesColumns[19]},
RefColumns: []*schema.Column{UsersColumns[0]},
OnDelete: schema.SetNull,
},
{
Symbol: "files_work_orders_files",
Columns: []*schema.Column{FilesColumns[20]},
RefColumns: []*schema.Column{WorkOrdersColumns[0]},
OnDelete: schema.SetNull,
},
},
}
// FloorPlansColumns holds the columns for the "floor_plans" table.
FloorPlansColumns = []*schema.Column{
{Name: "id", Type: field.TypeInt, Increment: true},
{Name: "create_time", Type: field.TypeTime},
{Name: "update_time", Type: field.TypeTime},
{Name: "name", Type: field.TypeString},
{Name: "floor_plan_location", Type: field.TypeInt, Nullable: true},
{Name: "floor_plan_reference_point", Type: field.TypeInt, Nullable: true},
{Name: "floor_plan_scale", Type: field.TypeInt, Nullable: true},
}
// FloorPlansTable holds the schema information for the "floor_plans" table.
FloorPlansTable = &schema.Table{
Name: "floor_plans",
Columns: FloorPlansColumns,
PrimaryKey: []*schema.Column{FloorPlansColumns[0]},
ForeignKeys: []*schema.ForeignKey{
{
Symbol: "floor_plans_locations_location",
Columns: []*schema.Column{FloorPlansColumns[4]},
RefColumns: []*schema.Column{LocationsColumns[0]},
OnDelete: schema.SetNull,
},
{
Symbol: "floor_plans_floor_plan_reference_points_reference_point",
Columns: []*schema.Column{FloorPlansColumns[5]},
RefColumns: []*schema.Column{FloorPlanReferencePointsColumns[0]},
OnDelete: schema.SetNull,
},
{
Symbol: "floor_plans_floor_plan_scales_scale",
Columns: []*schema.Column{FloorPlansColumns[6]},
RefColumns: []*schema.Column{FloorPlanScalesColumns[0]},
OnDelete: schema.SetNull,
},
},
}
// FloorPlanReferencePointsColumns holds the columns for the "floor_plan_reference_points" table.
FloorPlanReferencePointsColumns = []*schema.Column{
{Name: "id", Type: field.TypeInt, Increment: true},
{Name: "create_time", Type: field.TypeTime},
{Name: "update_time", Type: field.TypeTime},
{Name: "x", Type: field.TypeInt},
{Name: "y", Type: field.TypeInt},
{Name: "latitude", Type: field.TypeFloat64},
{Name: "longitude", Type: field.TypeFloat64},
}
// FloorPlanReferencePointsTable holds the schema information for the "floor_plan_reference_points" table.
FloorPlanReferencePointsTable = &schema.Table{
Name: "floor_plan_reference_points",
Columns: FloorPlanReferencePointsColumns,
PrimaryKey: []*schema.Column{FloorPlanReferencePointsColumns[0]},
ForeignKeys: []*schema.ForeignKey{},
}
// FloorPlanScalesColumns holds the columns for the "floor_plan_scales" table.
FloorPlanScalesColumns = []*schema.Column{
{Name: "id", Type: field.TypeInt, Increment: true},
{Name: "create_time", Type: field.TypeTime},
{Name: "update_time", Type: field.TypeTime},
{Name: "reference_point1_x", Type: field.TypeInt},
{Name: "reference_point1_y", Type: field.TypeInt},
{Name: "reference_point2_x", Type: field.TypeInt},
{Name: "reference_point2_y", Type: field.TypeInt},
{Name: "scale_in_meters", Type: field.TypeFloat64},
}
// FloorPlanScalesTable holds the schema information for the "floor_plan_scales" table.
FloorPlanScalesTable = &schema.Table{
Name: "floor_plan_scales",
Columns: FloorPlanScalesColumns,
PrimaryKey: []*schema.Column{FloorPlanScalesColumns[0]},
ForeignKeys: []*schema.ForeignKey{},
}
// HyperlinksColumns holds the columns for the "hyperlinks" table.
HyperlinksColumns = []*schema.Column{
{Name: "id", Type: field.TypeInt, Increment: true},
{Name: "create_time", Type: field.TypeTime},
{Name: "update_time", Type: field.TypeTime},
{Name: "url", Type: field.TypeString},
{Name: "name", Type: field.TypeString, Nullable: true},
{Name: "category", Type: field.TypeString, Nullable: true},
{Name: "equipment_hyperlinks", Type: field.TypeInt, Nullable: true},
{Name: "location_hyperlinks", Type: field.TypeInt, Nullable: true},
{Name: "work_order_hyperlinks", Type: field.TypeInt, Nullable: true},
}
// HyperlinksTable holds the schema information for the "hyperlinks" table.
HyperlinksTable = &schema.Table{
Name: "hyperlinks",
Columns: HyperlinksColumns,
PrimaryKey: []*schema.Column{HyperlinksColumns[0]},
ForeignKeys: []*schema.ForeignKey{
{
Symbol: "hyperlinks_equipment_hyperlinks",
Columns: []*schema.Column{HyperlinksColumns[6]},
RefColumns: []*schema.Column{EquipmentColumns[0]},
OnDelete: schema.SetNull,
},
{
Symbol: "hyperlinks_locations_hyperlinks",
Columns: []*schema.Column{HyperlinksColumns[7]},
RefColumns: []*schema.Column{LocationsColumns[0]},
OnDelete: schema.SetNull,
},
{
Symbol: "hyperlinks_work_orders_hyperlinks",
Columns: []*schema.Column{HyperlinksColumns[8]},
RefColumns: []*schema.Column{WorkOrdersColumns[0]},
OnDelete: schema.SetNull,
},
},
}
// LinksColumns holds the columns for the "links" table.
LinksColumns = []*schema.Column{
{Name: "id", Type: field.TypeInt, Increment: true},
{Name: "create_time", Type: field.TypeTime},
{Name: "update_time", Type: field.TypeTime},
{Name: "future_state", Type: field.TypeString, Nullable: true},
{Name: "link_work_order", Type: field.TypeInt, Nullable: true},
}
// LinksTable holds the schema information for the "links" table.
LinksTable = &schema.Table{
Name: "links",
Columns: LinksColumns,
PrimaryKey: []*schema.Column{LinksColumns[0]},
ForeignKeys: []*schema.ForeignKey{
{
Symbol: "links_work_orders_work_order",
Columns: []*schema.Column{LinksColumns[4]},
RefColumns: []*schema.Column{WorkOrdersColumns[0]},
OnDelete: schema.SetNull,
},
},
}
// LocationsColumns holds the columns for the "locations" table.
LocationsColumns = []*schema.Column{
{Name: "id", Type: field.TypeInt, Increment: true},
{Name: "create_time", Type: field.TypeTime},
{Name: "update_time", Type: field.TypeTime},
{Name: "name", Type: field.TypeString},
{Name: "external_id", Type: field.TypeString, Unique: true, Nullable: true},
{Name: "latitude", Type: field.TypeFloat64},
{Name: "longitude", Type: field.TypeFloat64},
{Name: "site_survey_needed", Type: field.TypeBool, Nullable: true},
{Name: "location_type", Type: field.TypeInt, Nullable: true},
{Name: "location_children", Type: field.TypeInt, Nullable: true},
}
// LocationsTable holds the schema information for the "locations" table.
LocationsTable = &schema.Table{
Name: "locations",
Columns: LocationsColumns,
PrimaryKey: []*schema.Column{LocationsColumns[0]},
ForeignKeys: []*schema.ForeignKey{
{
Symbol: "locations_location_types_type",
Columns: []*schema.Column{LocationsColumns[8]},
RefColumns: []*schema.Column{LocationTypesColumns[0]},
OnDelete: schema.SetNull,
},
{
Symbol: "locations_locations_children",
Columns: []*schema.Column{LocationsColumns[9]},
RefColumns: []*schema.Column{LocationsColumns[0]},
OnDelete: schema.SetNull,
},
},
Indexes: []*schema.Index{
{
Name: "location_name_location_type_location_children",
Unique: true,
Columns: []*schema.Column{LocationsColumns[3], LocationsColumns[8], LocationsColumns[9]},
},
},
}
// LocationTypesColumns holds the columns for the "location_types" table.
LocationTypesColumns = []*schema.Column{
{Name: "id", Type: field.TypeInt, Increment: true},
{Name: "create_time", Type: field.TypeTime},
{Name: "update_time", Type: field.TypeTime},
{Name: "site", Type: field.TypeBool},
{Name: "name", Type: field.TypeString, Unique: true},
{Name: "map_type", Type: field.TypeString, Nullable: true},
{Name: "map_zoom_level", Type: field.TypeInt, Nullable: true, Default: 7},
{Name: "index", Type: field.TypeInt},
}
// LocationTypesTable holds the schema information for the "location_types" table.
LocationTypesTable = &schema.Table{
Name: "location_types",
Columns: LocationTypesColumns,
PrimaryKey: []*schema.Column{LocationTypesColumns[0]},
ForeignKeys: []*schema.ForeignKey{},
}
// PermissionsPoliciesColumns holds the columns for the "permissions_policies" table.
PermissionsPoliciesColumns = []*schema.Column{
{Name: "id", Type: field.TypeInt, Increment: true},
{Name: "create_time", Type: field.TypeTime},
{Name: "update_time", Type: field.TypeTime},
{Name: "name", Type: field.TypeString, Unique: true},
{Name: "description", Type: field.TypeString, Nullable: true},
{Name: "is_global", Type: field.TypeBool, Nullable: true},
{Name: "inventory_policy", Type: field.TypeJSON, Nullable: true},
{Name: "workforce_policy", Type: field.TypeJSON, Nullable: true},
}
// PermissionsPoliciesTable holds the schema information for the "permissions_policies" table.
PermissionsPoliciesTable = &schema.Table{
Name: "permissions_policies",
Columns: PermissionsPoliciesColumns,
PrimaryKey: []*schema.Column{PermissionsPoliciesColumns[0]},
ForeignKeys: []*schema.ForeignKey{},
}
// ProjectsColumns holds the columns for the "projects" table.
ProjectsColumns = []*schema.Column{
{Name: "id", Type: field.TypeInt, Increment: true},
{Name: "create_time", Type: field.TypeTime},
{Name: "update_time", Type: field.TypeTime},
{Name: "name", Type: field.TypeString, Unique: true},
{Name: "description", Type: field.TypeString, Nullable: true, Size: 2147483647},
{Name: "project_location", Type: field.TypeInt, Nullable: true},
{Name: "project_creator", Type: field.TypeInt, Nullable: true},
{Name: "project_type_projects", Type: field.TypeInt, Nullable: true},
}
// ProjectsTable holds the schema information for the "projects" table.
ProjectsTable = &schema.Table{
Name: "projects",
Columns: ProjectsColumns,
PrimaryKey: []*schema.Column{ProjectsColumns[0]},
ForeignKeys: []*schema.ForeignKey{
{
Symbol: "projects_locations_location",
Columns: []*schema.Column{ProjectsColumns[5]},
RefColumns: []*schema.Column{LocationsColumns[0]},
OnDelete: schema.SetNull,
},
{
Symbol: "projects_users_creator",
Columns: []*schema.Column{ProjectsColumns[6]},
RefColumns: []*schema.Column{UsersColumns[0]},
OnDelete: schema.SetNull,
},
{
Symbol: "projects_project_types_projects",
Columns: []*schema.Column{ProjectsColumns[7]},
RefColumns: []*schema.Column{ProjectTypesColumns[0]},
OnDelete: schema.SetNull,
},
},
Indexes: []*schema.Index{
{
Name: "project_name_project_type_projects",
Unique: true,
Columns: []*schema.Column{ProjectsColumns[3], ProjectsColumns[7]},
},
},
}
// ProjectTypesColumns holds the columns for the "project_types" table.
ProjectTypesColumns = []*schema.Column{
{Name: "id", Type: field.TypeInt, Increment: true},
{Name: "create_time", Type: field.TypeTime},
{Name: "update_time", Type: field.TypeTime},
{Name: "name", Type: field.TypeString, Unique: true},
{Name: "description", Type: field.TypeString, Nullable: true, Size: 2147483647},
}
// ProjectTypesTable holds the schema information for the "project_types" table.
ProjectTypesTable = &schema.Table{
Name: "project_types",
Columns: ProjectTypesColumns,
PrimaryKey: []*schema.Column{ProjectTypesColumns[0]},
ForeignKeys: []*schema.ForeignKey{},
}
// PropertiesColumns holds the columns for the "properties" table.
PropertiesColumns = []*schema.Column{
{Name: "id", Type: field.TypeInt, Increment: true},
{Name: "create_time", Type: field.TypeTime},
{Name: "update_time", Type: field.TypeTime},
{Name: "int_val", Type: field.TypeInt, Nullable: true},
{Name: "bool_val", Type: field.TypeBool, Nullable: true},
{Name: "float_val", Type: field.TypeFloat64, Nullable: true},
{Name: "latitude_val", Type: field.TypeFloat64, Nullable: true},
{Name: "longitude_val", Type: field.TypeFloat64, Nullable: true},
{Name: "range_from_val", Type: field.TypeFloat64, Nullable: true},
{Name: "range_to_val", Type: field.TypeFloat64, Nullable: true},
{Name: "string_val", Type: field.TypeString, Nullable: true},
{Name: "equipment_properties", Type: field.TypeInt, Nullable: true},
{Name: "equipment_port_properties", Type: field.TypeInt, Nullable: true},
{Name: "link_properties", Type: field.TypeInt, Nullable: true},
{Name: "location_properties", Type: field.TypeInt, Nullable: true},
{Name: "project_properties", Type: field.TypeInt, Nullable: true},
{Name: "property_type", Type: field.TypeInt, Nullable: true},
{Name: "property_equipment_value", Type: field.TypeInt, Nullable: true},
{Name: "property_location_value", Type: field.TypeInt, Nullable: true},
{Name: "property_service_value", Type: field.TypeInt, Nullable: true},
{Name: "property_work_order_value", Type: field.TypeInt, Nullable: true},
{Name: "property_user_value", Type: field.TypeInt, Nullable: true},
{Name: "service_properties", Type: field.TypeInt, Nullable: true},
{Name: "work_order_properties", Type: field.TypeInt, Nullable: true},
}
// PropertiesTable holds the schema information for the "properties" table.
PropertiesTable = &schema.Table{
Name: "properties",
Columns: PropertiesColumns,
PrimaryKey: []*schema.Column{PropertiesColumns[0]},
ForeignKeys: []*schema.ForeignKey{
{
Symbol: "properties_equipment_properties",
Columns: []*schema.Column{PropertiesColumns[11]},
RefColumns: []*schema.Column{EquipmentColumns[0]},
OnDelete: schema.SetNull,
},
{
Symbol: "properties_equipment_ports_properties",
Columns: []*schema.Column{PropertiesColumns[12]},
RefColumns: []*schema.Column{EquipmentPortsColumns[0]},
OnDelete: schema.SetNull,
},
{
Symbol: "properties_links_properties",
Columns: []*schema.Column{PropertiesColumns[13]},
RefColumns: []*schema.Column{LinksColumns[0]},
OnDelete: schema.SetNull,
},
{
Symbol: "properties_locations_properties",
Columns: []*schema.Column{PropertiesColumns[14]},
RefColumns: []*schema.Column{LocationsColumns[0]},
OnDelete: schema.SetNull,
},
{
Symbol: "properties_projects_properties",
Columns: []*schema.Column{PropertiesColumns[15]},
RefColumns: []*schema.Column{ProjectsColumns[0]},
OnDelete: schema.SetNull,
},
{
Symbol: "properties_property_types_type",
Columns: []*schema.Column{PropertiesColumns[16]},
RefColumns: []*schema.Column{PropertyTypesColumns[0]},
OnDelete: schema.SetNull,
},
{
Symbol: "properties_equipment_equipment_value",
Columns: []*schema.Column{PropertiesColumns[17]},
RefColumns: []*schema.Column{EquipmentColumns[0]},
OnDelete: schema.SetNull,
},
{
Symbol: "properties_locations_location_value",
Columns: []*schema.Column{PropertiesColumns[18]},
RefColumns: []*schema.Column{LocationsColumns[0]},
OnDelete: schema.SetNull,
},
{
Symbol: "properties_services_service_value",
Columns: []*schema.Column{PropertiesColumns[19]},
RefColumns: []*schema.Column{ServicesColumns[0]},
OnDelete: schema.SetNull,
},
{
Symbol: "properties_work_orders_work_order_value",
Columns: []*schema.Column{PropertiesColumns[20]},
RefColumns: []*schema.Column{WorkOrdersColumns[0]},
OnDelete: schema.SetNull,
},
{
Symbol: "properties_users_user_value",
Columns: []*schema.Column{PropertiesColumns[21]},
RefColumns: []*schema.Column{UsersColumns[0]},
OnDelete: schema.SetNull,
},
{
Symbol: "properties_services_properties",
Columns: []*schema.Column{PropertiesColumns[22]},
RefColumns: []*schema.Column{ServicesColumns[0]},
OnDelete: schema.SetNull,
},
{
Symbol: "properties_work_orders_properties",
Columns: []*schema.Column{PropertiesColumns[23]},
RefColumns: []*schema.Column{WorkOrdersColumns[0]},
OnDelete: schema.SetNull,
},
},
Indexes: []*schema.Index{
{
Name: "property_property_type_location_properties",
Unique: true,
Columns: []*schema.Column{PropertiesColumns[16], PropertiesColumns[14]},
},
{
Name: "property_property_type_equipment_properties",
Unique: true,
Columns: []*schema.Column{PropertiesColumns[16], PropertiesColumns[11]},
},
{
Name: "property_property_type_service_properties",
Unique: true,
Columns: []*schema.Column{PropertiesColumns[16], PropertiesColumns[22]},
},
{
Name: "property_property_type_equipment_port_properties",
Unique: true,
Columns: []*schema.Column{PropertiesColumns[16], PropertiesColumns[12]},
},
{
Name: "property_property_type_link_properties",
Unique: true,
Columns: []*schema.Column{PropertiesColumns[16], PropertiesColumns[13]},
},
{
Name: "property_property_type_work_order_properties",
Unique: true,
Columns: []*schema.Column{PropertiesColumns[16], PropertiesColumns[23]},
},
{
Name: "property_property_type_project_properties",
Unique: true,
Columns: []*schema.Column{PropertiesColumns[16], PropertiesColumns[15]},
},
},
}
// PropertyTypesColumns holds the columns for the "property_types" table.
PropertyTypesColumns = []*schema.Column{
{Name: "id", Type: field.TypeInt, Increment: true},
{Name: "create_time", Type: field.TypeTime},
{Name: "update_time", Type: field.TypeTime},
{Name: "type", Type: field.TypeEnum, Enums: []string{"string", "int", "bool", "float", "date", "enum", "range", "email", "gps_location", "datetime_local", "node"}},
{Name: "name", Type: field.TypeString},
{Name: "external_id", Type: field.TypeString, Unique: true, Nullable: true},
{Name: "index", Type: field.TypeInt, Nullable: true},
{Name: "category", Type: field.TypeString, Nullable: true},
{Name: "int_val", Type: field.TypeInt, Nullable: true},
{Name: "bool_val", Type: field.TypeBool, Nullable: true},
{Name: "float_val", Type: field.TypeFloat64, Nullable: true},
{Name: "latitude_val", Type: field.TypeFloat64, Nullable: true},
{Name: "longitude_val", Type: field.TypeFloat64, Nullable: true},
{Name: "string_val", Type: field.TypeString, Nullable: true, Size: 2147483647},
{Name: "range_from_val", Type: field.TypeFloat64, Nullable: true},
{Name: "range_to_val", Type: field.TypeFloat64, Nullable: true},
{Name: "is_instance_property", Type: field.TypeBool, Default: true},
{Name: "editable", Type: field.TypeBool, Default: true},
{Name: "mandatory", Type: field.TypeBool},
{Name: "deleted", Type: field.TypeBool},
{Name: "node_type", Type: field.TypeString, Nullable: true},
{Name: "equipment_port_type_property_types", Type: field.TypeInt, Nullable: true},
{Name: "equipment_port_type_link_property_types", Type: field.TypeInt, Nullable: true},
{Name: "equipment_type_property_types", Type: field.TypeInt, Nullable: true},
{Name: "location_type_property_types", Type: field.TypeInt, Nullable: true},
{Name: "project_type_properties", Type: field.TypeInt, Nullable: true},
{Name: "service_type_property_types", Type: field.TypeInt, Nullable: true},
{Name: "work_order_template_property_types", Type: field.TypeInt, Nullable: true},
{Name: "work_order_type_property_types", Type: field.TypeInt, Nullable: true},
}
// PropertyTypesTable holds the schema information for the "property_types" table.
PropertyTypesTable = &schema.Table{
Name: "property_types",
Columns: PropertyTypesColumns,
PrimaryKey: []*schema.Column{PropertyTypesColumns[0]},
ForeignKeys: []*schema.ForeignKey{
{
Symbol: "property_types_equipment_port_types_property_types",
Columns: []*schema.Column{PropertyTypesColumns[21]},
RefColumns: []*schema.Column{EquipmentPortTypesColumns[0]},
OnDelete: schema.SetNull,
},
{
Symbol: "property_types_equipment_port_types_link_property_types",
Columns: []*schema.Column{PropertyTypesColumns[22]},
RefColumns: []*schema.Column{EquipmentPortTypesColumns[0]},
OnDelete: schema.SetNull,
},
{
Symbol: "property_types_equipment_types_property_types",
Columns: []*schema.Column{PropertyTypesColumns[23]},
RefColumns: []*schema.Column{EquipmentTypesColumns[0]},
OnDelete: schema.SetNull,
},
{
Symbol: "property_types_location_types_property_types",
Columns: []*schema.Column{PropertyTypesColumns[24]},
RefColumns: []*schema.Column{LocationTypesColumns[0]},
OnDelete: schema.SetNull,
},
{
Symbol: "property_types_project_types_properties",
Columns: []*schema.Column{PropertyTypesColumns[25]},
RefColumns: []*schema.Column{ProjectTypesColumns[0]},
OnDelete: schema.SetNull,
},
{
Symbol: "property_types_service_types_property_types",
Columns: []*schema.Column{PropertyTypesColumns[26]},
RefColumns: []*schema.Column{ServiceTypesColumns[0]},
OnDelete: schema.SetNull,
},
{
Symbol: "property_types_work_order_templates_property_types",
Columns: []*schema.Column{PropertyTypesColumns[27]},
RefColumns: []*schema.Column{WorkOrderTemplatesColumns[0]},
OnDelete: schema.SetNull,
},
{
Symbol: "property_types_work_order_types_property_types",
Columns: []*schema.Column{PropertyTypesColumns[28]},
RefColumns: []*schema.Column{WorkOrderTypesColumns[0]},
OnDelete: schema.SetNull,
},
},
Indexes: []*schema.Index{
{
Name: "propertytype_name_location_type_property_types",
Unique: true,
Columns: []*schema.Column{PropertyTypesColumns[4], PropertyTypesColumns[24]},
},
{
Name: "propertytype_name_equipment_port_type_property_types",
Unique: true,
Columns: []*schema.Column{PropertyTypesColumns[4], PropertyTypesColumns[21]},
},
{
Name: "propertytype_name_equipment_type_property_types",
Unique: true,
Columns: []*schema.Column{PropertyTypesColumns[4], PropertyTypesColumns[23]},
},
{
Name: "propertytype_name_equipment_port_type_link_property_types",
Unique: true,
Columns: []*schema.Column{PropertyTypesColumns[4], PropertyTypesColumns[22]},
},
{
Name: "propertytype_name_work_order_type_property_types",
Unique: true,
Columns: []*schema.Column{PropertyTypesColumns[4], PropertyTypesColumns[28]},
},
},
}
// ReportFiltersColumns holds the columns for the "report_filters" table.
ReportFiltersColumns = []*schema.Column{
{Name: "id", Type: field.TypeInt, Increment: true},
{Name: "create_time", Type: field.TypeTime},
{Name: "update_time", Type: field.TypeTime},
{Name: "name", Type: field.TypeString},
{Name: "entity", Type: field.TypeEnum, Enums: []string{"WORK_ORDER", "PORT", "EQUIPMENT", "LINK", "LOCATION", "SERVICE"}},
{Name: "filters", Type: field.TypeString, Size: 2147483647, Default: "[]"},
}
// ReportFiltersTable holds the schema information for the "report_filters" table.
ReportFiltersTable = &schema.Table{
Name: "report_filters",
Columns: ReportFiltersColumns,
PrimaryKey: []*schema.Column{ReportFiltersColumns[0]},
ForeignKeys: []*schema.ForeignKey{},
Indexes: []*schema.Index{
{
Name: "reportfilter_name_entity",
Unique: true,
Columns: []*schema.Column{ReportFiltersColumns[3], ReportFiltersColumns[4]},
},
},
}
// ServicesColumns holds the columns for the "services" table.
ServicesColumns = []*schema.Column{
{Name: "id", Type: field.TypeInt, Increment: true},
{Name: "create_time", Type: field.TypeTime},
{Name: "update_time", Type: field.TypeTime},
{Name: "name", Type: field.TypeString, Unique: true},
{Name: "external_id", Type: field.TypeString, Unique: true, Nullable: true},
{Name: "status", Type: field.TypeString},
{Name: "service_type", Type: field.TypeInt, Nullable: true},
}
// ServicesTable holds the schema information for the "services" table.
ServicesTable = &schema.Table{
Name: "services",
Columns: ServicesColumns,
PrimaryKey: []*schema.Column{ServicesColumns[0]},
ForeignKeys: []*schema.ForeignKey{
{
Symbol: "services_service_types_type",
Columns: []*schema.Column{ServicesColumns[6]},
RefColumns: []*schema.Column{ServiceTypesColumns[0]},
OnDelete: schema.SetNull,
},
},
}
// ServiceEndpointsColumns holds the columns for the "service_endpoints" table.
ServiceEndpointsColumns = []*schema.Column{
{Name: "id", Type: field.TypeInt, Increment: true},
{Name: "create_time", Type: field.TypeTime},
{Name: "update_time", Type: field.TypeTime},
{Name: "service_endpoints", Type: field.TypeInt, Nullable: true},
{Name: "service_endpoint_port", Type: field.TypeInt, Nullable: true},
{Name: "service_endpoint_equipment", Type: field.TypeInt, Nullable: true},
{Name: "service_endpoint_definition_endpoints", Type: field.TypeInt, Nullable: true},
}
// ServiceEndpointsTable holds the schema information for the "service_endpoints" table.
ServiceEndpointsTable = &schema.Table{
Name: "service_endpoints",
Columns: ServiceEndpointsColumns,
PrimaryKey: []*schema.Column{ServiceEndpointsColumns[0]},
ForeignKeys: []*schema.ForeignKey{
{
Symbol: "service_endpoints_services_endpoints",
Columns: []*schema.Column{ServiceEndpointsColumns[3]},
RefColumns: []*schema.Column{ServicesColumns[0]},
OnDelete: schema.SetNull,
},
{
Symbol: "service_endpoints_equipment_ports_port",
Columns: []*schema.Column{ServiceEndpointsColumns[4]},
RefColumns: []*schema.Column{EquipmentPortsColumns[0]},
OnDelete: schema.SetNull,
},
{
Symbol: "service_endpoints_equipment_equipment",
Columns: []*schema.Column{ServiceEndpointsColumns[5]},
RefColumns: []*schema.Column{EquipmentColumns[0]},
OnDelete: schema.SetNull,
},
{
Symbol: "service_endpoints_service_endpoint_definitions_endpoints",
Columns: []*schema.Column{ServiceEndpointsColumns[6]},
RefColumns: []*schema.Column{ServiceEndpointDefinitionsColumns[0]},
OnDelete: schema.SetNull,
},
},
}
// ServiceEndpointDefinitionsColumns holds the columns for the "service_endpoint_definitions" table.
ServiceEndpointDefinitionsColumns = []*schema.Column{
{Name: "id", Type: field.TypeInt, Increment: true},
{Name: "create_time", Type: field.TypeTime},
{Name: "update_time", Type: field.TypeTime},
{Name: "role", Type: field.TypeString, Nullable: true},
{Name: "name", Type: field.TypeString},
{Name: "index", Type: field.TypeInt},
{Name: "equipment_type_service_endpoint_definitions", Type: field.TypeInt, Nullable: true},
{Name: "service_type_endpoint_definitions", Type: field.TypeInt, Nullable: true},
}
// ServiceEndpointDefinitionsTable holds the schema information for the "service_endpoint_definitions" table.
ServiceEndpointDefinitionsTable = &schema.Table{
Name: "service_endpoint_definitions",
Columns: ServiceEndpointDefinitionsColumns,
PrimaryKey: []*schema.Column{ServiceEndpointDefinitionsColumns[0]},
ForeignKeys: []*schema.ForeignKey{
{
Symbol: "service_endpoint_definitions_equipment_types_service_endpoint_definitions",
Columns: []*schema.Column{ServiceEndpointDefinitionsColumns[6]},
RefColumns: []*schema.Column{EquipmentTypesColumns[0]},
OnDelete: schema.SetNull,
},
{
Symbol: "service_endpoint_definitions_service_types_endpoint_definitions",
Columns: []*schema.Column{ServiceEndpointDefinitionsColumns[7]},
RefColumns: []*schema.Column{ServiceTypesColumns[0]},
OnDelete: schema.SetNull,
},
},
Indexes: []*schema.Index{
{
Name: "serviceendpointdefinition_index_service_type_endpoint_definitions",
Unique: true,
Columns: []*schema.Column{ServiceEndpointDefinitionsColumns[5], ServiceEndpointDefinitionsColumns[7]},
},
{
Name: "serviceendpointdefinition_name_service_type_endpoint_definitions",
Unique: true,
Columns: []*schema.Column{ServiceEndpointDefinitionsColumns[4], ServiceEndpointDefinitionsColumns[7]},
},
},
}
// ServiceTypesColumns holds the columns for the "service_types" table.
ServiceTypesColumns = []*schema.Column{
{Name: "id", Type: field.TypeInt, Increment: true},
{Name: "create_time", Type: field.TypeTime},
{Name: "update_time", Type: field.TypeTime},
{Name: "name", Type: field.TypeString, Unique: true},
{Name: "has_customer", Type: field.TypeBool},
{Name: "is_deleted", Type: field.TypeBool},
{Name: "discovery_method", Type: field.TypeEnum, Nullable: true, Enums: []string{"INVENTORY"}},
}
// ServiceTypesTable holds the schema information for the "service_types" table.
ServiceTypesTable = &schema.Table{
Name: "service_types",
Columns: ServiceTypesColumns,
PrimaryKey: []*schema.Column{ServiceTypesColumns[0]},
ForeignKeys: []*schema.ForeignKey{},
}
// SurveysColumns holds the columns for the "surveys" table.
SurveysColumns = []*schema.Column{
{Name: "id", Type: field.TypeInt, Increment: true},
{Name: "create_time", Type: field.TypeTime},
{Name: "update_time", Type: field.TypeTime},
{Name: "name", Type: field.TypeString},
{Name: "owner_name", Type: field.TypeString, Nullable: true},
{Name: "creation_timestamp", Type: field.TypeTime, Nullable: true},
{Name: "completion_timestamp", Type: field.TypeTime},
{Name: "survey_location", Type: field.TypeInt, Nullable: true},
}
// SurveysTable holds the schema information for the "surveys" table.
SurveysTable = &schema.Table{
Name: "surveys",
Columns: SurveysColumns,
PrimaryKey: []*schema.Column{SurveysColumns[0]},
ForeignKeys: []*schema.ForeignKey{
{
Symbol: "surveys_locations_location",
Columns: []*schema.Column{SurveysColumns[7]},
RefColumns: []*schema.Column{LocationsColumns[0]},
OnDelete: schema.SetNull,
},
},
}
// SurveyCellScansColumns holds the columns for the "survey_cell_scans" table.
SurveyCellScansColumns = []*schema.Column{
{Name: "id", Type: field.TypeInt, Increment: true},
{Name: "create_time", Type: field.TypeTime},
{Name: "update_time", Type: field.TypeTime},
{Name: "network_type", Type: field.TypeString},
{Name: "signal_strength", Type: field.TypeInt},
{Name: "timestamp", Type: field.TypeTime, Nullable: true},
{Name: "base_station_id", Type: field.TypeString, Nullable: true},
{Name: "network_id", Type: field.TypeString, Nullable: true},
{Name: "system_id", Type: field.TypeString, Nullable: true},
{Name: "cell_id", Type: field.TypeString, Nullable: true},
{Name: "location_area_code", Type: field.TypeString, Nullable: true},
{Name: "mobile_country_code", Type: field.TypeString, Nullable: true},
{Name: "mobile_network_code", Type: field.TypeString, Nullable: true},
{Name: "primary_scrambling_code", Type: field.TypeString, Nullable: true},
{Name: "operator", Type: field.TypeString, Nullable: true},
{Name: "arfcn", Type: field.TypeInt, Nullable: true},
{Name: "physical_cell_id", Type: field.TypeString, Nullable: true},
{Name: "tracking_area_code", Type: field.TypeString, Nullable: true},
{Name: "timing_advance", Type: field.TypeInt, Nullable: true},
{Name: "earfcn", Type: field.TypeInt, Nullable: true},
{Name: "uarfcn", Type: field.TypeInt, Nullable: true},
{Name: "latitude", Type: field.TypeFloat64, Nullable: true},
{Name: "longitude", Type: field.TypeFloat64, Nullable: true},
{Name: "survey_cell_scan_checklist_item", Type: field.TypeInt, Nullable: true},
{Name: "survey_cell_scan_survey_question", Type: field.TypeInt, Nullable: true},
{Name: "survey_cell_scan_location", Type: field.TypeInt, Nullable: true},
}
// SurveyCellScansTable holds the schema information for the "survey_cell_scans" table.
SurveyCellScansTable = &schema.Table{
Name: "survey_cell_scans",
Columns: SurveyCellScansColumns,
PrimaryKey: []*schema.Column{SurveyCellScansColumns[0]},
ForeignKeys: []*schema.ForeignKey{
{
Symbol: "survey_cell_scans_check_list_items_checklist_item",
Columns: []*schema.Column{SurveyCellScansColumns[23]},
RefColumns: []*schema.Column{CheckListItemsColumns[0]},
OnDelete: schema.SetNull,
},
{
Symbol: "survey_cell_scans_survey_questions_survey_question",
Columns: []*schema.Column{SurveyCellScansColumns[24]},
RefColumns: []*schema.Column{SurveyQuestionsColumns[0]},
OnDelete: schema.SetNull,
},
{
Symbol: "survey_cell_scans_locations_location",
Columns: []*schema.Column{SurveyCellScansColumns[25]},
RefColumns: []*schema.Column{LocationsColumns[0]},
OnDelete: schema.SetNull,
},
},
}
// SurveyQuestionsColumns holds the columns for the "survey_questions" table.
SurveyQuestionsColumns = []*schema.Column{
{Name: "id", Type: field.TypeInt, Increment: true},
{Name: "create_time", Type: field.TypeTime},
{Name: "update_time", Type: field.TypeTime},
{Name: "form_name", Type: field.TypeString, Nullable: true},
{Name: "form_description", Type: field.TypeString, Nullable: true},
{Name: "form_index", Type: field.TypeInt},
{Name: "question_type", Type: field.TypeString, Nullable: true},
{Name: "question_format", Type: field.TypeString, Nullable: true},
{Name: "question_text", Type: field.TypeString, Nullable: true},
{Name: "question_index", Type: field.TypeInt},
{Name: "bool_data", Type: field.TypeBool, Nullable: true},
{Name: "email_data", Type: field.TypeString, Nullable: true},
{Name: "latitude", Type: field.TypeFloat64, Nullable: true},
{Name: "longitude", Type: field.TypeFloat64, Nullable: true},
{Name: "location_accuracy", Type: field.TypeFloat64, Nullable: true},
{Name: "altitude", Type: field.TypeFloat64, Nullable: true},
{Name: "phone_data", Type: field.TypeString, Nullable: true},
{Name: "text_data", Type: field.TypeString, Nullable: true},
{Name: "float_data", Type: field.TypeFloat64, Nullable: true},
{Name: "int_data", Type: field.TypeInt, Nullable: true},
{Name: "date_data", Type: field.TypeTime, Nullable: true},
{Name: "survey_question_survey", Type: field.TypeInt, Nullable: true},
}
// SurveyQuestionsTable holds the schema information for the "survey_questions" table.
SurveyQuestionsTable = &schema.Table{
Name: "survey_questions",
Columns: SurveyQuestionsColumns,
PrimaryKey: []*schema.Column{SurveyQuestionsColumns[0]},
ForeignKeys: []*schema.ForeignKey{
{
Symbol: "survey_questions_surveys_survey",
Columns: []*schema.Column{SurveyQuestionsColumns[21]},
RefColumns: []*schema.Column{SurveysColumns[0]},
OnDelete: schema.SetNull,
},
},
}
// SurveyTemplateCategoriesColumns holds the columns for the "survey_template_categories" table.
SurveyTemplateCategoriesColumns = []*schema.Column{
{Name: "id", Type: field.TypeInt, Increment: true},
{Name: "create_time", Type: field.TypeTime},
{Name: "update_time", Type: field.TypeTime},
{Name: "category_title", Type: field.TypeString},
{Name: "category_description", Type: field.TypeString},
{Name: "location_type_survey_template_categories", Type: field.TypeInt, Nullable: true},
}
// SurveyTemplateCategoriesTable holds the schema information for the "survey_template_categories" table.
SurveyTemplateCategoriesTable = &schema.Table{
Name: "survey_template_categories",
Columns: SurveyTemplateCategoriesColumns,
PrimaryKey: []*schema.Column{SurveyTemplateCategoriesColumns[0]},
ForeignKeys: []*schema.ForeignKey{
{
Symbol: "survey_template_categories_location_types_survey_template_categories",
Columns: []*schema.Column{SurveyTemplateCategoriesColumns[5]},
RefColumns: []*schema.Column{LocationTypesColumns[0]},
OnDelete: schema.SetNull,
},
},
}
// SurveyTemplateQuestionsColumns holds the columns for the "survey_template_questions" table.
SurveyTemplateQuestionsColumns = []*schema.Column{
{Name: "id", Type: field.TypeInt, Increment: true},
{Name: "create_time", Type: field.TypeTime},
{Name: "update_time", Type: field.TypeTime},
{Name: "question_title", Type: field.TypeString},
{Name: "question_description", Type: field.TypeString},
{Name: "question_type", Type: field.TypeString},
{Name: "index", Type: field.TypeInt},
{Name: "survey_template_category_survey_template_questions", Type: field.TypeInt, Nullable: true},
}
// SurveyTemplateQuestionsTable holds the schema information for the "survey_template_questions" table.
SurveyTemplateQuestionsTable = &schema.Table{
Name: "survey_template_questions",
Columns: SurveyTemplateQuestionsColumns,
PrimaryKey: []*schema.Column{SurveyTemplateQuestionsColumns[0]},
ForeignKeys: []*schema.ForeignKey{
{
Symbol: "survey_template_questions_survey_template_categories_survey_template_questions",
Columns: []*schema.Column{SurveyTemplateQuestionsColumns[7]},
RefColumns: []*schema.Column{SurveyTemplateCategoriesColumns[0]},
OnDelete: schema.SetNull,
},
},
Indexes: []*schema.Index{
{
Name: "surveytemplatequestion_index_survey_template_category_survey_template_questions",
Unique: true,
Columns: []*schema.Column{SurveyTemplateQuestionsColumns[6], SurveyTemplateQuestionsColumns[7]},
},
},
}
// SurveyWiFiScansColumns holds the columns for the "survey_wi_fi_scans" table.
SurveyWiFiScansColumns = []*schema.Column{
{Name: "id", Type: field.TypeInt, Increment: true},
{Name: "create_time", Type: field.TypeTime},
{Name: "update_time", Type: field.TypeTime},
{Name: "ssid", Type: field.TypeString, Nullable: true},
{Name: "bssid", Type: field.TypeString},
{Name: "timestamp", Type: field.TypeTime},
{Name: "frequency", Type: field.TypeInt},
{Name: "channel", Type: field.TypeInt},
{Name: "band", Type: field.TypeString, Nullable: true},
{Name: "channel_width", Type: field.TypeInt, Nullable: true},
{Name: "capabilities", Type: field.TypeString, Nullable: true},
{Name: "strength", Type: field.TypeInt},
{Name: "latitude", Type: field.TypeFloat64, Nullable: true},
{Name: "longitude", Type: field.TypeFloat64, Nullable: true},
{Name: "survey_wi_fi_scan_checklist_item", Type: field.TypeInt, Nullable: true},
{Name: "survey_wi_fi_scan_survey_question", Type: field.TypeInt, Nullable: true},
{Name: "survey_wi_fi_scan_location", Type: field.TypeInt, Nullable: true},
}
// SurveyWiFiScansTable holds the schema information for the "survey_wi_fi_scans" table.
SurveyWiFiScansTable = &schema.Table{
Name: "survey_wi_fi_scans",
Columns: SurveyWiFiScansColumns,
PrimaryKey: []*schema.Column{SurveyWiFiScansColumns[0]},
ForeignKeys: []*schema.ForeignKey{
{
Symbol: "survey_wi_fi_scans_check_list_items_checklist_item",
Columns: []*schema.Column{SurveyWiFiScansColumns[14]},
RefColumns: []*schema.Column{CheckListItemsColumns[0]},
OnDelete: schema.SetNull,
},
{
Symbol: "survey_wi_fi_scans_survey_questions_survey_question",
Columns: []*schema.Column{SurveyWiFiScansColumns[15]},
RefColumns: []*schema.Column{SurveyQuestionsColumns[0]},
OnDelete: schema.SetNull,
},
{
Symbol: "survey_wi_fi_scans_locations_location",
Columns: []*schema.Column{SurveyWiFiScansColumns[16]},
RefColumns: []*schema.Column{LocationsColumns[0]},
OnDelete: schema.SetNull,
},
},
}
// UsersColumns holds the columns for the "users" table.
UsersColumns = []*schema.Column{
{Name: "id", Type: field.TypeInt, Increment: true},
{Name: "create_time", Type: field.TypeTime},
{Name: "update_time", Type: field.TypeTime},
{Name: "auth_id", Type: field.TypeString, Unique: true},
{Name: "first_name", Type: field.TypeString, Nullable: true},
{Name: "last_name", Type: field.TypeString, Nullable: true},
{Name: "email", Type: field.TypeString, Nullable: true},
{Name: "status", Type: field.TypeEnum, Enums: []string{"ACTIVE", "DEACTIVATED"}, Default: "ACTIVE"},
{Name: "role", Type: field.TypeEnum, Enums: []string{"USER", "ADMIN", "OWNER"}, Default: "USER"},
}
// UsersTable holds the schema information for the "users" table.
UsersTable = &schema.Table{
Name: "users",
Columns: UsersColumns,
PrimaryKey: []*schema.Column{UsersColumns[0]},
ForeignKeys: []*schema.ForeignKey{},
}
// UsersGroupsColumns holds the columns for the "users_groups" table.
UsersGroupsColumns = []*schema.Column{
{Name: "id", Type: field.TypeInt, Increment: true},
{Name: "create_time", Type: field.TypeTime},
{Name: "update_time", Type: field.TypeTime},
{Name: "name", Type: field.TypeString, Unique: true},
{Name: "description", Type: field.TypeString, Nullable: true},
{Name: "status", Type: field.TypeEnum, Enums: []string{"ACTIVE", "DEACTIVATED"}, Default: "ACTIVE"},
}
// UsersGroupsTable holds the schema information for the "users_groups" table.
UsersGroupsTable = &schema.Table{
Name: "users_groups",
Columns: UsersGroupsColumns,
PrimaryKey: []*schema.Column{UsersGroupsColumns[0]},
ForeignKeys: []*schema.ForeignKey{},
}
// WorkOrdersColumns holds the columns for the "work_orders" table.
WorkOrdersColumns = []*schema.Column{
{Name: "id", Type: field.TypeInt, Increment: true},
{Name: "create_time", Type: field.TypeTime},
{Name: "update_time", Type: field.TypeTime},
{Name: "name", Type: field.TypeString},
{Name: "status", Type: field.TypeEnum, Enums: []string{"PENDING", "PLANNED", "DONE"}, Default: "PLANNED"},
{Name: "priority", Type: field.TypeEnum, Enums: []string{"URGENT", "HIGH", "MEDIUM", "LOW", "NONE"}, Default: "NONE"},
{Name: "description", Type: field.TypeString, Nullable: true, Size: 2147483647},
{Name: "install_date", Type: field.TypeTime, Nullable: true},
{Name: "creation_date", Type: field.TypeTime},
{Name: "index", Type: field.TypeInt, Nullable: true},
{Name: "close_date", Type: field.TypeTime, Nullable: true},
{Name: "project_work_orders", Type: field.TypeInt, Nullable: true},
{Name: "work_order_type", Type: field.TypeInt, Nullable: true},
{Name: "work_order_template", Type: field.TypeInt, Nullable: true},
{Name: "work_order_location", Type: field.TypeInt, Nullable: true},
{Name: "work_order_owner", Type: field.TypeInt, Nullable: true},
{Name: "work_order_assignee", Type: field.TypeInt, Nullable: true},
}
// WorkOrdersTable holds the schema information for the "work_orders" table.
WorkOrdersTable = &schema.Table{
Name: "work_orders",
Columns: WorkOrdersColumns,
PrimaryKey: []*schema.Column{WorkOrdersColumns[0]},
ForeignKeys: []*schema.ForeignKey{
{
Symbol: "work_orders_projects_work_orders",
Columns: []*schema.Column{WorkOrdersColumns[11]},
RefColumns: []*schema.Column{ProjectsColumns[0]},
OnDelete: schema.SetNull,
},
{
Symbol: "work_orders_work_order_types_type",
Columns: []*schema.Column{WorkOrdersColumns[12]},
RefColumns: []*schema.Column{WorkOrderTypesColumns[0]},
OnDelete: schema.SetNull,
},
{
Symbol: "work_orders_work_order_templates_template",
Columns: []*schema.Column{WorkOrdersColumns[13]},
RefColumns: []*schema.Column{WorkOrderTemplatesColumns[0]},
OnDelete: schema.SetNull,
},
{
Symbol: "work_orders_locations_location",
Columns: []*schema.Column{WorkOrdersColumns[14]},
RefColumns: []*schema.Column{LocationsColumns[0]},
OnDelete: schema.SetNull,
},
{
Symbol: "work_orders_users_owner",
Columns: []*schema.Column{WorkOrdersColumns[15]},
RefColumns: []*schema.Column{UsersColumns[0]},
OnDelete: schema.SetNull,
},
{
Symbol: "work_orders_users_assignee",
Columns: []*schema.Column{WorkOrdersColumns[16]},
RefColumns: []*schema.Column{UsersColumns[0]},
OnDelete: schema.SetNull,
},
},
Indexes: []*schema.Index{
{
Name: "workorder_creation_date",
Unique: false,
Columns: []*schema.Column{WorkOrdersColumns[8]},
},
{
Name: "workorder_close_date",
Unique: false,
Columns: []*schema.Column{WorkOrdersColumns[10]},
},
{
Name: "workorder_create_time",
Unique: false,
Columns: []*schema.Column{WorkOrdersColumns[1]},
},
{
Name: "workorder_update_time",
Unique: false,
Columns: []*schema.Column{WorkOrdersColumns[2]},
},
},
}
// WorkOrderDefinitionsColumns holds the columns for the "work_order_definitions" table.
WorkOrderDefinitionsColumns = []*schema.Column{
{Name: "id", Type: field.TypeInt, Increment: true},
{Name: "create_time", Type: field.TypeTime},
{Name: "update_time", Type: field.TypeTime},
{Name: "index", Type: field.TypeInt, Nullable: true},
{Name: "project_type_work_orders", Type: field.TypeInt, Nullable: true},
{Name: "work_order_definition_type", Type: field.TypeInt, Nullable: true},
}
// WorkOrderDefinitionsTable holds the schema information for the "work_order_definitions" table.
WorkOrderDefinitionsTable = &schema.Table{
Name: "work_order_definitions",
Columns: WorkOrderDefinitionsColumns,
PrimaryKey: []*schema.Column{WorkOrderDefinitionsColumns[0]},
ForeignKeys: []*schema.ForeignKey{
{
Symbol: "work_order_definitions_project_types_work_orders",
Columns: []*schema.Column{WorkOrderDefinitionsColumns[4]},
RefColumns: []*schema.Column{ProjectTypesColumns[0]},
OnDelete: schema.SetNull,
},
{
Symbol: "work_order_definitions_work_order_types_type",
Columns: []*schema.Column{WorkOrderDefinitionsColumns[5]},
RefColumns: []*schema.Column{WorkOrderTypesColumns[0]},
OnDelete: schema.SetNull,
},
},
}
// WorkOrderTemplatesColumns holds the columns for the "work_order_templates" table.
WorkOrderTemplatesColumns = []*schema.Column{
{Name: "id", Type: field.TypeInt, Increment: true},
{Name: "name", Type: field.TypeString},
{Name: "description", Type: field.TypeString, Nullable: true, Size: 2147483647},
{Name: "work_order_template_type", Type: field.TypeInt, Nullable: true},
}
// WorkOrderTemplatesTable holds the schema information for the "work_order_templates" table.
WorkOrderTemplatesTable = &schema.Table{
Name: "work_order_templates",
Columns: WorkOrderTemplatesColumns,
PrimaryKey: []*schema.Column{WorkOrderTemplatesColumns[0]},
ForeignKeys: []*schema.ForeignKey{
{
Symbol: "work_order_templates_work_order_types_type",
Columns: []*schema.Column{WorkOrderTemplatesColumns[3]},
RefColumns: []*schema.Column{WorkOrderTypesColumns[0]},
OnDelete: schema.SetNull,
},
},
}
// WorkOrderTypesColumns holds the columns for the "work_order_types" table.
WorkOrderTypesColumns = []*schema.Column{
{Name: "id", Type: field.TypeInt, Increment: true},
{Name: "name", Type: field.TypeString},
{Name: "description", Type: field.TypeString, Nullable: true, Size: 2147483647},
}
// WorkOrderTypesTable holds the schema information for the "work_order_types" table.
WorkOrderTypesTable = &schema.Table{
Name: "work_order_types",
Columns: WorkOrderTypesColumns,
PrimaryKey: []*schema.Column{WorkOrderTypesColumns[0]},
ForeignKeys: []*schema.ForeignKey{},
Indexes: []*schema.Index{
{
Name: "workordertype_name",
Unique: true,
Columns: []*schema.Column{WorkOrderTypesColumns[1]},
},
},
}
// ServiceUpstreamColumns holds the columns for the "service_upstream" table.
ServiceUpstreamColumns = []*schema.Column{
{Name: "service_id", Type: field.TypeInt},
{Name: "downstream_id", Type: field.TypeInt},
}
// ServiceUpstreamTable holds the schema information for the "service_upstream" table.
ServiceUpstreamTable = &schema.Table{
Name: "service_upstream",
Columns: ServiceUpstreamColumns,
PrimaryKey: []*schema.Column{ServiceUpstreamColumns[0], ServiceUpstreamColumns[1]},
ForeignKeys: []*schema.ForeignKey{
{
Symbol: "service_upstream_service_id",
Columns: []*schema.Column{ServiceUpstreamColumns[0]},
RefColumns: []*schema.Column{ServicesColumns[0]},
OnDelete: schema.Cascade,
},
{
Symbol: "service_upstream_downstream_id",
Columns: []*schema.Column{ServiceUpstreamColumns[1]},
RefColumns: []*schema.Column{ServicesColumns[0]},
OnDelete: schema.Cascade,
},
},
}
// ServiceLinksColumns holds the columns for the "service_links" table.
ServiceLinksColumns = []*schema.Column{
{Name: "service_id", Type: field.TypeInt},
{Name: "link_id", Type: field.TypeInt},
}
// ServiceLinksTable holds the schema information for the "service_links" table.
ServiceLinksTable = &schema.Table{
Name: "service_links",
Columns: ServiceLinksColumns,
PrimaryKey: []*schema.Column{ServiceLinksColumns[0], ServiceLinksColumns[1]},
ForeignKeys: []*schema.ForeignKey{
{
Symbol: "service_links_service_id",
Columns: []*schema.Column{ServiceLinksColumns[0]},
RefColumns: []*schema.Column{ServicesColumns[0]},
OnDelete: schema.Cascade,
},
{
Symbol: "service_links_link_id",
Columns: []*schema.Column{ServiceLinksColumns[1]},
RefColumns: []*schema.Column{LinksColumns[0]},
OnDelete: schema.Cascade,
},
},
}
// ServiceCustomerColumns holds the columns for the "service_customer" table.
ServiceCustomerColumns = []*schema.Column{
{Name: "service_id", Type: field.TypeInt},
{Name: "customer_id", Type: field.TypeInt},
}
// ServiceCustomerTable holds the schema information for the "service_customer" table.
ServiceCustomerTable = &schema.Table{
Name: "service_customer",
Columns: ServiceCustomerColumns,
PrimaryKey: []*schema.Column{ServiceCustomerColumns[0], ServiceCustomerColumns[1]},
ForeignKeys: []*schema.ForeignKey{
{
Symbol: "service_customer_service_id",
Columns: []*schema.Column{ServiceCustomerColumns[0]},
RefColumns: []*schema.Column{ServicesColumns[0]},
OnDelete: schema.Cascade,
},
{
Symbol: "service_customer_customer_id",
Columns: []*schema.Column{ServiceCustomerColumns[1]},
RefColumns: []*schema.Column{CustomersColumns[0]},
OnDelete: schema.Cascade,
},
},
}
// UsersGroupMembersColumns holds the columns for the "users_group_members" table.
UsersGroupMembersColumns = []*schema.Column{
{Name: "users_group_id", Type: field.TypeInt},
{Name: "user_id", Type: field.TypeInt},
}
// UsersGroupMembersTable holds the schema information for the "users_group_members" table.
UsersGroupMembersTable = &schema.Table{
Name: "users_group_members",
Columns: UsersGroupMembersColumns,
PrimaryKey: []*schema.Column{UsersGroupMembersColumns[0], UsersGroupMembersColumns[1]},
ForeignKeys: []*schema.ForeignKey{
{
Symbol: "users_group_members_users_group_id",
Columns: []*schema.Column{UsersGroupMembersColumns[0]},
RefColumns: []*schema.Column{UsersGroupsColumns[0]},
OnDelete: schema.Cascade,
},
{
Symbol: "users_group_members_user_id",
Columns: []*schema.Column{UsersGroupMembersColumns[1]},
RefColumns: []*schema.Column{UsersColumns[0]},
OnDelete: schema.Cascade,
},
},
}
// UsersGroupPoliciesColumns holds the columns for the "users_group_policies" table.
UsersGroupPoliciesColumns = []*schema.Column{
{Name: "users_group_id", Type: field.TypeInt},
{Name: "permissions_policy_id", Type: field.TypeInt},
}
// UsersGroupPoliciesTable holds the schema information for the "users_group_policies" table.
UsersGroupPoliciesTable = &schema.Table{
Name: "users_group_policies",
Columns: UsersGroupPoliciesColumns,
PrimaryKey: []*schema.Column{UsersGroupPoliciesColumns[0], UsersGroupPoliciesColumns[1]},
ForeignKeys: []*schema.ForeignKey{
{
Symbol: "users_group_policies_users_group_id",
Columns: []*schema.Column{UsersGroupPoliciesColumns[0]},
RefColumns: []*schema.Column{UsersGroupsColumns[0]},
OnDelete: schema.Cascade,
},
{
Symbol: "users_group_policies_permissions_policy_id",
Columns: []*schema.Column{UsersGroupPoliciesColumns[1]},
RefColumns: []*schema.Column{PermissionsPoliciesColumns[0]},
OnDelete: schema.Cascade,
},
},
}
// Tables holds all the tables in the schema.
Tables = []*schema.Table{
ActionsRulesTable,
ActivitiesTable,
CheckListCategoriesTable,
CheckListCategoryDefinitionsTable,
CheckListItemsTable,
CheckListItemDefinitionsTable,
CommentsTable,
CustomersTable,
EquipmentTable,
EquipmentCategoriesTable,
EquipmentPortsTable,
EquipmentPortDefinitionsTable,
EquipmentPortTypesTable,
EquipmentPositionsTable,
EquipmentPositionDefinitionsTable,
EquipmentTypesTable,
FilesTable,
FloorPlansTable,
FloorPlanReferencePointsTable,
FloorPlanScalesTable,
HyperlinksTable,
LinksTable,
LocationsTable,
LocationTypesTable,
PermissionsPoliciesTable,
ProjectsTable,
ProjectTypesTable,
PropertiesTable,
PropertyTypesTable,
ReportFiltersTable,
ServicesTable,
ServiceEndpointsTable,
ServiceEndpointDefinitionsTable,
ServiceTypesTable,
SurveysTable,
SurveyCellScansTable,
SurveyQuestionsTable,
SurveyTemplateCategoriesTable,
SurveyTemplateQuestionsTable,
SurveyWiFiScansTable,
UsersTable,
UsersGroupsTable,
WorkOrdersTable,
WorkOrderDefinitionsTable,
WorkOrderTemplatesTable,
WorkOrderTypesTable,
ServiceUpstreamTable,
ServiceLinksTable,
ServiceCustomerTable,
UsersGroupMembersTable,
UsersGroupPoliciesTable,
}
)
func init() {
ActivitiesTable.ForeignKeys[0].RefTable = UsersTable
ActivitiesTable.ForeignKeys[1].RefTable = WorkOrdersTable
CheckListCategoriesTable.ForeignKeys[0].RefTable = WorkOrdersTable
CheckListCategoryDefinitionsTable.ForeignKeys[0].RefTable = WorkOrderTemplatesTable
CheckListCategoryDefinitionsTable.ForeignKeys[1].RefTable = WorkOrderTypesTable
CheckListItemsTable.ForeignKeys[0].RefTable = CheckListCategoriesTable
CheckListItemDefinitionsTable.ForeignKeys[0].RefTable = CheckListCategoryDefinitionsTable
CommentsTable.ForeignKeys[0].RefTable = UsersTable
CommentsTable.ForeignKeys[1].RefTable = ProjectsTable
CommentsTable.ForeignKeys[2].RefTable = WorkOrdersTable
EquipmentTable.ForeignKeys[0].RefTable = EquipmentTypesTable
EquipmentTable.ForeignKeys[1].RefTable = WorkOrdersTable
EquipmentTable.ForeignKeys[2].RefTable = EquipmentPositionsTable
EquipmentTable.ForeignKeys[3].RefTable = LocationsTable
EquipmentPortsTable.ForeignKeys[0].RefTable = EquipmentTable
EquipmentPortsTable.ForeignKeys[1].RefTable = EquipmentPortDefinitionsTable
EquipmentPortsTable.ForeignKeys[2].RefTable = LinksTable
EquipmentPortDefinitionsTable.ForeignKeys[0].RefTable = EquipmentPortTypesTable
EquipmentPortDefinitionsTable.ForeignKeys[1].RefTable = EquipmentTypesTable
EquipmentPositionsTable.ForeignKeys[0].RefTable = EquipmentTable
EquipmentPositionsTable.ForeignKeys[1].RefTable = EquipmentPositionDefinitionsTable
EquipmentPositionDefinitionsTable.ForeignKeys[0].RefTable = EquipmentTypesTable
EquipmentTypesTable.ForeignKeys[0].RefTable = EquipmentCategoriesTable
FilesTable.ForeignKeys[0].RefTable = CheckListItemsTable
FilesTable.ForeignKeys[1].RefTable = EquipmentTable
FilesTable.ForeignKeys[2].RefTable = FloorPlansTable
FilesTable.ForeignKeys[3].RefTable = LocationsTable
FilesTable.ForeignKeys[4].RefTable = SurveysTable
FilesTable.ForeignKeys[5].RefTable = SurveyQuestionsTable
FilesTable.ForeignKeys[6].RefTable = SurveyQuestionsTable
FilesTable.ForeignKeys[7].RefTable = UsersTable
FilesTable.ForeignKeys[8].RefTable = WorkOrdersTable
FloorPlansTable.ForeignKeys[0].RefTable = LocationsTable
FloorPlansTable.ForeignKeys[1].RefTable = FloorPlanReferencePointsTable
FloorPlansTable.ForeignKeys[2].RefTable = FloorPlanScalesTable
HyperlinksTable.ForeignKeys[0].RefTable = EquipmentTable
HyperlinksTable.ForeignKeys[1].RefTable = LocationsTable
HyperlinksTable.ForeignKeys[2].RefTable = WorkOrdersTable
LinksTable.ForeignKeys[0].RefTable = WorkOrdersTable
LocationsTable.ForeignKeys[0].RefTable = LocationTypesTable
LocationsTable.ForeignKeys[1].RefTable = LocationsTable
ProjectsTable.ForeignKeys[0].RefTable = LocationsTable
ProjectsTable.ForeignKeys[1].RefTable = UsersTable
ProjectsTable.ForeignKeys[2].RefTable = ProjectTypesTable
PropertiesTable.ForeignKeys[0].RefTable = EquipmentTable
PropertiesTable.ForeignKeys[1].RefTable = EquipmentPortsTable
PropertiesTable.ForeignKeys[2].RefTable = LinksTable
PropertiesTable.ForeignKeys[3].RefTable = LocationsTable
PropertiesTable.ForeignKeys[4].RefTable = ProjectsTable
PropertiesTable.ForeignKeys[5].RefTable = PropertyTypesTable
PropertiesTable.ForeignKeys[6].RefTable = EquipmentTable
PropertiesTable.ForeignKeys[7].RefTable = LocationsTable
PropertiesTable.ForeignKeys[8].RefTable = ServicesTable
PropertiesTable.ForeignKeys[9].RefTable = WorkOrdersTable
PropertiesTable.ForeignKeys[10].RefTable = UsersTable
PropertiesTable.ForeignKeys[11].RefTable = ServicesTable
PropertiesTable.ForeignKeys[12].RefTable = WorkOrdersTable
PropertyTypesTable.ForeignKeys[0].RefTable = EquipmentPortTypesTable
PropertyTypesTable.ForeignKeys[1].RefTable = EquipmentPortTypesTable
PropertyTypesTable.ForeignKeys[2].RefTable = EquipmentTypesTable
PropertyTypesTable.ForeignKeys[3].RefTable = LocationTypesTable
PropertyTypesTable.ForeignKeys[4].RefTable = ProjectTypesTable
PropertyTypesTable.ForeignKeys[5].RefTable = ServiceTypesTable
PropertyTypesTable.ForeignKeys[6].RefTable = WorkOrderTemplatesTable
PropertyTypesTable.ForeignKeys[7].RefTable = WorkOrderTypesTable
ServicesTable.ForeignKeys[0].RefTable = ServiceTypesTable
ServiceEndpointsTable.ForeignKeys[0].RefTable = ServicesTable
ServiceEndpointsTable.ForeignKeys[1].RefTable = EquipmentPortsTable
ServiceEndpointsTable.ForeignKeys[2].RefTable = EquipmentTable
ServiceEndpointsTable.ForeignKeys[3].RefTable = ServiceEndpointDefinitionsTable
ServiceEndpointDefinitionsTable.ForeignKeys[0].RefTable = EquipmentTypesTable
ServiceEndpointDefinitionsTable.ForeignKeys[1].RefTable = ServiceTypesTable
SurveysTable.ForeignKeys[0].RefTable = LocationsTable
SurveyCellScansTable.ForeignKeys[0].RefTable = CheckListItemsTable
SurveyCellScansTable.ForeignKeys[1].RefTable = SurveyQuestionsTable
SurveyCellScansTable.ForeignKeys[2].RefTable = LocationsTable
SurveyQuestionsTable.ForeignKeys[0].RefTable = SurveysTable
SurveyTemplateCategoriesTable.ForeignKeys[0].RefTable = LocationTypesTable
SurveyTemplateQuestionsTable.ForeignKeys[0].RefTable = SurveyTemplateCategoriesTable
SurveyWiFiScansTable.ForeignKeys[0].RefTable = CheckListItemsTable
SurveyWiFiScansTable.ForeignKeys[1].RefTable = SurveyQuestionsTable
SurveyWiFiScansTable.ForeignKeys[2].RefTable = LocationsTable
WorkOrdersTable.ForeignKeys[0].RefTable = ProjectsTable
WorkOrdersTable.ForeignKeys[1].RefTable = WorkOrderTypesTable
WorkOrdersTable.ForeignKeys[2].RefTable = WorkOrderTemplatesTable
WorkOrdersTable.ForeignKeys[3].RefTable = LocationsTable
WorkOrdersTable.ForeignKeys[4].RefTable = UsersTable
WorkOrdersTable.ForeignKeys[5].RefTable = UsersTable
WorkOrderDefinitionsTable.ForeignKeys[0].RefTable = ProjectTypesTable
WorkOrderDefinitionsTable.ForeignKeys[1].RefTable = WorkOrderTypesTable
WorkOrderTemplatesTable.ForeignKeys[0].RefTable = WorkOrderTypesTable
ServiceUpstreamTable.ForeignKeys[0].RefTable = ServicesTable
ServiceUpstreamTable.ForeignKeys[1].RefTable = ServicesTable
ServiceLinksTable.ForeignKeys[0].RefTable = ServicesTable
ServiceLinksTable.ForeignKeys[1].RefTable = LinksTable
ServiceCustomerTable.ForeignKeys[0].RefTable = ServicesTable
ServiceCustomerTable.ForeignKeys[1].RefTable = CustomersTable
UsersGroupMembersTable.ForeignKeys[0].RefTable = UsersGroupsTable
UsersGroupMembersTable.ForeignKeys[1].RefTable = UsersTable
UsersGroupPoliciesTable.ForeignKeys[0].RefTable = UsersGroupsTable
UsersGroupPoliciesTable.ForeignKeys[1].RefTable = PermissionsPoliciesTable
}
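// Usage sketch (hedged; the migrator API below is an assumption about entgo's
// dialect/sql/schema package, not part of this generated file): the Tables
// slice above is what the SQL schema migrator consumes to create or diff the
// database schema, roughly:
//
//	m, err := sqlschema.NewMigrate(driver)
//	if err == nil {
//		err = m.Create(ctx, Tables...)
//	}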
map.d.ts
import { ResourceBase } from '../resource';
import { Value } from '../dataTypes';
export declare class MapConfiguration {
Style: Value<string>;
constructor(properties: MapConfiguration);
}
export interface MapProperties {
Configuration: MapConfiguration;
Description?: Value<string>;
MapName: Value<string>;
PricingPlan: Value<string>;
}
export default class Map extends ResourceBase<MapProperties> {
static MapConfiguration: typeof MapConfiguration;
constructor(properties: MapProperties);
}
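// Usage sketch (illustrative only; the import path and the concrete string
// values are assumptions, not part of this declaration file). The static
// `MapConfiguration` member lets callers build the nested configuration:
//
//   import Map from './map';
//   const map = new Map({
//       Configuration: new Map.MapConfiguration({ Style: 'VectorEsriStreets' }),
//       MapName: 'my-map',
//       PricingPlan: 'RequestBasedUsage',
//   });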
AlipayCommerceEducateCampusExamineQueryRequest.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
class AlipayCommerceEducateCampusExamineQueryRequest(object):
def __init__(self, biz_model=None):
self._biz_model = biz_model
self._version = "1.0"
self._terminal_type = None
self._terminal_info = None
self._prod_code = None
self._notify_url = None
self._return_url = None
self._udf_params = None
self._need_encrypt = False
@property
def biz_model(self):
return self._biz_model
@biz_model.setter
def biz_model(self, value):
self._biz_model = value
@property
def version(self):
return self._version
@version.setter
def version(self, value):
self._version = value
@property
def terminal_type(self):
return self._terminal_type
@terminal_type.setter
def terminal_type(self, value):
self._terminal_type = value
@property
def terminal_info(self):
return self._terminal_info
@terminal_info.setter
def terminal_info(self, value):
self._terminal_info = value
@property
def prod_code(self):
return self._prod_code
@prod_code.setter
def prod_code(self, value):
self._prod_code = value
@property
def notify_url(self):
return self._notify_url
@notify_url.setter
def notify_url(self, value):
self._notify_url = value
@property
def return_url(self):
return self._return_url
@return_url.setter
def return_url(self, value):
self._return_url = value
@property
def udf_params(self):
return self._udf_params
return self._udf_params
@udf_params.setter
def udf_params(self, value):
if not isinstance(value, dict):
return
self._udf_params = value
@property
def need_encrypt(self):
return self._need_encrypt
@need_encrypt.setter
def need_encrypt(self, value):
self._need_encrypt = value
def add_other_text_param(self, key, value):
if not self.udf_params:
self.udf_params = dict()
self.udf_params[key] = value
def get_params(self):
params = dict()
params[P_METHOD] = 'alipay.commerce.educate.campus.examine.query'
params[P_VERSION] = self.version
if self.biz_model:
params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
if self.terminal_type:
params['terminal_type'] = self.terminal_type
if self.terminal_info:
params['terminal_info'] = self.terminal_info
if self.prod_code:
params['prod_code'] = self.prod_code
if self.notify_url:
params['notify_url'] = self.notify_url
if self.return_url:
params['return_url'] = self.return_url
if self.udf_params:
params.update(self.udf_params)
return params
def get_multipart_params(self):
multipart_params = dict()
return multipart_params
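# Usage sketch (illustrative only; there is no biz model class in this file,
# so only the plain-parameter path is shown):
#
#     request = AlipayCommerceEducateCampusExamineQueryRequest()
#     request.add_other_text_param('trace_id', 'demo-123')
#     params = request.get_params()  # flat dict ready for signing/transport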
lower.rs
//! Lowering rules for X64.
// ISLE integration glue.
mod isle;
use crate::data_value::DataValue;
use crate::ir::{
condcodes::{CondCode, FloatCC, IntCC},
types, AbiParam, ArgumentPurpose, ExternalName, Inst as IRInst, InstructionData, LibCall,
Opcode, Signature, Type,
};
use crate::isa::x64::abi::*;
use crate::isa::x64::inst::args::*;
use crate::isa::x64::inst::*;
use crate::isa::{x64::settings as x64_settings, x64::X64Backend, CallConv};
use crate::machinst::lower::*;
use crate::machinst::*;
use crate::result::CodegenResult;
use crate::settings::{Flags, TlsModel};
use alloc::boxed::Box;
use alloc::vec::Vec;
use log::trace;
use regalloc::{Reg, RegClass, Writable};
use smallvec::{smallvec, SmallVec};
use std::convert::TryFrom;
use target_lexicon::Triple;
//=============================================================================
// Helpers for instruction lowering.
fn is_int_or_ref_ty(ty: Type) -> bool {
match ty {
types::I8 | types::I16 | types::I32 | types::I64 | types::R64 => true,
types::B1 | types::B8 | types::B16 | types::B32 | types::B64 => true,
types::R32 => panic!("shouldn't have 32-bit refs on x64"),
_ => false,
}
}
fn is_bool_ty(ty: Type) -> bool {
match ty {
types::B1 | types::B8 | types::B16 | types::B32 | types::B64 => true,
types::R32 => panic!("shouldn't have 32-bit refs on x64"),
_ => false,
}
}
/// This is target-word-size dependent, and it excludes booleans and reftypes.
fn is_valid_atomic_transaction_ty(ty: Type) -> bool {
match ty {
types::I8 | types::I16 | types::I32 | types::I64 => true,
_ => false,
}
}
/// Returns the source instruction if the given `input` is a result produced by an instruction
/// with Opcode `op`, or `None` otherwise.
// TODO investigate failures with checking against the result index.
fn matches_input<C: LowerCtx<I = Inst>>(
ctx: &mut C,
input: InsnInput,
op: Opcode,
) -> Option<IRInst> {
let inputs = ctx.get_input_as_source_or_const(input.insn, input.input);
inputs.inst.and_then(|(src_inst, _)| {
let data = ctx.data(src_inst);
if data.opcode() == op {
return Some(src_inst);
}
None
})
}
/// Returns the source instruction if the given `input` is a result produced by an instruction
/// with any of the opcodes specified in `ops`, or `None` otherwise.
fn matches_input_any<C: LowerCtx<I = Inst>>(
ctx: &mut C,
input: InsnInput,
ops: &[Opcode],
) -> Option<IRInst> {
let inputs = ctx.get_input_as_source_or_const(input.insn, input.input);
inputs.inst.and_then(|(src_inst, _)| {
let data = ctx.data(src_inst);
for &op in ops {
if data.opcode() == op {
return Some(src_inst);
}
}
None
})
}
/// Emits instruction(s) to generate the given 64-bit constant value into a newly-allocated
/// temporary register, returning that register.
fn generate_constant<C: LowerCtx<I = Inst>>(ctx: &mut C, ty: Type, c: u64) -> ValueRegs<Reg> {
let from_bits = ty_bits(ty);
let masked = if from_bits < 64 {
c & ((1u64 << from_bits) - 1)
} else {
c
};
let cst_copy = ctx.alloc_tmp(ty);
for inst in Inst::gen_constant(cst_copy, masked as u128, ty, |ty| {
ctx.alloc_tmp(ty).only_reg().unwrap()
})
.into_iter()
{
ctx.emit(inst);
}
non_writable_value_regs(cst_copy)
}
/// Put the given input into possibly multiple registers, and mark it as used (side-effect).
fn put_input_in_regs<C: LowerCtx<I = Inst>>(ctx: &mut C, spec: InsnInput) -> ValueRegs<Reg> {
let ty = ctx.input_ty(spec.insn, spec.input);
let input = ctx.get_input_as_source_or_const(spec.insn, spec.input);
if let Some(c) = input.constant {
// Generate constants fresh at each use to minimize long-range register pressure.
generate_constant(ctx, ty, c)
} else {
ctx.put_input_in_regs(spec.insn, spec.input)
}
}
/// Put the given input into a register, and mark it as used (side-effect).
fn put_input_in_reg<C: LowerCtx<I = Inst>>(ctx: &mut C, spec: InsnInput) -> Reg {
    // Body reconstructed from context: the single-register variant of
    // `put_input_in_regs` above; multi-register values are not expected here.
    put_input_in_regs(ctx, spec)
        .only_reg()
        .expect("multi-register value not expected")
}
/// Determines whether a load operation (indicated by `src_insn`) can be merged
/// into the current lowering point. If so, returns the address-base source (as
/// an `InsnInput`) and an offset from that address from which to perform the
/// load.
fn is_mergeable_load<C: LowerCtx<I = Inst>>(
ctx: &mut C,
src_insn: IRInst,
) -> Option<(InsnInput, i32)> {
let insn_data = ctx.data(src_insn);
let inputs = ctx.num_inputs(src_insn);
if inputs != 1 {
return None;
}
let load_ty = ctx.output_ty(src_insn, 0);
if ty_bits(load_ty) < 32 {
// Narrower values are handled by ALU insts that are at least 32 bits
        // wide, which is normally OK as we ignore upper bits; but, if we
// generate, e.g., a direct-from-memory 32-bit add for a byte value and
// the byte is the last byte in a page, the extra data that we load is
// incorrectly accessed. So we only allow loads to merge for
// 32-bit-and-above widths.
return None;
}
// SIMD instructions can only be load-coalesced when the loaded value comes
// from an aligned address.
if load_ty.is_vector() && !insn_data.memflags().map_or(false, |f| f.aligned()) {
return None;
}
// Just testing the opcode is enough, because the width will always match if
// the type does (and the type should match if the CLIF is properly
// constructed).
if insn_data.opcode() == Opcode::Load {
let offset = insn_data
.load_store_offset()
.expect("load should have offset");
Some((
InsnInput {
insn: src_insn,
input: 0,
},
offset,
))
} else {
None
}
}
/// Put the given input into a register or a memory operand.
/// Effectful: may mark the given input as used, when returning the register form.
fn input_to_reg_mem<C: LowerCtx<I = Inst>>(ctx: &mut C, spec: InsnInput) -> RegMem {
let inputs = ctx.get_input_as_source_or_const(spec.insn, spec.input);
if let Some(c) = inputs.constant {
// Generate constants fresh at each use to minimize long-range register pressure.
let ty = ctx.input_ty(spec.insn, spec.input);
return RegMem::reg(generate_constant(ctx, ty, c).only_reg().unwrap());
}
if let Some((src_insn, 0)) = inputs.inst {
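        // If the producing instruction is a load that can be merged into this
        // use, fold it into a memory operand and mark the load as sunk so it is
        // not also emitted on its own.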
if let Some((addr_input, offset)) = is_mergeable_load(ctx, src_insn) {
ctx.sink_inst(src_insn);
let amode = lower_to_amode(ctx, addr_input, offset);
return RegMem::mem(amode);
}
}
RegMem::reg(
ctx.put_input_in_regs(spec.insn, spec.input)
.only_reg()
.unwrap(),
)
}
/// An extension specification for `extend_input_to_reg`.
#[derive(Clone, Copy)]
enum ExtSpec {
ZeroExtendTo32,
ZeroExtendTo64,
SignExtendTo32,
#[allow(dead_code)] // not used just yet but may be used in the future!
SignExtendTo64,
}
/// Put the given input into a register, marking it as used, and do a zero- or signed- extension if
/// required. (This obviously causes side-effects.)
fn extend_input_to_reg<C: LowerCtx<I = Inst>>(
ctx: &mut C,
spec: InsnInput,
ext_spec: ExtSpec,
) -> Reg {
let requested_size = match ext_spec {
ExtSpec::ZeroExtendTo32 | ExtSpec::SignExtendTo32 => 32,
ExtSpec::ZeroExtendTo64 | ExtSpec::SignExtendTo64 => 64,
};
let input_size = ctx.input_ty(spec.insn, spec.input).bits();
let requested_ty = if requested_size == 32 {
types::I32
} else {
types::I64
};
let ext_mode = match (input_size, requested_size) {
(a, b) if a == b => return put_input_in_reg(ctx, spec),
(1, 8) => return put_input_in_reg(ctx, spec),
(a, b) => ExtMode::new(a, b).unwrap_or_else(|| panic!("invalid extension: {} -> {}", a, b)),
};
let src = input_to_reg_mem(ctx, spec);
let dst = ctx.alloc_tmp(requested_ty).only_reg().unwrap();
match ext_spec {
ExtSpec::ZeroExtendTo32 | ExtSpec::ZeroExtendTo64 => {
ctx.emit(Inst::movzx_rm_r(ext_mode, src, dst))
}
ExtSpec::SignExtendTo32 | ExtSpec::SignExtendTo64 => {
ctx.emit(Inst::movsx_rm_r(ext_mode, src, dst))
}
}
dst.to_reg()
}
/// Returns the given input as a 32-bit immediate, if it is a constant that can be properly
/// sign-extended to the instruction width, without any possible side-effect.
fn non_reg_input_to_sext_imm(input: NonRegInput, input_ty: Type) -> Option<u32> {
input.constant.and_then(|x| {
// For i64 instructions (prefixed with REX.W), require that the immediate will sign-extend
// to 64 bits. For other sizes, it doesn't matter and we can just use the plain
// constant.
if input_ty.bytes() != 8 || low32_will_sign_extend_to_64(x) {
Some(x as u32)
} else {
None
}
})
}
fn input_to_imm<C: LowerCtx<I = Inst>>(ctx: &mut C, spec: InsnInput) -> Option<u64> {
ctx.get_input_as_source_or_const(spec.insn, spec.input)
.constant
}
/// Put the given input into an immediate, a register or a memory operand.
/// Effectful: may mark the given input as used, when returning the register form.
fn input_to_reg_mem_imm<C: LowerCtx<I = Inst>>(ctx: &mut C, spec: InsnInput) -> RegMemImm {
let input = ctx.get_input_as_source_or_const(spec.insn, spec.input);
let input_ty = ctx.input_ty(spec.insn, spec.input);
match non_reg_input_to_sext_imm(input, input_ty) {
Some(x) => RegMemImm::imm(x),
None => match input_to_reg_mem(ctx, spec) {
RegMem::Reg { reg } => RegMemImm::reg(reg),
RegMem::Mem { addr } => RegMemImm::mem(addr),
},
}
}
/// Emit an instruction to insert a value `src` into a lane of `dst`.
fn emit_insert_lane<C: LowerCtx<I = Inst>>(
ctx: &mut C,
src: RegMem,
dst: Writable<Reg>,
lane: u8,
ty: Type,
) {
if !ty.is_float() {
let (sse_op, size) = match ty.lane_bits() {
8 => (SseOpcode::Pinsrb, OperandSize::Size32),
16 => (SseOpcode::Pinsrw, OperandSize::Size32),
32 => (SseOpcode::Pinsrd, OperandSize::Size32),
64 => (SseOpcode::Pinsrd, OperandSize::Size64),
_ => panic!("Unable to insertlane for lane size: {}", ty.lane_bits()),
};
ctx.emit(Inst::xmm_rm_r_imm(sse_op, src, dst, lane, size));
} else if ty == types::F32 {
let sse_op = SseOpcode::Insertps;
        // INSERTPS immediate: bits 7:6 select the source lane (0 here), bits 5:4
        // the destination lane (hence `lane << 4`), and bits 3:0 a zero-mask.
let lane = 0b00_00_00_00 | lane << 4;
ctx.emit(Inst::xmm_rm_r_imm(
sse_op,
src,
dst,
lane,
OperandSize::Size32,
));
} else if ty == types::F64 {
let sse_op = match lane {
// Move the lowest quadword in replacement to vector without changing
// the upper bits.
0 => SseOpcode::Movsd,
// Move the low 64 bits of replacement vector to the high 64 bits of the
// vector.
1 => SseOpcode::Movlhps,
_ => unreachable!(),
};
// Here we use the `xmm_rm_r` encoding because it correctly tells the register
// allocator how we are using `dst`: we are using `dst` as a `mod` whereas other
// encoding formats like `xmm_unary_rm_r` treat it as a `def`.
ctx.emit(Inst::xmm_rm_r(sse_op, src, dst));
} else {
panic!("unable to emit insertlane for type: {}", ty)
}
}
/// Emit an instruction to extract a lane of `src` into `dst`.
fn emit_extract_lane<C: LowerCtx<I = Inst>>(
ctx: &mut C,
src: Reg,
dst: Writable<Reg>,
lane: u8,
ty: Type,
) {
if !ty.is_float() {
let (sse_op, size) = match ty.lane_bits() {
8 => (SseOpcode::Pextrb, OperandSize::Size32),
16 => (SseOpcode::Pextrw, OperandSize::Size32),
32 => (SseOpcode::Pextrd, OperandSize::Size32),
64 => (SseOpcode::Pextrd, OperandSize::Size64),
_ => panic!("Unable to extractlane for lane size: {}", ty.lane_bits()),
};
let src = RegMem::reg(src);
ctx.emit(Inst::xmm_rm_r_imm(sse_op, src, dst, lane, size));
} else if ty == types::F32 || ty == types::F64 {
if lane == 0 {
            // No shuffle is needed: a plain move leaves the float in lane 0. The
            // upper bits will remain unchanged; for correctness, this relies on
            // Cranelift type checking to avoid using those bits.
ctx.emit(Inst::gen_move(dst, src, ty));
} else {
// Otherwise, shuffle the bits in `lane` to the lowest lane.
let sse_op = SseOpcode::Pshufd;
let mask = match ty {
// Move the value at `lane` to lane 0, copying existing value at lane 0 to
// other lanes. Again, this relies on Cranelift type checking to avoid
// using those bits.
types::F32 => {
assert!(lane > 0 && lane < 4);
0b00_00_00_00 | lane
}
// Move the value at `lane` 1 (we know it must be 1 because of the `if`
// statement above) to lane 0 and leave lane 1 unchanged. The Cranelift type
// checking assumption also applies here.
types::F64 => {
assert!(lane == 1);
0b11_10_11_10
}
_ => unreachable!(),
};
let src = RegMem::reg(src);
ctx.emit(Inst::xmm_rm_r_imm(
sse_op,
src,
dst,
mask,
OperandSize::Size32,
));
}
} else {
panic!("unable to emit extractlane for type: {}", ty)
}
}
/// Emits an int comparison instruction.
///
/// Note: make sure that there are no instructions modifying the flags between a call to this
/// function and the use of the flags!
///
/// Takes the condition code that will be tested, and returns
/// the condition code that should be used. This allows us to
/// synthesize comparisons out of multiple instructions for
/// special cases (e.g., 128-bit integers).
fn emit_cmp<C: LowerCtx<I = Inst>>(ctx: &mut C, insn: IRInst, cc: IntCC) -> IntCC {
let ty = ctx.input_ty(insn, 0);
let inputs = [InsnInput { insn, input: 0 }, InsnInput { insn, input: 1 }];
if ty == types::I128 {
// We need to compare both halves and combine the results appropriately.
let cmp1 = ctx.alloc_tmp(types::I64).only_reg().unwrap();
let cmp2 = ctx.alloc_tmp(types::I64).only_reg().unwrap();
let lhs = put_input_in_regs(ctx, inputs[0]);
let lhs_lo = lhs.regs()[0];
let lhs_hi = lhs.regs()[1];
let rhs = put_input_in_regs(ctx, inputs[1]);
let rhs_lo = RegMemImm::reg(rhs.regs()[0]);
let rhs_hi = RegMemImm::reg(rhs.regs()[1]);
match cc {
IntCC::Equal => {
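                // cmp1 = (lhs_hi == rhs_hi), cmp2 = (lhs_lo == rhs_lo); AND the
                // two together and mask to one bit, so the final flags are NZ
                // exactly when both halves are equal; hence `IntCC::NotEqual`
                // is returned as the condition to test.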
ctx.emit(Inst::cmp_rmi_r(OperandSize::Size64, rhs_hi, lhs_hi));
ctx.emit(Inst::setcc(CC::Z, cmp1));
ctx.emit(Inst::cmp_rmi_r(OperandSize::Size64, rhs_lo, lhs_lo));
ctx.emit(Inst::setcc(CC::Z, cmp2));
ctx.emit(Inst::alu_rmi_r(
OperandSize::Size64,
AluRmiROpcode::And,
RegMemImm::reg(cmp1.to_reg()),
cmp2,
));
ctx.emit(Inst::alu_rmi_r(
OperandSize::Size64,
AluRmiROpcode::And,
RegMemImm::imm(1),
cmp2,
));
IntCC::NotEqual
}
IntCC::NotEqual => {
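                // cmp1 = (lhs_hi != rhs_hi), cmp2 = (lhs_lo != rhs_lo); OR the
                // two together and mask to one bit, so NZ in the final flags
                // means the values differ.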
ctx.emit(Inst::cmp_rmi_r(OperandSize::Size64, rhs_hi, lhs_hi));
ctx.emit(Inst::setcc(CC::NZ, cmp1));
ctx.emit(Inst::cmp_rmi_r(OperandSize::Size64, rhs_lo, lhs_lo));
ctx.emit(Inst::setcc(CC::NZ, cmp2));
ctx.emit(Inst::alu_rmi_r(
OperandSize::Size64,
AluRmiROpcode::Or,
RegMemImm::reg(cmp1.to_reg()),
cmp2,
));
ctx.emit(Inst::alu_rmi_r(
OperandSize::Size64,
AluRmiROpcode::And,
RegMemImm::imm(1),
cmp2,
));
IntCC::NotEqual
}
IntCC::SignedLessThan
| IntCC::SignedLessThanOrEqual
| IntCC::SignedGreaterThan
| IntCC::SignedGreaterThanOrEqual
| IntCC::UnsignedLessThan
| IntCC::UnsignedLessThanOrEqual
| IntCC::UnsignedGreaterThan
| IntCC::UnsignedGreaterThanOrEqual => {
// Result = (lhs_hi <> rhs_hi) ||
// (lhs_hi == rhs_hi && lhs_lo <> rhs_lo)
let cmp3 = ctx.alloc_tmp(types::I64).only_reg().unwrap();
ctx.emit(Inst::cmp_rmi_r(OperandSize::Size64, rhs_hi, lhs_hi));
ctx.emit(Inst::setcc(CC::from_intcc(cc.without_equal()), cmp1));
ctx.emit(Inst::setcc(CC::Z, cmp2));
ctx.emit(Inst::cmp_rmi_r(OperandSize::Size64, rhs_lo, lhs_lo));
ctx.emit(Inst::setcc(CC::from_intcc(cc.unsigned()), cmp3));
ctx.emit(Inst::alu_rmi_r(
OperandSize::Size64,
AluRmiROpcode::And,
RegMemImm::reg(cmp2.to_reg()),
cmp3,
));
ctx.emit(Inst::alu_rmi_r(
OperandSize::Size64,
AluRmiROpcode::Or,
RegMemImm::reg(cmp1.to_reg()),
cmp3,
));
ctx.emit(Inst::alu_rmi_r(
OperandSize::Size64,
AluRmiROpcode::And,
RegMemImm::imm(1),
cmp3,
));
IntCC::NotEqual
}
_ => panic!("Unhandled IntCC in I128 comparison: {:?}", cc),
}
} else {
// TODO Try to commute the operands (and invert the condition) if one is an immediate.
let lhs = put_input_in_reg(ctx, inputs[0]);
// We force the RHS into a register, and disallow load-op fusion, because we
// do not have a transitive guarantee that this cmp-site will be the sole
// user of the value. Consider: the icmp might be the only user of a load,
// but there may be multiple users of the icmp (e.g. select or bint
// instructions) that each invoke `emit_cmp()`. If we were to allow a load
// to sink to the *latest* one, but other sites did not permit sinking, then
// we would be missing the load for other cmp-sites.
let rhs = put_input_in_reg(ctx, inputs[1]);
// Cranelift's icmp semantics want to compare lhs - rhs, while Intel gives
// us dst - src at the machine instruction level, so invert operands.
ctx.emit(Inst::cmp_rmi_r(
OperandSize::from_ty(ty),
RegMemImm::reg(rhs),
lhs,
));
cc
}
}
/// A specification for a fcmp emission.
enum FcmpSpec {
/// Normal flow.
Normal,
/// Avoid emitting Equal at all costs by inverting it to NotEqual, and indicate when that
/// happens with `InvertedEqualOrConditions`.
///
/// This is useful in contexts where it is hard/inefficient to produce a single instruction (or
/// sequence of instructions) that check for an "AND" combination of condition codes; see for
/// instance lowering of Select.
InvertEqual,
}
/// This explains how to interpret the results of an fcmp instruction.
enum FcmpCondResult {
/// The given condition code must be set.
Condition(CC),
/// Both condition codes must be set.
AndConditions(CC, CC),
    /// Either of the condition codes must be set.
OrConditions(CC, CC),
    /// The associated spec was set to `FcmpSpec::InvertEqual` and Equal has been inverted. Either
    /// of the condition codes must be set, and the user must invert the meaning of the resulting
    /// condition-code test. When the spec is set to `FcmpSpec::Normal`, this case can't be
    /// reached.
InvertedEqualOrConditions(CC, CC),
}
/// Emits a float comparison instruction.
///
/// Note: make sure that there are no instructions modifying the flags between a call to this
/// function and the use of the flags!
fn emit_fcmp<C: LowerCtx<I = Inst>>(
ctx: &mut C,
insn: IRInst,
mut cond_code: FloatCC,
spec: FcmpSpec,
) -> FcmpCondResult {
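    // With UCOMISS/UCOMISD, an unordered result (a NaN operand) sets ZF, PF and
    // CF all to 1, so "below"-style conditions would spuriously hold for NaNs.
    // Such conditions are lowered instead by swapping the operands and
    // reversing the condition code, as computed here.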
let (flip_operands, inverted_equal) = match cond_code {
FloatCC::LessThan
| FloatCC::LessThanOrEqual
| FloatCC::UnorderedOrGreaterThan
| FloatCC::UnorderedOrGreaterThanOrEqual => {
cond_code = cond_code.reverse();
(true, false)
}
FloatCC::Equal => {
let inverted_equal = match spec {
FcmpSpec::Normal => false,
FcmpSpec::InvertEqual => {
cond_code = FloatCC::NotEqual; // same as .inverse()
true
}
};
(false, inverted_equal)
}
_ => (false, false),
};
    // Emit a single direct float comparison; the match at the end maps the
    // (possibly adjusted) condition code onto one or two tests of these flags.
let op = match ctx.input_ty(insn, 0) {
types::F32 => SseOpcode::Ucomiss,
types::F64 => SseOpcode::Ucomisd,
_ => panic!("Bad input type to Fcmp"),
};
let inputs = &[InsnInput { insn, input: 0 }, InsnInput { insn, input: 1 }];
let (lhs_input, rhs_input) = if flip_operands {
(inputs[1], inputs[0])
} else {
(inputs[0], inputs[1])
};
let lhs = put_input_in_reg(ctx, lhs_input);
// See above in `emit_cmp()`. We must only use the reg/reg form of the
// comparison in order to avoid issues with merged loads.
let rhs = put_input_in_reg(ctx, rhs_input);
ctx.emit(Inst::xmm_cmp_rm_r(op, RegMem::reg(rhs), lhs));
let cond_result = match cond_code {
FloatCC::Equal => FcmpCondResult::AndConditions(CC::NP, CC::Z),
FloatCC::NotEqual if inverted_equal => {
FcmpCondResult::InvertedEqualOrConditions(CC::P, CC::NZ)
}
FloatCC::NotEqual if !inverted_equal => FcmpCondResult::OrConditions(CC::P, CC::NZ),
_ => FcmpCondResult::Condition(CC::from_floatcc(cond_code)),
};
cond_result
}
fn emit_bitrev<C: LowerCtx<I = Inst>>(ctx: &mut C, src: Reg, dst: Writable<Reg>, ty: Type) {
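    // Classic logarithmic bit reversal: swap adjacent 1-bit units, then 2-bit
    // pairs, nibbles, and so on up to half the type's width, masking each stage
    // with `const_mask` to stay within the original width.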
let bits = ty.bits();
let const_mask = if bits == 64 {
0xffff_ffff_ffff_ffff
} else {
(1u64 << bits) - 1
};
let tmp0 = ctx.alloc_tmp(types::I64).only_reg().unwrap();
let tmp1 = ctx.alloc_tmp(types::I64).only_reg().unwrap();
let tmp2 = ctx.alloc_tmp(types::I64).only_reg().unwrap();
ctx.emit(Inst::gen_move(tmp0, src, types::I64));
// Swap 1-bit units.
// tmp1 = src
ctx.emit(Inst::gen_move(tmp1, tmp0.to_reg(), types::I64));
// tmp2 = 0b0101..
ctx.emit(Inst::imm(
OperandSize::Size64,
0x5555_5555_5555_5555 & const_mask,
tmp2,
));
// tmp1 = src >> 1
ctx.emit(Inst::shift_r(
OperandSize::Size64,
ShiftKind::ShiftRightLogical,
Some(1),
tmp1,
));
// tmp1 = (src >> 1) & 0b0101..
ctx.emit(Inst::alu_rmi_r(
OperandSize::Size64,
AluRmiROpcode::And,
RegMemImm::reg(tmp2.to_reg()),
tmp1,
));
// tmp2 = src & 0b0101..
ctx.emit(Inst::alu_rmi_r(
OperandSize::Size64,
AluRmiROpcode::And,
RegMemImm::reg(tmp0.to_reg()),
tmp2,
));
// tmp2 = (src & 0b0101..) << 1
ctx.emit(Inst::shift_r(
OperandSize::Size64,
ShiftKind::ShiftLeft,
Some(1),
tmp2,
));
// tmp0 = (src >> 1) & 0b0101.. | (src & 0b0101..) << 1
ctx.emit(Inst::gen_move(tmp0, tmp2.to_reg(), types::I64));
ctx.emit(Inst::alu_rmi_r(
OperandSize::Size64,
AluRmiROpcode::Or,
RegMemImm::reg(tmp1.to_reg()),
tmp0,
));
// Swap 2-bit units.
ctx.emit(Inst::gen_move(tmp1, tmp0.to_reg(), types::I64));
ctx.emit(Inst::imm(
OperandSize::Size64,
0x3333_3333_3333_3333 & const_mask,
tmp2,
));
ctx.emit(Inst::shift_r(
OperandSize::Size64,
ShiftKind::ShiftRightLogical,
Some(2),
tmp1,
));
ctx.emit(Inst::alu_rmi_r(
OperandSize::Size64,
AluRmiROpcode::And,
RegMemImm::reg(tmp2.to_reg()),
tmp1,
));
ctx.emit(Inst::alu_rmi_r(
OperandSize::Size64,
AluRmiROpcode::And,
RegMemImm::reg(tmp0.to_reg()),
tmp2,
));
ctx.emit(Inst::shift_r(
OperandSize::Size64,
ShiftKind::ShiftLeft,
Some(2),
tmp2,
));
ctx.emit(Inst::gen_move(tmp0, tmp2.to_reg(), types::I64));
ctx.emit(Inst::alu_rmi_r(
OperandSize::Size64,
AluRmiROpcode::Or,
RegMemImm::reg(tmp1.to_reg()),
tmp0,
));
// Swap 4-bit units.
ctx.emit(Inst::gen_move(tmp1, tmp0.to_reg(), types::I64));
ctx.emit(Inst::imm(
OperandSize::Size64,
0x0f0f_0f0f_0f0f_0f0f & const_mask,
tmp2,
));
ctx.emit(Inst::shift_r(
OperandSize::Size64,
ShiftKind::ShiftRightLogical,
Some(4),
tmp1,
));
ctx.emit(Inst::alu_rmi_r(
OperandSize::Size64,
AluRmiROpcode::And,
RegMemImm::reg(tmp2.to_reg()),
tmp1,
));
ctx.emit(Inst::alu_rmi_r(
OperandSize::Size64,
AluRmiROpcode::And,
RegMemImm::reg(tmp0.to_reg()),
tmp2,
));
ctx.emit(Inst::shift_r(
OperandSize::Size64,
ShiftKind::ShiftLeft,
Some(4),
tmp2,
));
ctx.emit(Inst::gen_move(tmp0, tmp2.to_reg(), types::I64));
ctx.emit(Inst::alu_rmi_r(
OperandSize::Size64,
AluRmiROpcode::Or,
RegMemImm::reg(tmp1.to_reg()),
tmp0,
));
if bits > 8 {
// Swap 8-bit units.
ctx.emit(Inst::gen_move(tmp1, tmp0.to_reg(), types::I64));
ctx.emit(Inst::imm(
OperandSize::Size64,
0x00ff_00ff_00ff_00ff & const_mask,
tmp2,
));
ctx.emit(Inst::shift_r(
OperandSize::Size64,
ShiftKind::ShiftRightLogical,
Some(8),
tmp1,
));
ctx.emit(Inst::alu_rmi_r(
OperandSize::Size64,
AluRmiROpcode::And,
RegMemImm::reg(tmp2.to_reg()),
tmp1,
));
ctx.emit(Inst::alu_rmi_r(
OperandSize::Size64,
AluRmiROpcode::And,
RegMemImm::reg(tmp0.to_reg()),
tmp2,
));
ctx.emit(Inst::shift_r(
OperandSize::Size64,
ShiftKind::ShiftLeft,
Some(8),
tmp2,
));
ctx.emit(Inst::gen_move(tmp0, tmp2.to_reg(), types::I64));
ctx.emit(Inst::alu_rmi_r(
OperandSize::Size64,
AluRmiROpcode::Or,
RegMemImm::reg(tmp1.to_reg()),
tmp0,
));
}
if bits > 16 {
// Swap 16-bit units.
ctx.emit(Inst::gen_move(tmp1, tmp0.to_reg(), types::I64));
ctx.emit(Inst::imm(
OperandSize::Size64,
0x0000_ffff_0000_ffff & const_mask,
tmp2,
));
ctx.emit(Inst::shift_r(
OperandSize::Size64,
ShiftKind::ShiftRightLogical,
Some(16),
tmp1,
));
ctx.emit(Inst::alu_rmi_r(
OperandSize::Size64,
AluRmiROpcode::And,
RegMemImm::reg(tmp2.to_reg()),
tmp1,
));
ctx.emit(Inst::alu_rmi_r(
OperandSize::Size64,
AluRmiROpcode::And,
RegMemImm::reg(tmp0.to_reg()),
tmp2,
));
ctx.emit(Inst::shift_r(
OperandSize::Size64,
ShiftKind::ShiftLeft,
Some(16),
tmp2,
));
ctx.emit(Inst::gen_move(tmp0, tmp2.to_reg(), types::I64));
ctx.emit(Inst::alu_rmi_r(
OperandSize::Size64,
AluRmiROpcode::Or,
RegMemImm::reg(tmp1.to_reg()),
tmp0,
));
}
if bits > 32 {
// Swap 32-bit units.
ctx.emit(Inst::gen_move(tmp1, tmp0.to_reg(), types::I64));
ctx.emit(Inst::imm(
OperandSize::Size64,
0x0000_0000_ffff_ffff & const_mask,
tmp2,
));
ctx.emit(Inst::shift_r(
OperandSize::Size64,
ShiftKind::ShiftRightLogical,
Some(32),
tmp1,
));
ctx.emit(Inst::alu_rmi_r(
OperandSize::Size64,
AluRmiROpcode::And,
RegMemImm::reg(tmp2.to_reg()),
tmp1,
));
ctx.emit(Inst::alu_rmi_r(
OperandSize::Size64,
AluRmiROpcode::And,
RegMemImm::reg(tmp0.to_reg()),
tmp2,
));
ctx.emit(Inst::shift_r(
OperandSize::Size64,
ShiftKind::ShiftLeft,
Some(32),
tmp2,
));
ctx.emit(Inst::gen_move(tmp0, tmp2.to_reg(), types::I64));
ctx.emit(Inst::alu_rmi_r(
OperandSize::Size64,
AluRmiROpcode::Or,
RegMemImm::reg(tmp1.to_reg()),
tmp0,
));
}
ctx.emit(Inst::gen_move(dst, tmp0.to_reg(), types::I64));
}
fn emit_shl_i128<C: LowerCtx<I = Inst>>(
ctx: &mut C,
src: ValueRegs<Reg>,
dst: ValueRegs<Writable<Reg>>,
amt_src: Reg,
) {
let src_lo = src.regs()[0];
let src_hi = src.regs()[1];
let dst_lo = dst.regs()[0];
let dst_hi = dst.regs()[1];
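    // The shift amount is effectively taken mod 128. x86's 64-bit shifts look
    // only at the low 6 bits of CL, so the `amt & 64` test together with the
    // cmovz/cmovnz pair at the end selects between the "amt < 64" and
    // "amt >= 64" cases. In pseudo-asm: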
// mov tmp1, src_lo
// shl tmp1, amt_src
// mov tmp2, src_hi
// shl tmp2, amt_src
// mov amt, 64
// sub amt, amt_src
// mov tmp3, src_lo
// shr tmp3, amt
// xor dst_lo, dst_lo
// test amt_src, 127
// cmovz tmp3, dst_lo
// or tmp3, tmp2
// mov amt, amt_src
// and amt, 64
// cmovz dst_hi, tmp3
// cmovz dst_lo, tmp1
// cmovnz dst_hi, tmp1
let tmp1 = ctx.alloc_tmp(types::I64).only_reg().unwrap();
let tmp2 = ctx.alloc_tmp(types::I64).only_reg().unwrap();
let tmp3 = ctx.alloc_tmp(types::I64).only_reg().unwrap();
let amt = ctx.alloc_tmp(types::I64).only_reg().unwrap();
ctx.emit(Inst::gen_move(tmp1, src_lo, types::I64));
ctx.emit(Inst::gen_move(
Writable::from_reg(regs::rcx()),
amt_src,
types::I64,
));
ctx.emit(Inst::shift_r(
OperandSize::Size64,
ShiftKind::ShiftLeft,
None,
tmp1,
));
ctx.emit(Inst::gen_move(tmp2, src_hi, types::I64));
ctx.emit(Inst::gen_move(
Writable::from_reg(regs::rcx()),
amt_src,
types::I64,
));
ctx.emit(Inst::shift_r(
OperandSize::Size64,
ShiftKind::ShiftLeft,
None,
tmp2,
));
ctx.emit(Inst::imm(OperandSize::Size64, 64, amt));
ctx.emit(Inst::alu_rmi_r(
OperandSize::Size64,
AluRmiROpcode::Sub,
RegMemImm::reg(amt_src),
amt,
));
ctx.emit(Inst::gen_move(tmp3, src_lo, types::I64));
ctx.emit(Inst::gen_move(
Writable::from_reg(regs::rcx()),
amt.to_reg(),
types::I64,
));
ctx.emit(Inst::shift_r(
OperandSize::Size64,
ShiftKind::ShiftRightLogical,
None,
tmp3,
));
ctx.emit(Inst::alu_rmi_r(
OperandSize::Size64,
AluRmiROpcode::Xor,
RegMemImm::reg(dst_lo.to_reg()),
dst_lo,
));
ctx.emit(Inst::test_rmi_r(
OperandSize::Size64,
RegMemImm::imm(127),
amt_src,
));
ctx.emit(Inst::cmove(
OperandSize::Size64,
CC::Z,
RegMem::reg(dst_lo.to_reg()),
tmp3,
));
ctx.emit(Inst::alu_rmi_r(
OperandSize::Size64,
AluRmiROpcode::Or,
RegMemImm::reg(tmp2.to_reg()),
tmp3,
));
// This isn't semantically necessary, but it keeps the
// register allocator happy, because it cannot otherwise
// infer that cmovz + cmovnz always defines dst_hi.
ctx.emit(Inst::alu_rmi_r(
OperandSize::Size64,
AluRmiROpcode::Xor,
RegMemImm::reg(dst_hi.to_reg()),
dst_hi,
));
ctx.emit(Inst::gen_move(amt, amt_src, types::I64));
ctx.emit(Inst::alu_rmi_r(
OperandSize::Size64,
AluRmiROpcode::And,
RegMemImm::imm(64),
amt,
));
ctx.emit(Inst::cmove(
OperandSize::Size64,
CC::Z,
RegMem::reg(tmp3.to_reg()),
dst_hi,
));
ctx.emit(Inst::cmove(
OperandSize::Size64,
CC::Z,
RegMem::reg(tmp1.to_reg()),
dst_lo,
));
ctx.emit(Inst::cmove(
OperandSize::Size64,
CC::NZ,
RegMem::reg(tmp1.to_reg()),
dst_hi,
));
}
fn emit_shr_i128<C: LowerCtx<I = Inst>>(
ctx: &mut C,
src: ValueRegs<Reg>,
dst: ValueRegs<Writable<Reg>>,
amt_src: Reg,
is_signed: bool,
) {
let src_lo = src.regs()[0];
let src_hi = src.regs()[1];
let dst_lo = dst.regs()[0];
let dst_hi = dst.regs()[1];
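    // Mirror image of the i128 left shift: bits flow from the high half into
    // the low half, and for `amt >= 64` the cmov pair at the end routes the
    // shifted high half (sign- or zero-filled as appropriate) into place. In
    // pseudo-asm: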
// mov tmp1, src_hi
// {u,s}shr tmp1, amt_src
// mov tmp2, src_lo
// ushr tmp2, amt_src
// mov amt, 64
// sub amt, amt_src
// mov tmp3, src_hi
// shl tmp3, amt
// xor dst_lo, dst_lo
// test amt_src, 127
// cmovz tmp3, dst_lo
// or tmp3, tmp2
// if is_signed:
// mov dst_hi, src_hi
// sshr dst_hi, 63 // get the sign bit
// else:
// xor dst_hi, dst_hi
// mov amt, amt_src
// and amt, 64
// cmovz dst_hi, tmp1
// cmovz dst_lo, tmp3
// cmovnz dst_lo, tmp1
let tmp1 = ctx.alloc_tmp(types::I64).only_reg().unwrap();
let tmp2 = ctx.alloc_tmp(types::I64).only_reg().unwrap();
let tmp3 = ctx.alloc_tmp(types::I64).only_reg().unwrap();
let amt = ctx.alloc_tmp(types::I64).only_reg().unwrap();
let shift_kind = if is_signed {
ShiftKind::ShiftRightArithmetic
} else {
ShiftKind::ShiftRightLogical
};
ctx.emit(Inst::gen_move(tmp1, src_hi, types::I64));
ctx.emit(Inst::gen_move(
Writable::from_reg(regs::rcx()),
amt_src,
types::I64,
));
ctx.emit(Inst::shift_r(OperandSize::Size64, shift_kind, None, tmp1));
ctx.emit(Inst::gen_move(tmp2, src_lo, types::I64));
ctx.emit(Inst::gen_move(
Writable::from_reg(regs::rcx()),
amt_src,
types::I64,
));
// N.B.: right-shift of *lower* half is *always* unsigned (its MSB is not a sign bit).
ctx.emit(Inst::shift_r(
OperandSize::Size64,
ShiftKind::ShiftRightLogical,
None,
tmp2,
));
ctx.emit(Inst::imm(OperandSize::Size64, 64, amt));
ctx.emit(Inst::alu_rmi_r(
OperandSize::Size64,
AluRmiROpcode::Sub,
RegMemImm::reg(amt_src),
amt,
));
ctx.emit(Inst::gen_move(tmp3, src_hi, types::I64));
ctx.emit(Inst::gen_move(
Writable::from_reg(regs::rcx()),
amt.to_reg(),
types::I64,
));
ctx.emit(Inst::shift_r(
OperandSize::Size64,
ShiftKind::ShiftLeft,
None,
tmp3,
));
ctx.emit(Inst::alu_rmi_r(
OperandSize::Size64,
AluRmiROpcode::Xor,
RegMemImm::reg(dst_lo.to_reg()),
dst_lo,
));
ctx.emit(Inst::test_rmi_r(
OperandSize::Size64,
RegMemImm::imm(127),
amt_src,
));
ctx.emit(Inst::cmove(
OperandSize::Size64,
CC::Z,
RegMem::reg(dst_lo.to_reg()),
tmp3,
));
ctx.emit(Inst::alu_rmi_r(
OperandSize::Size64,
AluRmiROpcode::Or,
RegMemImm::reg(tmp2.to_reg()),
tmp3,
));
if is_signed {
ctx.emit(Inst::gen_move(dst_hi, src_hi, types::I64));
ctx.emit(Inst::shift_r(
OperandSize::Size64,
ShiftKind::ShiftRightArithmetic,
Some(63),
dst_hi,
));
} else {
ctx.emit(Inst::alu_rmi_r(
OperandSize::Size64,
AluRmiROpcode::Xor,
RegMemImm::reg(dst_hi.to_reg()),
dst_hi,
));
}
// This isn't semantically necessary, but it keeps the
// register allocator happy, because it cannot otherwise
// infer that cmovz + cmovnz always defines dst_lo.
ctx.emit(Inst::alu_rmi_r(
OperandSize::Size64,
AluRmiROpcode::Xor,
RegMemImm::reg(dst_lo.to_reg()),
dst_lo,
));
ctx.emit(Inst::gen_move(amt, amt_src, types::I64));
ctx.emit(Inst::alu_rmi_r(
OperandSize::Size64,
AluRmiROpcode::And,
RegMemImm::imm(64),
amt,
));
ctx.emit(Inst::cmove(
OperandSize::Size64,
CC::Z,
RegMem::reg(tmp1.to_reg()),
dst_hi,
));
ctx.emit(Inst::cmove(
OperandSize::Size64,
CC::Z,
RegMem::reg(tmp3.to_reg()),
dst_lo,
));
ctx.emit(Inst::cmove(
OperandSize::Size64,
CC::NZ,
RegMem::reg(tmp1.to_reg()),
dst_lo,
));
}
fn make_libcall_sig<C: LowerCtx<I = Inst>>(
ctx: &mut C,
insn: IRInst,
call_conv: CallConv,
ptr_ty: Type,
) -> Signature {
let mut sig = Signature::new(call_conv);
for i in 0..ctx.num_inputs(insn) {
sig.params.push(AbiParam::new(ctx.input_ty(insn, i)));
}
for i in 0..ctx.num_outputs(insn) {
sig.returns.push(AbiParam::new(ctx.output_ty(insn, i)));
}
if call_conv.extends_baldrdash() {
// Adds the special VMContext parameter to the signature.
sig.params
.push(AbiParam::special(ptr_ty, ArgumentPurpose::VMContext));
}
sig
}
fn emit_vm_call<C: LowerCtx<I = Inst>>(
ctx: &mut C,
flags: &Flags,
triple: &Triple,
libcall: LibCall,
insn: IRInst,
inputs: SmallVec<[InsnInput; 4]>,
outputs: SmallVec<[InsnOutput; 2]>,
) -> CodegenResult<()> {
let extname = ExternalName::LibCall(libcall);
let dist = if flags.use_colocated_libcalls() {
RelocDistance::Near
} else {
RelocDistance::Far
};
// TODO avoid recreating signatures for every single Libcall function.
let call_conv = CallConv::for_libcall(flags, CallConv::triple_default(triple));
let sig = make_libcall_sig(ctx, insn, call_conv, types::I64);
let caller_conv = ctx.abi().call_conv();
let mut abi = X64ABICaller::from_func(&sig, &extname, dist, caller_conv, flags)?;
abi.emit_stack_pre_adjust(ctx);
let vm_context = if call_conv.extends_baldrdash() { 1 } else { 0 };
assert_eq!(inputs.len() + vm_context, abi.num_args());
for (i, input) in inputs.iter().enumerate() {
let arg_reg = put_input_in_reg(ctx, *input);
abi.emit_copy_regs_to_arg(ctx, i, ValueRegs::one(arg_reg));
}
if call_conv.extends_baldrdash() {
let vm_context_vreg = ctx
.get_vm_context()
.expect("should have a VMContext to pass to libcall funcs");
abi.emit_copy_regs_to_arg(ctx, inputs.len(), ValueRegs::one(vm_context_vreg));
}
abi.emit_call(ctx);
for (i, output) in outputs.iter().enumerate() {
let retval_reg = get_output_reg(ctx, *output).only_reg().unwrap();
abi.emit_copy_retval_to_regs(ctx, i, ValueRegs::one(retval_reg));
}
abi.emit_stack_post_adjust(ctx);
Ok(())
}
/// Matches a shift of an input by a constant amount less than or equal to 3, returning the
/// shifted input and the shift amount. The goal is to embed the shift within an address mode.
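/// For example, an `ishl` by `iconst 3` matches and yields the shifted input together with the
/// amount 3, which corresponds to a scale of 8 in the resulting address mode.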
fn matches_small_constant_shift<C: LowerCtx<I = Inst>>(
ctx: &mut C,
spec: InsnInput,
) -> Option<(InsnInput, u8)> {
matches_input(ctx, spec, Opcode::Ishl).and_then(|shift| {
match input_to_imm(
ctx,
InsnInput {
insn: shift,
input: 1,
},
) {
Some(shift_amt) if shift_amt <= 3 => Some((
InsnInput {
insn: shift,
input: 0,
},
shift_amt as u8,
)),
_ => None,
}
})
}
/// Lowers an instruction to one of the x86 addressing modes.
///
/// Note: the 32-bit offset in Cranelift has to be sign-extended, which matches x86's behavior.
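///
/// For example, a load at offset 16 from `iadd v0, (ishl v1, (iconst 3))` can be lowered to the
/// single addressing mode `16(v0, v1, 8)`.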
fn lower_to_amode<C: LowerCtx<I = Inst>>(ctx: &mut C, spec: InsnInput, offset: i32) -> Amode {
let flags = ctx
.memflags(spec.insn)
.expect("Instruction with amode should have memflags");
// We now either have an add that we must materialize, or some other input; as well as the
// final offset.
if let Some(add) = matches_input(ctx, spec, Opcode::Iadd) {
debug_assert_eq!(ctx.output_ty(add, 0), types::I64);
let add_inputs = &[
InsnInput {
insn: add,
input: 0,
},
InsnInput {
insn: add,
input: 1,
},
];
// TODO heap_addr legalization generates a uext64 *after* the shift, so these optimizations
// aren't happening in the wasm case. We could do better, given some range analysis.
let (base, index, shift) = if let Some((shift_input, shift_amt)) =
matches_small_constant_shift(ctx, add_inputs[0])
{
(
put_input_in_reg(ctx, add_inputs[1]),
put_input_in_reg(ctx, shift_input),
shift_amt,
)
} else if let Some((shift_input, shift_amt)) =
matches_small_constant_shift(ctx, add_inputs[1])
{
(
put_input_in_reg(ctx, add_inputs[0]),
put_input_in_reg(ctx, shift_input),
shift_amt,
)
} else {
for i in 0..=1 {
// Try to pierce through uextend.
if let Some(uextend) = matches_input(
ctx,
InsnInput {
insn: add,
input: i,
},
Opcode::Uextend,
) {
if let Some(cst) = ctx.get_input_as_source_or_const(uextend, 0).constant {
// Zero the upper bits.
let input_size = ctx.input_ty(uextend, 0).bits() as u64;
let shift: u64 = 64 - input_size;
let uext_cst: u64 = (cst << shift) >> shift;
let final_offset = (offset as i64).wrapping_add(uext_cst as i64);
if low32_will_sign_extend_to_64(final_offset as u64) {
let base = put_input_in_reg(ctx, add_inputs[1 - i]);
return Amode::imm_reg(final_offset as u32, base).with_flags(flags);
}
}
}
// If it's a constant, add it directly!
if let Some(cst) = ctx.get_input_as_source_or_const(add, i).constant {
let final_offset = (offset as i64).wrapping_add(cst as i64);
if low32_will_sign_extend_to_64(final_offset as u64) {
let base = put_input_in_reg(ctx, add_inputs[1 - i]);
return Amode::imm_reg(final_offset as u32, base).with_flags(flags);
}
}
}
(
put_input_in_reg(ctx, add_inputs[0]),
put_input_in_reg(ctx, add_inputs[1]),
0,
)
};
return Amode::imm_reg_reg_shift(offset as u32, base, index, shift).with_flags(flags);
}
let input = put_input_in_reg(ctx, spec);
Amode::imm_reg(offset as u32, input).with_flags(flags)
}
fn emit_moves<C: LowerCtx<I = Inst>>(
ctx: &mut C,
dst: ValueRegs<Writable<Reg>>,
src: ValueRegs<Reg>,
ty: Type,
) {
let (_, tys) = Inst::rc_for_type(ty).unwrap();
for ((dst, src), ty) in dst.regs().iter().zip(src.regs().iter()).zip(tys.iter()) {
ctx.emit(Inst::gen_move(*dst, *src, *ty));
}
}
fn emit_cmoves<C: LowerCtx<I = Inst>>(
ctx: &mut C,
size: u8,
cc: CC,
src: ValueRegs<Reg>,
dst: ValueRegs<Writable<Reg>>,
) {
let size = size / src.len() as u8;
let size = u8::max(size, 4); // at least 32 bits
for (dst, src) in dst.regs().iter().zip(src.regs().iter()) {
ctx.emit(Inst::cmove(
OperandSize::from_bytes(size.into()),
cc,
RegMem::reg(*src),
*dst,
));
}
}
fn emit_clz<C: LowerCtx<I = Inst>>(
ctx: &mut C,
orig_ty: Type,
ty: Type,
src: Reg,
dst: Writable<Reg>,
) {
let src = RegMem::reg(src);
let tmp = ctx.alloc_tmp(ty).only_reg().unwrap();
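    // dst = all-ones; tmp = bsr(src), whose result is undefined for a zero
    // input, so the cmovz patches in the -1; then dst = (orig_bits - 1) - tmp,
    // which yields orig_bits for a zero input, as CLIF requires.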
ctx.emit(Inst::imm(OperandSize::from_ty(ty), u64::max_value(), dst));
ctx.emit(Inst::unary_rm_r(
OperandSize::from_ty(ty),
UnaryRmROpcode::Bsr,
src,
tmp,
));
ctx.emit(Inst::cmove(
OperandSize::from_ty(ty),
CC::Z,
RegMem::reg(dst.to_reg()),
tmp,
));
ctx.emit(Inst::imm(
OperandSize::from_ty(ty),
orig_ty.bits() as u64 - 1,
dst,
));
ctx.emit(Inst::alu_rmi_r(
if ty == types::I64 {
OperandSize::Size64
} else {
OperandSize::Size32
},
AluRmiROpcode::Sub,
RegMemImm::reg(tmp.to_reg()),
dst,
));
}
fn emit_ctz<C: LowerCtx<I = Inst>>(
ctx: &mut C,
orig_ty: Type,
ty: Type,
src: Reg,
dst: Writable<Reg>,
) {
let src = RegMem::reg(src);
let tmp = ctx.alloc_tmp(ty).only_reg().unwrap();
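    // tmp = orig_bits; dst = bsf(src), whose result is undefined for a zero
    // input, so the cmovz patches in orig_bits, matching CLIF's ctz semantics
    // for zero.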
ctx.emit(Inst::imm(OperandSize::Size32, orig_ty.bits() as u64, tmp));
ctx.emit(Inst::unary_rm_r(
OperandSize::from_ty(ty),
UnaryRmROpcode::Bsf,
src,
dst,
));
ctx.emit(Inst::cmove(
OperandSize::from_ty(ty),
CC::Z,
RegMem::reg(tmp.to_reg()),
dst,
));
}
//=============================================================================
// Top-level instruction lowering entry point, for one instruction.
/// Actually codegen an instruction's results into registers.
fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
ctx: &mut C,
insn: IRInst,
flags: &Flags,
isa_flags: &x64_settings::Flags,
triple: &Triple,
) -> CodegenResult<()> {
let op = ctx.data(insn).opcode();
let inputs: SmallVec<[InsnInput; 4]> = (0..ctx.num_inputs(insn))
.map(|i| InsnInput { insn, input: i })
.collect();
let outputs: SmallVec<[InsnOutput; 2]> = (0..ctx.num_outputs(insn))
.map(|i| InsnOutput { insn, output: i })
.collect();
let ty = if outputs.len() > 0 {
Some(ctx.output_ty(insn, 0))
} else {
None
};
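    // Give the ISLE-generated lowering rules the first chance at this
    // instruction; fall through to the handwritten match below for anything
    // they do not yet handle.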
if let Ok(()) = isle::lower(ctx, isa_flags, &outputs, insn) {
return Ok(());
}
let implemented_in_isle = |ctx: &mut C| {
unreachable!(
"implemented in ISLE: inst = `{}`, type = `{:?}`",
ctx.dfg().display_inst(insn),
ty
)
};
match op {
Opcode::Iconst
| Opcode::Bconst
| Opcode::Null
| Opcode::Iadd
| Opcode::IaddIfcout
| Opcode::SaddSat
| Opcode::UaddSat
| Opcode::Isub
| Opcode::SsubSat
| Opcode::UsubSat
| Opcode::AvgRound
| Opcode::Band
| Opcode::Bor
| Opcode::Bxor
| Opcode::Imul
| Opcode::BandNot
| Opcode::Iabs
| Opcode::Imax
| Opcode::Umax
| Opcode::Imin
| Opcode::Umin
| Opcode::Bnot => implemented_in_isle(ctx),
Opcode::Bitselect => {
let ty = ty.unwrap();
let condition = put_input_in_reg(ctx, inputs[0]);
let if_true = put_input_in_reg(ctx, inputs[1]);
let if_false = input_to_reg_mem(ctx, inputs[2]);
let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
if ty.is_vector() {
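            // bitselect(c, t, f) == (t & c) | (f & !c), computed below with an
            // and / and-not / or sequence on the vector units.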
let tmp1 = ctx.alloc_tmp(ty).only_reg().unwrap();
ctx.emit(Inst::gen_move(tmp1, if_true, ty));
ctx.emit(Inst::and(ty, RegMem::reg(condition.clone()), tmp1));
let tmp2 = ctx.alloc_tmp(ty).only_reg().unwrap();
ctx.emit(Inst::gen_move(tmp2, condition, ty));
ctx.emit(Inst::and_not(ty, if_false, tmp2));
ctx.emit(Inst::gen_move(dst, tmp2.to_reg(), ty));
ctx.emit(Inst::or(ty, RegMem::from(tmp1), dst));
} else {
unimplemented!("no lowering for scalar bitselect instruction")
}
}
Opcode::Vselect => {
let ty = ty.unwrap();
let condition = put_input_in_reg(ctx, inputs[0]);
let condition_ty = ctx.input_ty(insn, 0);
let if_true = input_to_reg_mem(ctx, inputs[1]);
let if_false = put_input_in_reg(ctx, inputs[2]);
let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
if ty.is_vector() {
// `vselect` relies on the bit representation of the condition:
// vector boolean types are defined in Cranelift to be all 1s or
// all 0s. This lowering relies on that fact to use x86's
// variable blend instructions, which look at the _high_bit_ of
// the condition mask. All the bits of vector booleans will
// match (all 1s or all 0s), so we can just use the high bit.
assert!(condition_ty.lane_type().is_bool());
// Variable blend instructions expect the condition mask to be
// in XMM0.
let xmm0 = Writable::from_reg(regs::xmm0());
ctx.emit(Inst::gen_move(xmm0, condition, ty));
// Match up the source and destination registers for regalloc.
ctx.emit(Inst::gen_move(dst, if_false, ty));
// Technically PBLENDVB would work in all cases (since the bytes
// inside the mask will be all 1s or 0s we can blend
// byte-by-byte instead of word-by-word, e.g.) but
// type-specialized versions are included here for clarity when
// troubleshooting and due to slight improvements in
// latency/throughput on certain processor families.
let opcode = match condition_ty {
types::B64X2 => SseOpcode::Blendvpd,
types::B32X4 => SseOpcode::Blendvps,
types::B16X8 | types::B8X16 => SseOpcode::Pblendvb,
_ => unimplemented!("unable lower vselect for type: {}", condition_ty),
};
ctx.emit(Inst::xmm_rm_r(opcode, if_true, dst));
} else {
unimplemented!("no lowering for scalar vselect instruction")
}
}
Opcode::Ishl | Opcode::Ushr | Opcode::Sshr | Opcode::Rotl | Opcode::Rotr => {
let dst_ty = ctx.output_ty(insn, 0);
debug_assert_eq!(ctx.input_ty(insn, 0), dst_ty);
if !dst_ty.is_vector() && dst_ty.bits() <= 64 {
// Scalar shifts on x86 have various encodings:
// - shift by one bit, e.g. `SAL r/m8, 1` (not used here)
// - shift by an immediate amount, e.g. `SAL r/m8, imm8`
// - shift by a dynamic amount but only from the CL register, e.g. `SAL r/m8, CL`.
// This implementation uses the last two encoding methods.
let (size, lhs) = match dst_ty {
types::I8 | types::I16 => match op {
Opcode::Ishl => (OperandSize::Size32, put_input_in_reg(ctx, inputs[0])),
Opcode::Ushr => (
OperandSize::Size32,
extend_input_to_reg(ctx, inputs[0], ExtSpec::ZeroExtendTo32),
),
Opcode::Sshr => (
OperandSize::Size32,
extend_input_to_reg(ctx, inputs[0], ExtSpec::SignExtendTo32),
),
Opcode::Rotl | Opcode::Rotr => (
OperandSize::from_ty(dst_ty),
put_input_in_reg(ctx, inputs[0]),
),
_ => unreachable!(),
},
types::I32 | types::I64 => (
OperandSize::from_ty(dst_ty),
put_input_in_reg(ctx, inputs[0]),
),
_ => unreachable!("unhandled output type for shift/rotates: {}", dst_ty),
};
let (count, rhs) =
if let Some(cst) = ctx.get_input_as_source_or_const(insn, 1).constant {
// Mask count, according to Cranelift's semantics.
let cst = (cst as u8) & (dst_ty.bits() as u8 - 1);
(Some(cst), None)
} else {
                        // We can ignore the upper registers if the shift amount is multi-reg,
                        // because we are taking the shift amount mod 2^(lhs_width) anyway.
(None, Some(put_input_in_regs(ctx, inputs[1]).regs()[0]))
};
let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
let shift_kind = match op {
Opcode::Ishl => ShiftKind::ShiftLeft,
Opcode::Ushr => ShiftKind::ShiftRightLogical,
Opcode::Sshr => ShiftKind::ShiftRightArithmetic,
Opcode::Rotl => ShiftKind::RotateLeft,
Opcode::Rotr => ShiftKind::RotateRight,
_ => unreachable!(),
};
let w_rcx = Writable::from_reg(regs::rcx());
ctx.emit(Inst::mov_r_r(OperandSize::Size64, lhs, dst));
if count.is_none() {
ctx.emit(Inst::mov_r_r(OperandSize::Size64, rhs.unwrap(), w_rcx));
}
ctx.emit(Inst::shift_r(size, shift_kind, count, dst));
} else if dst_ty == types::I128 {
let amt_src = put_input_in_regs(ctx, inputs[1]).regs()[0];
let src = put_input_in_regs(ctx, inputs[0]);
let dst = get_output_reg(ctx, outputs[0]);
match op {
Opcode::Ishl => {
emit_shl_i128(ctx, src, dst, amt_src);
}
Opcode::Ushr => {
emit_shr_i128(ctx, src, dst, amt_src, /* is_signed = */ false);
}
Opcode::Sshr => {
emit_shr_i128(ctx, src, dst, amt_src, /* is_signed = */ true);
}
Opcode::Rotl => {
// (mov tmp, src)
// (shl.i128 tmp, amt)
// (mov dst, src)
// (ushr.i128 dst, 128-amt)
// (or dst, tmp)
let tmp = ctx.alloc_tmp(types::I128);
emit_shl_i128(ctx, src, tmp, amt_src);
let inv_amt = ctx.alloc_tmp(types::I64).only_reg().unwrap();
ctx.emit(Inst::imm(OperandSize::Size64, 128, inv_amt));
ctx.emit(Inst::alu_rmi_r(
OperandSize::Size64,
AluRmiROpcode::Sub,
RegMemImm::reg(amt_src),
inv_amt,
));
emit_shr_i128(
ctx,
src,
dst,
inv_amt.to_reg(),
/* is_signed = */ false,
);
ctx.emit(Inst::alu_rmi_r(
OperandSize::Size64,
AluRmiROpcode::Or,
RegMemImm::reg(tmp.regs()[0].to_reg()),
dst.regs()[0],
));
ctx.emit(Inst::alu_rmi_r(
OperandSize::Size64,
AluRmiROpcode::Or,
RegMemImm::reg(tmp.regs()[1].to_reg()),
dst.regs()[1],
));
}
Opcode::Rotr => {
// (mov tmp, src)
// (ushr.i128 tmp, amt)
// (mov dst, src)
// (shl.i128 dst, 128-amt)
// (or dst, tmp)
let tmp = ctx.alloc_tmp(types::I128);
emit_shr_i128(ctx, src, tmp, amt_src, /* is_signed = */ false);
let inv_amt = ctx.alloc_tmp(types::I64).only_reg().unwrap();
ctx.emit(Inst::imm(OperandSize::Size64, 128, inv_amt));
ctx.emit(Inst::alu_rmi_r(
OperandSize::Size64,
AluRmiROpcode::Sub,
RegMemImm::reg(amt_src),
inv_amt,
));
emit_shl_i128(ctx, src, dst, inv_amt.to_reg());
ctx.emit(Inst::alu_rmi_r(
OperandSize::Size64,
AluRmiROpcode::Or,
RegMemImm::reg(tmp.regs()[0].to_reg()),
dst.regs()[0],
));
ctx.emit(Inst::alu_rmi_r(
OperandSize::Size64,
AluRmiROpcode::Or,
RegMemImm::reg(tmp.regs()[1].to_reg()),
dst.regs()[1],
));
}
_ => unreachable!(),
}
} else if dst_ty == types::I8X16 && (op == Opcode::Ishl || op == Opcode::Ushr) {
// Since the x86 instruction set does not have any 8x16 shift instructions (even in higher feature sets
// like AVX), we lower the `ishl.i8x16` and `ushr.i8x16` to a sequence of instructions. The basic idea,
// whether the `shift_by` amount is an immediate or not, is to use a 16x8 shift and then mask off the
// incorrect bits to 0s (see below for handling signs in `sshr.i8x16`).
let src = put_input_in_reg(ctx, inputs[0]);
let shift_by = input_to_reg_mem_imm(ctx, inputs[1]);
let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
// If necessary, move the shift index into the lowest bits of a vector register.
let shift_by_moved = match &shift_by {
RegMemImm::Imm { .. } => shift_by.clone(),
RegMemImm::Reg { reg } => {
let tmp_shift_by = ctx.alloc_tmp(dst_ty).only_reg().unwrap();
ctx.emit(Inst::gpr_to_xmm(
SseOpcode::Movd,
RegMem::reg(*reg),
OperandSize::Size32,
tmp_shift_by,
));
RegMemImm::reg(tmp_shift_by.to_reg())
}
RegMemImm::Mem { .. } => unimplemented!("load shift amount to XMM register"),
};
// Shift `src` using 16x8. Unfortunately, a 16x8 shift will only be correct for half of the lanes;
// the others must be fixed up with the mask below.
let shift_opcode = match op {
Opcode::Ishl => SseOpcode::Psllw,
Opcode::Ushr => SseOpcode::Psrlw,
_ => unimplemented!("{} is not implemented for type {}", op, dst_ty),
};
ctx.emit(Inst::gen_move(dst, src, dst_ty));
ctx.emit(Inst::xmm_rmi_reg(shift_opcode, shift_by_moved, dst));
// Choose which mask to use to fixup the shifted lanes. Since we must use a 16x8 shift, we need to fix
// up the bits that migrate from one half of the lane to the other. Each 16-byte mask (which rustfmt
// forces to multiple lines) is indexed by the shift amount: e.g. if we shift right by 0 (no movement),
// we want to retain all the bits so we mask with `0xff`; if we shift right by 1, we want to retain all
// bits except the MSB so we mask with `0x7f`; etc.
const USHR_MASKS: [u8; 128] = [
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f,
0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f,
0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x1f, 0x1f, 0x1f, 0x1f,
0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x0f,
0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f,
0x0f, 0x0f, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
0x07, 0x07, 0x07, 0x07, 0x07, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x01, 0x01, 0x01, 0x01, 0x01,
0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
];
const SHL_MASKS: [u8; 128] = [
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe,
0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc,
0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xf8, 0xf8, 0xf8, 0xf8,
0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf0,
0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0,
0xf0, 0xf0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0,
0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0,
0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0x80, 0x80, 0x80, 0x80, 0x80,
0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80,
];
let mask = match op {
Opcode::Ishl => &SHL_MASKS,
Opcode::Ushr => &USHR_MASKS,
_ => unimplemented!("{} is not implemented for type {}", op, dst_ty),
};
// Figure out the address of the shift mask.
let mask_address = match shift_by {
RegMemImm::Imm { simm32 } => {
// When the shift amount is known, we can statically (i.e. at compile time) determine the mask to
// use and only emit that.
debug_assert!(simm32 < 8);
let mask_offset = simm32 as usize * 16;
let mask_constant = ctx.use_constant(VCodeConstantData::WellKnown(
&mask[mask_offset..mask_offset + 16],
));
SyntheticAmode::ConstantOffset(mask_constant)
}
RegMemImm::Reg { reg } => {
                        // Otherwise, we must emit the entire mask table and dynamically (i.e. at run time) find the correct
                        // mask offset in the table. We do this using LEA to find the base address of the mask table and then
                        // complex addressing to offset to the right mask: `base_address + shift_by * 16`.
let base_mask_address = ctx.alloc_tmp(types::I64).only_reg().unwrap();
let mask_offset = ctx.alloc_tmp(types::I64).only_reg().unwrap();
let mask_constant = ctx.use_constant(VCodeConstantData::WellKnown(mask));
ctx.emit(Inst::lea(
SyntheticAmode::ConstantOffset(mask_constant),
base_mask_address,
));
ctx.emit(Inst::gen_move(mask_offset, reg, types::I64));
ctx.emit(Inst::shift_r(
OperandSize::Size64,
ShiftKind::ShiftLeft,
Some(4),
mask_offset,
));
Amode::imm_reg_reg_shift(
0,
base_mask_address.to_reg(),
mask_offset.to_reg(),
0,
)
.into()
}
RegMemImm::Mem { addr: _ } => unimplemented!("load mask address"),
};
// Load the mask into a temporary register, `mask_value`.
let mask_value = ctx.alloc_tmp(dst_ty).only_reg().unwrap();
ctx.emit(Inst::load(dst_ty, mask_address, mask_value, ExtKind::None));
// Remove the bits that would have disappeared in a true 8x16 shift. TODO in the future,
// this AND instruction could be coalesced with the load above.
let sse_op = match dst_ty {
types::F32X4 => SseOpcode::Andps,
types::F64X2 => SseOpcode::Andpd,
_ => SseOpcode::Pand,
};
ctx.emit(Inst::xmm_rm_r(sse_op, RegMem::from(mask_value), dst));
} else if dst_ty == types::I8X16 && op == Opcode::Sshr {
// Since the x86 instruction set does not have an 8x16 shift instruction and the approach used for
// `ishl` and `ushr` cannot be easily used (the masks do not preserve the sign), we use a different
// approach here: separate the low and high lanes, shift them separately, and merge them into the final
            // result. Visually, this looks like the following, where `src.i8x16 = [s0, s1, ..., s15]`:
// low.i16x8 = [(s0, s0), (s1, s1), ..., (s7, s7)]
// shifted_low.i16x8 = shift each lane of `low`
// high.i16x8 = [(s8, s8), (s9, s9), ..., (s15, s15)]
// shifted_high.i16x8 = shift each lane of `high`
// dst.i8x16 = [s0'', s1'', ..., s15'']
let src = put_input_in_reg(ctx, inputs[0]);
let shift_by = input_to_reg_mem_imm(ctx, inputs[1]);
let shift_by_ty = ctx.input_ty(insn, 1);
let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
// In order for PACKSSWB later to only use the high byte of each 16x8 lane, we shift right an extra 8
// bits, relying on PSRAW to fill in the upper bits appropriately.
let bigger_shift_by = match shift_by {
// When we know the shift amount at compile time, we add the extra shift amount statically.
RegMemImm::Imm { simm32 } => RegMemImm::imm(simm32 + 8),
// Otherwise we add instructions to add the extra shift amount and move the value into an XMM
// register.
RegMemImm::Reg { reg } => {
let bigger_shift_by_gpr = ctx.alloc_tmp(shift_by_ty).only_reg().unwrap();
ctx.emit(Inst::mov_r_r(OperandSize::Size64, reg, bigger_shift_by_gpr));
let size = if shift_by_ty == types::I64 {
OperandSize::Size64
} else {
OperandSize::Size32
};
let imm = RegMemImm::imm(8);
ctx.emit(Inst::alu_rmi_r(
size,
AluRmiROpcode::Add,
imm,
bigger_shift_by_gpr,
));
let bigger_shift_by_xmm = ctx.alloc_tmp(dst_ty).only_reg().unwrap();
ctx.emit(Inst::gpr_to_xmm(
SseOpcode::Movd,
RegMem::from(bigger_shift_by_gpr),
OperandSize::Size32,
bigger_shift_by_xmm,
));
RegMemImm::reg(bigger_shift_by_xmm.to_reg())
}
RegMemImm::Mem { .. } => unimplemented!("load shift amount to XMM register"),
};
// Unpack and shift the lower lanes of `src` into the `dst` register.
ctx.emit(Inst::gen_move(dst, src, dst_ty));
ctx.emit(Inst::xmm_rm_r(SseOpcode::Punpcklbw, RegMem::from(dst), dst));
ctx.emit(Inst::xmm_rmi_reg(
SseOpcode::Psraw,
bigger_shift_by.clone(),
dst,
));
// Unpack and shift the upper lanes of `src` into a temporary register, `upper_lanes`.
let upper_lanes = ctx.alloc_tmp(dst_ty).only_reg().unwrap();
ctx.emit(Inst::gen_move(upper_lanes, src, dst_ty));
ctx.emit(Inst::xmm_rm_r(
SseOpcode::Punpckhbw,
RegMem::from(upper_lanes),
upper_lanes,
));
ctx.emit(Inst::xmm_rmi_reg(
SseOpcode::Psraw,
bigger_shift_by,
upper_lanes,
));
// Merge the upper and lower shifted lanes into `dst`.
ctx.emit(Inst::xmm_rm_r(
SseOpcode::Packsswb,
RegMem::from(upper_lanes),
dst,
));
} else if dst_ty == types::I64X2 && op == Opcode::Sshr {
            // The `sshr.i64x2` CLIF instruction has no single x86 instruction in the older feature sets; newer ones
// like AVX512VL + AVX512F include VPSRAQ, a 128-bit instruction that would fit here, but this backend
// does not currently have support for EVEX encodings (TODO when EVEX support is available, add an
// alternate lowering here). To remedy this, we extract each 64-bit lane to a GPR, shift each using a
// scalar instruction, and insert the shifted values back in the `dst` XMM register.
let src = put_input_in_reg(ctx, inputs[0]);
let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
ctx.emit(Inst::gen_move(dst, src, dst_ty));
// Extract the upper and lower lanes into temporary GPRs.
let lower_lane = ctx.alloc_tmp(types::I64).only_reg().unwrap();
emit_extract_lane(ctx, src, lower_lane, 0, types::I64);
let upper_lane = ctx.alloc_tmp(types::I64).only_reg().unwrap();
emit_extract_lane(ctx, src, upper_lane, 1, types::I64);
// Shift each value.
let mut shift = |reg: Writable<Reg>| {
let kind = ShiftKind::ShiftRightArithmetic;
if let Some(shift_by) = ctx.get_input_as_source_or_const(insn, 1).constant {
// Mask the shift amount according to Cranelift's semantics.
let shift_by = (shift_by as u8) & (types::I64.bits() as u8 - 1);
ctx.emit(Inst::shift_r(
OperandSize::Size64,
kind,
Some(shift_by),
reg,
));
} else {
let dynamic_shift_by = put_input_in_reg(ctx, inputs[1]);
let w_rcx = Writable::from_reg(regs::rcx());
ctx.emit(Inst::mov_r_r(OperandSize::Size64, dynamic_shift_by, w_rcx));
ctx.emit(Inst::shift_r(OperandSize::Size64, kind, None, reg));
};
};
shift(lower_lane);
shift(upper_lane);
// Insert the scalar values back into the `dst` vector.
emit_insert_lane(ctx, RegMem::from(lower_lane), dst, 0, types::I64);
emit_insert_lane(ctx, RegMem::from(upper_lane), dst, 1, types::I64);
} else {
// For the remaining packed shifts not covered above, x86 has implementations that can either:
// - shift using an immediate
// - shift using a dynamic value given in the lower bits of another XMM register.
let src = put_input_in_reg(ctx, inputs[0]);
let shift_by = input_to_reg_mem_imm(ctx, inputs[1]);
let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
let sse_op = match dst_ty {
types::I16X8 => match op {
Opcode::Ishl => SseOpcode::Psllw,
Opcode::Ushr => SseOpcode::Psrlw,
Opcode::Sshr => SseOpcode::Psraw,
_ => unimplemented!("{} is not implemented for type {}", op, dst_ty),
},
types::I32X4 => match op {
Opcode::Ishl => SseOpcode::Pslld,
Opcode::Ushr => SseOpcode::Psrld,
Opcode::Sshr => SseOpcode::Psrad,
_ => unimplemented!("{} is not implemented for type {}", op, dst_ty),
},
types::I64X2 => match op {
Opcode::Ishl => SseOpcode::Psllq,
Opcode::Ushr => SseOpcode::Psrlq,
_ => unimplemented!("{} is not implemented for type {}", op, dst_ty),
},
_ => unreachable!(),
};
// If necessary, move the shift index into the lowest bits of a vector register.
let shift_by = match shift_by {
RegMemImm::Imm { .. } => shift_by,
RegMemImm::Reg { reg } => {
let tmp_shift_by = ctx.alloc_tmp(dst_ty).only_reg().unwrap();
ctx.emit(Inst::gpr_to_xmm(
SseOpcode::Movd,
RegMem::reg(reg),
OperandSize::Size32,
tmp_shift_by,
));
RegMemImm::reg(tmp_shift_by.to_reg())
}
RegMemImm::Mem { .. } => unimplemented!("load shift amount to XMM register"),
};
// Move the `src` to the same register as `dst`.
ctx.emit(Inst::gen_move(dst, src, dst_ty));
ctx.emit(Inst::xmm_rmi_reg(sse_op, shift_by, dst));
}
}
Opcode::Ineg => {
let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
let ty = ty.unwrap();
if ty.is_vector() {
                // Zeros out a register and then does a packed subtraction
                // of the input from the register.
let src = input_to_reg_mem(ctx, inputs[0]);
let tmp = ctx.alloc_tmp(types::I32X4).only_reg().unwrap();
let subtract_opcode = match ty {
types::I8X16 => SseOpcode::Psubb,
types::I16X8 => SseOpcode::Psubw,
types::I32X4 => SseOpcode::Psubd,
types::I64X2 => SseOpcode::Psubq,
_ => panic!("Unsupported type for Ineg instruction, found {}", ty),
};
                // Note we must zero out a tmp instead of using the destination register, since
                // the destination could be an alias for the source input register.
ctx.emit(Inst::xmm_rm_r(
SseOpcode::Pxor,
RegMem::reg(tmp.to_reg()),
tmp,
));
ctx.emit(Inst::xmm_rm_r(subtract_opcode, src, tmp));
ctx.emit(Inst::xmm_unary_rm_r(
SseOpcode::Movapd,
RegMem::reg(tmp.to_reg()),
dst,
));
} else {
let src = put_input_in_reg(ctx, inputs[0]);
ctx.emit(Inst::gen_move(dst, src, ty));
ctx.emit(Inst::neg(OperandSize::from_ty(ty), dst));
}
}
Opcode::Clz => {
let orig_ty = ty.unwrap();
if isa_flags.use_lzcnt() && (orig_ty == types::I32 || orig_ty == types::I64) {
            // We can use a plain lzcnt instruction here. No special handling is
            // required for zero inputs: for a zero input, lzcnt returns the
            // operand width in bits, which is exactly what the CLIF expects.
let src = input_to_reg_mem(ctx, inputs[0]);
let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
ctx.emit(Inst::unary_rm_r(
OperandSize::from_ty(orig_ty),
UnaryRmROpcode::Lzcnt,
src,
dst,
));
return Ok(());
}
// General formula using bit-scan reverse (BSR):
// mov -1, %dst
// bsr %src, %tmp
// cmovz %dst, %tmp
// mov $(size_bits - 1), %dst
// sub %tmp, %dst
if orig_ty == types::I128 {
// clz upper, tmp1
// clz lower, dst
// add dst, 64
// cmp tmp1, 64
// cmovnz tmp1, dst
let dsts = get_output_reg(ctx, outputs[0]);
let dst = dsts.regs()[0];
let tmp1 = ctx.alloc_tmp(types::I64).only_reg().unwrap();
let srcs = put_input_in_regs(ctx, inputs[0]);
let src_lo = srcs.regs()[0];
let src_hi = srcs.regs()[1];
emit_clz(ctx, types::I64, types::I64, src_hi, tmp1);
emit_clz(ctx, types::I64, types::I64, src_lo, dst);
ctx.emit(Inst::alu_rmi_r(
OperandSize::Size64,
AluRmiROpcode::Add,
RegMemImm::imm(64),
dst,
));
ctx.emit(Inst::cmp_rmi_r(
OperandSize::Size64,
RegMemImm::imm(64),
tmp1.to_reg(),
));
ctx.emit(Inst::cmove(
OperandSize::Size64,
CC::NZ,
RegMem::reg(tmp1.to_reg()),
dst,
));
ctx.emit(Inst::alu_rmi_r(
OperandSize::Size64,
AluRmiROpcode::Xor,
RegMemImm::reg(dsts.regs()[1].to_reg()),
dsts.regs()[1],
));
} else {
let (ext_spec, ty) = match orig_ty {
types::I8 | types::I16 => (Some(ExtSpec::ZeroExtendTo32), types::I32),
a if a == types::I32 || a == types::I64 => (None, a),
_ => unreachable!(),
};
let src = if let Some(ext_spec) = ext_spec {
extend_input_to_reg(ctx, inputs[0], ext_spec)
} else {
put_input_in_reg(ctx, inputs[0])
};
let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
emit_clz(ctx, orig_ty, ty, src, dst);
}
}
Opcode::Ctz => {
let orig_ty = ctx.input_ty(insn, 0);
if isa_flags.use_bmi1() && (orig_ty == types::I32 || orig_ty == types::I64) {
            // We can use a plain tzcnt instruction here. No special handling is
            // required for zero inputs: for a zero input, tzcnt returns the
            // operand width in bits, which is exactly what the CLIF expects.
let src = input_to_reg_mem(ctx, inputs[0]);
let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
ctx.emit(Inst::unary_rm_r(
OperandSize::from_ty(orig_ty),
UnaryRmROpcode::Tzcnt,
src,
dst,
));
return Ok(());
}
// General formula using bit-scan forward (BSF):
// bsf %src, %dst
// mov $(size_bits), %tmp
// cmovz %tmp, %dst
if orig_ty == types::I128 {
// ctz src_lo, dst
// ctz src_hi, tmp1
// add tmp1, 64
// cmp dst, 64
// cmovz tmp1, dst
let dsts = get_output_reg(ctx, outputs[0]);
let dst = dsts.regs()[0];
let tmp1 = ctx.alloc_tmp(types::I64).only_reg().unwrap();
let srcs = put_input_in_regs(ctx, inputs[0]);
let src_lo = srcs.regs()[0];
let src_hi = srcs.regs()[1];
emit_ctz(ctx, types::I64, types::I64, src_lo, dst);
emit_ctz(ctx, types::I64, types::I64, src_hi, tmp1);
ctx.emit(Inst::alu_rmi_r(
OperandSize::Size64,
AluRmiROpcode::Add,
RegMemImm::imm(64),
tmp1,
));
ctx.emit(Inst::cmp_rmi_r(
OperandSize::Size64,
RegMemImm::imm(64),
dst.to_reg(),
));
ctx.emit(Inst::cmove(
OperandSize::Size64,
CC::Z,
RegMem::reg(tmp1.to_reg()),
dst,
));
ctx.emit(Inst::alu_rmi_r(
OperandSize::Size64,
AluRmiROpcode::Xor,
RegMemImm::reg(dsts.regs()[1].to_reg()),
dsts.regs()[1],
));
} else {
let ty = if orig_ty.bits() < 32 {
types::I32
} else {
orig_ty
};
debug_assert!(ty == types::I32 || ty == types::I64);
let src = put_input_in_reg(ctx, inputs[0]);
let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
emit_ctz(ctx, orig_ty, ty, src, dst);
}
}
Opcode::Popcnt => {
let ty_tmp = ty.unwrap();
if !ty_tmp.is_vector() {
let (ext_spec, ty) = match ctx.input_ty(insn, 0) {
types::I8 | types::I16 => (Some(ExtSpec::ZeroExtendTo32), types::I32),
a if a == types::I32 || a == types::I64 || a == types::I128 => (None, a),
_ => unreachable!(),
};
if isa_flags.use_popcnt() {
match ty {
types::I32 | types::I64 => {
let src = input_to_reg_mem(ctx, inputs[0]);
let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
ctx.emit(Inst::unary_rm_r(
OperandSize::from_ty(ty),
UnaryRmROpcode::Popcnt,
src,
dst,
));
return Ok(());
}
types::I128 => {
                            // The number of ones in a 128-bit value is the plain sum of the number
                            // of ones in its low and high halves; no risk of overflow here, since
                            // the maximum result, 64 + 64 = 128, easily fits.
let dsts = get_output_reg(ctx, outputs[0]);
let dst = dsts.regs()[0];
let tmp = ctx.alloc_tmp(types::I64).only_reg().unwrap();
let srcs = put_input_in_regs(ctx, inputs[0]);
let src_lo = srcs.regs()[0];
let src_hi = srcs.regs()[1];
ctx.emit(Inst::unary_rm_r(
OperandSize::Size64,
UnaryRmROpcode::Popcnt,
RegMem::reg(src_lo),
dst,
));
ctx.emit(Inst::unary_rm_r(
OperandSize::Size64,
UnaryRmROpcode::Popcnt,
RegMem::reg(src_hi),
tmp,
));
ctx.emit(Inst::alu_rmi_r(
OperandSize::Size64,
AluRmiROpcode::Add,
RegMemImm::reg(tmp.to_reg()),
dst,
));
// Zero the result's high component.
ctx.emit(Inst::alu_rmi_r(
OperandSize::Size64,
AluRmiROpcode::Xor,
RegMemImm::reg(dsts.regs()[1].to_reg()),
dsts.regs()[1],
));
return Ok(());
}
_ => {}
}
}
let (srcs, ty): (SmallVec<[RegMem; 2]>, Type) = if let Some(ext_spec) = ext_spec {
(
smallvec![RegMem::reg(extend_input_to_reg(ctx, inputs[0], ext_spec))],
ty,
)
} else if ty == types::I128 {
let regs = put_input_in_regs(ctx, inputs[0]);
(
smallvec![RegMem::reg(regs.regs()[0]), RegMem::reg(regs.regs()[1])],
types::I64,
)
} else {
// N.B.: explicitly put input in a reg here because the width of the instruction
// into which this RM op goes may not match the width of the input type (in fact,
// it won't for i32.popcnt), and we don't want a larger than necessary load.
(smallvec![RegMem::reg(put_input_in_reg(ctx, inputs[0]))], ty)
};
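                // The per-register sequences below implement a branch-free SWAR
                // (SIMD-within-a-register) popcount. Sketch of the reasoning (not part of
                // the emitted code): with the mask m = 0x7777..., the first phase computes
                //   t = x - ((x >> 1) & m) - ((x >> 2) & m) - ((x >> 3) & m)
                // (each shift-and-mask is applied to the previously masked value, which per
                // nibble computes n >> 1, n >> 2, n >> 3), leaving each nibble of t holding
                // the popcount of the corresponding nibble of x, since for a 4-bit n:
                //   n - (n >> 1) - (n >> 2) - (n >> 3) = popcount(n).
                // Then (t + (t >> 4)) & 0x0f0f... sums adjacent nibbles into bytes, and
                // multiplying by 0x0101... and shifting right by (width - 8) sums all the
                // bytes into the top byte. E.g. x = 0xff: t = 0x44, bytes = 0x08, result 8.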
let mut dsts: SmallVec<[Reg; 2]> = smallvec![];
for src in srcs {
let dst = ctx.alloc_tmp(types::I64).only_reg().unwrap();
dsts.push(dst.to_reg());
if ty == types::I64 {
let tmp1 = ctx.alloc_tmp(types::I64).only_reg().unwrap();
let tmp2 = ctx.alloc_tmp(types::I64).only_reg().unwrap();
let cst = ctx.alloc_tmp(types::I64).only_reg().unwrap();
// mov src, tmp1
ctx.emit(Inst::mov64_rm_r(src.clone(), tmp1));
// shr $1, tmp1
ctx.emit(Inst::shift_r(
OperandSize::Size64,
ShiftKind::ShiftRightLogical,
Some(1),
tmp1,
));
// mov 0x7777_7777_7777_7777, cst
ctx.emit(Inst::imm(OperandSize::Size64, 0x7777777777777777, cst));
                        // and cst, tmp1
ctx.emit(Inst::alu_rmi_r(
OperandSize::Size64,
AluRmiROpcode::And,
RegMemImm::reg(cst.to_reg()),
tmp1,
));
// mov src, tmp2
ctx.emit(Inst::mov64_rm_r(src, tmp2));
// sub tmp1, tmp2
ctx.emit(Inst::alu_rmi_r(
OperandSize::Size64,
AluRmiROpcode::Sub,
RegMemImm::reg(tmp1.to_reg()),
tmp2,
));
// shr $1, tmp1
ctx.emit(Inst::shift_r(
OperandSize::Size64,
ShiftKind::ShiftRightLogical,
Some(1),
tmp1,
));
// and cst, tmp1
ctx.emit(Inst::alu_rmi_r(
OperandSize::Size64,
AluRmiROpcode::And,
RegMemImm::reg(cst.to_reg()),
tmp1,
));
// sub tmp1, tmp2
ctx.emit(Inst::alu_rmi_r(
OperandSize::Size64,
AluRmiROpcode::Sub,
RegMemImm::reg(tmp1.to_reg()),
tmp2,
));
// shr $1, tmp1
ctx.emit(Inst::shift_r(
OperandSize::Size64,
ShiftKind::ShiftRightLogical,
Some(1),
tmp1,
));
// and cst, tmp1
ctx.emit(Inst::alu_rmi_r(
OperandSize::Size64,
AluRmiROpcode::And,
RegMemImm::reg(cst.to_reg()),
tmp1,
));
// sub tmp1, tmp2
ctx.emit(Inst::alu_rmi_r(
OperandSize::Size64,
AluRmiROpcode::Sub,
RegMemImm::reg(tmp1.to_reg()),
tmp2,
));
// mov tmp2, dst
ctx.emit(Inst::mov64_rm_r(RegMem::reg(tmp2.to_reg()), dst));
// shr $4, dst
ctx.emit(Inst::shift_r(
OperandSize::Size64,
ShiftKind::ShiftRightLogical,
Some(4),
dst,
));
// add tmp2, dst
ctx.emit(Inst::alu_rmi_r(
OperandSize::Size64,
AluRmiROpcode::Add,
RegMemImm::reg(tmp2.to_reg()),
dst,
));
// mov $0x0F0F_0F0F_0F0F_0F0F, cst
ctx.emit(Inst::imm(OperandSize::Size64, 0x0F0F0F0F0F0F0F0F, cst));
// and cst, dst
ctx.emit(Inst::alu_rmi_r(
OperandSize::Size64,
AluRmiROpcode::And,
RegMemImm::reg(cst.to_reg()),
dst,
));
// mov $0x0101_0101_0101_0101, cst
ctx.emit(Inst::imm(OperandSize::Size64, 0x0101010101010101, cst));
// mul cst, dst
ctx.emit(Inst::alu_rmi_r(
OperandSize::Size64,
AluRmiROpcode::Mul,
RegMemImm::reg(cst.to_reg()),
dst,
));
// shr $56, dst
ctx.emit(Inst::shift_r(
OperandSize::Size64,
ShiftKind::ShiftRightLogical,
Some(56),
dst,
));
} else {
assert_eq!(ty, types::I32);
let tmp1 = ctx.alloc_tmp(types::I64).only_reg().unwrap();
let tmp2 = ctx.alloc_tmp(types::I64).only_reg().unwrap();
// mov src, tmp1
ctx.emit(Inst::mov64_rm_r(src.clone(), tmp1));
// shr $1, tmp1
ctx.emit(Inst::shift_r(
OperandSize::Size32,
ShiftKind::ShiftRightLogical,
Some(1),
tmp1,
));
                        // and $0x7777_7777, tmp1
ctx.emit(Inst::alu_rmi_r(
OperandSize::Size32,
AluRmiROpcode::And,
RegMemImm::imm(0x77777777),
tmp1,
));
// mov src, tmp2
ctx.emit(Inst::mov64_rm_r(src, tmp2));
// sub tmp1, tmp2
ctx.emit(Inst::alu_rmi_r(
OperandSize::Size32,
AluRmiROpcode::Sub,
RegMemImm::reg(tmp1.to_reg()),
tmp2,
));
// shr $1, tmp1
ctx.emit(Inst::shift_r(
OperandSize::Size32,
ShiftKind::ShiftRightLogical,
Some(1),
tmp1,
));
// and 0x7777_7777, tmp1
ctx.emit(Inst::alu_rmi_r(
OperandSize::Size32,
AluRmiROpcode::And,
RegMemImm::imm(0x77777777),
tmp1,
));
// sub tmp1, tmp2
ctx.emit(Inst::alu_rmi_r(
OperandSize::Size32,
AluRmiROpcode::Sub,
RegMemImm::reg(tmp1.to_reg()),
tmp2,
));
// shr $1, tmp1
ctx.emit(Inst::shift_r(
OperandSize::Size32,
ShiftKind::ShiftRightLogical,
Some(1),
tmp1,
));
// and $0x7777_7777, tmp1
ctx.emit(Inst::alu_rmi_r(
OperandSize::Size32,
AluRmiROpcode::And,
RegMemImm::imm(0x77777777),
tmp1,
));
// sub tmp1, tmp2
ctx.emit(Inst::alu_rmi_r(
OperandSize::Size32,
AluRmiROpcode::Sub,
RegMemImm::reg(tmp1.to_reg()),
tmp2,
));
// mov tmp2, dst
ctx.emit(Inst::mov64_rm_r(RegMem::reg(tmp2.to_reg()), dst));
// shr $4, dst
ctx.emit(Inst::shift_r(
OperandSize::Size32,
ShiftKind::ShiftRightLogical,
Some(4),
dst,
));
// add tmp2, dst
ctx.emit(Inst::alu_rmi_r(
OperandSize::Size32,
AluRmiROpcode::Add,
RegMemImm::reg(tmp2.to_reg()),
dst,
));
// and $0x0F0F_0F0F, dst
ctx.emit(Inst::alu_rmi_r(
OperandSize::Size32,
AluRmiROpcode::And,
RegMemImm::imm(0x0F0F0F0F),
dst,
));
// mul $0x0101_0101, dst
ctx.emit(Inst::alu_rmi_r(
OperandSize::Size32,
AluRmiROpcode::Mul,
RegMemImm::imm(0x01010101),
dst,
));
// shr $24, dst
ctx.emit(Inst::shift_r(
OperandSize::Size32,
ShiftKind::ShiftRightLogical,
Some(24),
dst,
));
}
}
if dsts.len() == 1 {
let final_dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
ctx.emit(Inst::gen_move(final_dst, dsts[0], types::I64));
} else {
assert!(dsts.len() == 2);
let final_dst = get_output_reg(ctx, outputs[0]);
ctx.emit(Inst::gen_move(final_dst.regs()[0], dsts[0], types::I64));
ctx.emit(Inst::alu_rmi_r(
OperandSize::Size64,
AluRmiROpcode::Add,
RegMemImm::reg(dsts[1]),
final_dst.regs()[0],
));
ctx.emit(Inst::alu_rmi_r(
OperandSize::Size64,
AluRmiROpcode::Xor,
RegMemImm::reg(final_dst.regs()[1].to_reg()),
final_dst.regs()[1],
));
}
} else {
// Lower `popcount` for vectors.
let ty = ty.unwrap();
let src = put_input_in_reg(ctx, inputs[0]);
let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
if isa_flags.use_avx512vl_simd() && isa_flags.use_avx512bitalg_simd() {
// When AVX512VL and AVX512BITALG are available,
// `popcnt.i8x16` can be lowered to a single instruction.
assert_eq!(ty, types::I8X16);
ctx.emit(Inst::xmm_unary_rm_r_evex(
Avx512Opcode::Vpopcntb,
RegMem::reg(src),
dst,
));
} else {
                    // Otherwise, we use Mula's algorithm (https://arxiv.org/pdf/1611.07612.pdf)
//
//__m128i count_bytes ( __m128i v) {
// __m128i lookup = _mm_setr_epi8(0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4);
// __m128i low_mask = _mm_set1_epi8 (0x0f);
// __m128i lo = _mm_and_si128 (v, low_mask);
// __m128i hi = _mm_and_si128 (_mm_srli_epi16 (v, 4), low_mask);
// __m128i cnt1 = _mm_shuffle_epi8 (lookup, lo);
// __m128i cnt2 = _mm_shuffle_epi8 (lookup, hi);
// return _mm_add_epi8 (cnt1, cnt2);
//}
//
                    // Details of the above algorithm can be found in the reference noted above, but
                    // the basics are to create a lookup table that pre-populates the popcnt values
                    // for each nibble value [0, 15]. The algorithm uses shifts to isolate each
                    // 4-bit section of the vector, pshufb to perform the lookups, and then adds the
                    // results together.
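                    //
                    // For example (illustrative): for the byte 0xa5, lo = 0x5 and hi = 0xa, so
                    // pshufb fetches lookup[5] = 2 and lookup[10] = 2, and paddb yields
                    // 4 = popcnt(0xa5).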
// __m128i lookup = _mm_setr_epi8(0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4);
static POPCOUNT_4BIT: [u8; 16] = [
0x00, 0x01, 0x01, 0x02, 0x01, 0x02, 0x02, 0x03, 0x01, 0x02, 0x02, 0x03,
0x02, 0x03, 0x03, 0x04,
];
let lookup = ctx.use_constant(VCodeConstantData::WellKnown(&POPCOUNT_4BIT));
// Create a mask for lower 4bits of each subword.
static LOW_MASK: [u8; 16] = [0x0F; 16];
let low_mask_const = ctx.use_constant(VCodeConstantData::WellKnown(&LOW_MASK));
let low_mask = ctx.alloc_tmp(types::I8X16).only_reg().unwrap();
ctx.emit(Inst::xmm_load_const(low_mask_const, low_mask, ty));
// __m128i lo = _mm_and_si128 (v, low_mask);
let lo = ctx.alloc_tmp(types::I8X16).only_reg().unwrap();
ctx.emit(Inst::gen_move(lo, low_mask.to_reg(), types::I8X16));
ctx.emit(Inst::xmm_rm_r(SseOpcode::Pand, RegMem::reg(src), lo));
// __m128i hi = _mm_and_si128 (_mm_srli_epi16 (v, 4), low_mask);
ctx.emit(Inst::gen_move(dst, src, ty));
ctx.emit(Inst::xmm_rmi_reg(SseOpcode::Psrlw, RegMemImm::imm(4), dst));
let tmp = ctx.alloc_tmp(types::I8X16).only_reg().unwrap();
ctx.emit(Inst::gen_move(tmp, low_mask.to_reg(), types::I8X16));
ctx.emit(Inst::xmm_rm_r(
SseOpcode::Pand,
RegMem::reg(dst.to_reg()),
tmp,
));
// __m128i cnt1 = _mm_shuffle_epi8 (lookup, lo);
let tmp2 = ctx.alloc_tmp(types::I8X16).only_reg().unwrap();
ctx.emit(Inst::xmm_load_const(lookup, tmp2, ty));
ctx.emit(Inst::gen_move(dst, tmp2.to_reg(), types::I8X16));
ctx.emit(Inst::xmm_rm_r(
SseOpcode::Pshufb,
RegMem::reg(lo.to_reg()),
dst,
));
                    // __m128i cnt2 = _mm_shuffle_epi8 (lookup, hi);
ctx.emit(Inst::xmm_rm_r(
SseOpcode::Pshufb,
RegMem::reg(tmp.to_reg()),
tmp2,
));
                    // return _mm_add_epi8 (cnt1, cnt2);
ctx.emit(Inst::xmm_rm_r(
SseOpcode::Paddb,
RegMem::reg(tmp2.to_reg()),
dst,
));
}
}
}
Opcode::Bitrev => {
let ty = ctx.input_ty(insn, 0);
assert!(
ty == types::I8
|| ty == types::I16
|| ty == types::I32
|| ty == types::I64
|| ty == types::I128
);
if ty == types::I128 {
let src = put_input_in_regs(ctx, inputs[0]);
let dst = get_output_reg(ctx, outputs[0]);
emit_bitrev(ctx, src.regs()[0], dst.regs()[1], types::I64);
emit_bitrev(ctx, src.regs()[1], dst.regs()[0], types::I64);
} else {
let src = put_input_in_reg(ctx, inputs[0]);
let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
emit_bitrev(ctx, src, dst, ty);
}
}
Opcode::IsNull | Opcode::IsInvalid => {
// Null references are represented by the constant value 0; invalid references are
// represented by the constant value -1. See `define_reftypes()` in
// `meta/src/isa/x86/encodings.rs` to confirm.
let src = put_input_in_reg(ctx, inputs[0]);
let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
let ty = ctx.input_ty(insn, 0);
let imm = match op {
Opcode::IsNull => {
                    // TODO could use test src, src for IsNull
0
}
Opcode::IsInvalid => {
                    // We can do a 32-bit comparison even in 64-bit mode, as the constant is then
                    // sign-extended.
0xffffffff
}
_ => unreachable!(),
};
ctx.emit(Inst::cmp_rmi_r(
OperandSize::from_ty(ty),
RegMemImm::imm(imm),
src,
));
ctx.emit(Inst::setcc(CC::Z, dst));
}
Opcode::Uextend | Opcode::Sextend | Opcode::Breduce | Opcode::Bextend | Opcode::Ireduce => {
let src_ty = ctx.input_ty(insn, 0);
let dst_ty = ctx.output_ty(insn, 0);
if src_ty == types::I128 {
assert!(dst_ty.bits() <= 64);
assert!(op == Opcode::Ireduce);
let src = put_input_in_regs(ctx, inputs[0]);
let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
ctx.emit(Inst::gen_move(dst, src.regs()[0], types::I64));
} else if dst_ty == types::I128 {
assert!(src_ty.bits() <= 64);
let src = put_input_in_reg(ctx, inputs[0]);
let dst = get_output_reg(ctx, outputs[0]);
assert!(op == Opcode::Uextend || op == Opcode::Sextend);
// Extend to 64 bits first.
let ext_mode = ExtMode::new(src_ty.bits(), /* dst bits = */ 64);
if let Some(ext_mode) = ext_mode {
if op == Opcode::Sextend {
ctx.emit(Inst::movsx_rm_r(ext_mode, RegMem::reg(src), dst.regs()[0]));
} else {
ctx.emit(Inst::movzx_rm_r(ext_mode, RegMem::reg(src), dst.regs()[0]));
}
} else {
ctx.emit(Inst::mov64_rm_r(RegMem::reg(src), dst.regs()[0]));
}
// Now generate the top 64 bits.
if op == Opcode::Sextend {
// Sign-extend: move dst[0] into dst[1] and arithmetic-shift right by 63 bits
// to spread the sign bit across all bits.
ctx.emit(Inst::gen_move(
dst.regs()[1],
dst.regs()[0].to_reg(),
types::I64,
));
ctx.emit(Inst::shift_r(
OperandSize::Size64,
ShiftKind::ShiftRightArithmetic,
Some(63),
dst.regs()[1],
));
} else {
// Zero-extend: just zero the top word.
ctx.emit(Inst::alu_rmi_r(
OperandSize::Size64,
AluRmiROpcode::Xor,
RegMemImm::reg(dst.regs()[1].to_reg()),
dst.regs()[1],
));
}
} else {
// Sextend requires a sign-extended move, but all the other opcodes are simply a move
// from a zero-extended source. Here is why this works, in each case:
//
// - Breduce, Bextend: changing width of a boolean. We
// represent a bool as a 0 or -1, so Breduce can mask, while
// Bextend must sign-extend.
//
// - Ireduce: changing width of an integer. Smaller ints are stored with undefined
// high-order bits, so we can simply do a copy.
let is_sextend = match op {
Opcode::Sextend | Opcode::Bextend => true,
_ => false,
};
if src_ty == types::I32 && dst_ty == types::I64 && !is_sextend {
                    // As a particular x64 extra-pattern-matching opportunity, all the 32-bit ALU
                    // opcodes zero-extend the upper 32 bits, so in this case we don't even need to
                    // generate a zero-extending move.
                    // TODO: add loads and shifts here.
if let Some(_) = matches_input_any(
ctx,
inputs[0],
&[
Opcode::Iadd,
Opcode::IaddIfcout,
Opcode::Isub,
Opcode::Imul,
Opcode::Band,
Opcode::Bor,
Opcode::Bxor,
],
) {
let src = put_input_in_reg(ctx, inputs[0]);
let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
ctx.emit(Inst::gen_move(dst, src, types::I64));
return Ok(());
}
}
let src = input_to_reg_mem(ctx, inputs[0]);
let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
let ext_mode = ExtMode::new(src_ty.bits(), dst_ty.bits());
assert_eq!(
src_ty.bits() < dst_ty.bits(),
ext_mode.is_some(),
"unexpected extension: {} -> {}",
src_ty,
dst_ty
);
if let Some(ext_mode) = ext_mode {
if is_sextend {
ctx.emit(Inst::movsx_rm_r(ext_mode, src, dst));
} else {
ctx.emit(Inst::movzx_rm_r(ext_mode, src, dst));
}
} else {
ctx.emit(Inst::mov64_rm_r(src, dst));
}
}
}
Opcode::Bint => {
            // Booleans are stored as all-zeroes (0) or all-ones (-1). We mask off all but
            // the LSB to give a 0 / 1-valued integer result (e.g. true = -1 AND 1 = 1).
let rn = put_input_in_reg(ctx, inputs[0]);
let rd = get_output_reg(ctx, outputs[0]);
let ty = ctx.output_ty(insn, 0);
ctx.emit(Inst::gen_move(rd.regs()[0], rn, types::I64));
ctx.emit(Inst::alu_rmi_r(
OperandSize::Size64,
AluRmiROpcode::And,
RegMemImm::imm(1),
rd.regs()[0],
));
if ty == types::I128 {
let upper = rd.regs()[1];
ctx.emit(Inst::alu_rmi_r(
OperandSize::Size64,
AluRmiROpcode::Xor,
RegMemImm::reg(upper.to_reg()),
upper,
));
}
}
Opcode::Icmp => {
let condcode = ctx.data(insn).cond_code().unwrap();
let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
let ty = ctx.input_ty(insn, 0);
if !ty.is_vector() {
let condcode = emit_cmp(ctx, insn, condcode);
let cc = CC::from_intcc(condcode);
ctx.emit(Inst::setcc(cc, dst));
} else {
assert_eq!(ty.bits(), 128);
let eq = |ty| match ty {
types::I8X16 => SseOpcode::Pcmpeqb,
types::I16X8 => SseOpcode::Pcmpeqw,
types::I32X4 => SseOpcode::Pcmpeqd,
types::I64X2 => SseOpcode::Pcmpeqq,
_ => panic!(
"Unable to find an instruction for {} for type: {}",
condcode, ty
),
};
let gt = |ty| match ty {
types::I8X16 => SseOpcode::Pcmpgtb,
types::I16X8 => SseOpcode::Pcmpgtw,
types::I32X4 => SseOpcode::Pcmpgtd,
types::I64X2 => SseOpcode::Pcmpgtq,
_ => panic!(
"Unable to find an instruction for {} for type: {}",
condcode, ty
),
};
let maxu = |ty| match ty {
types::I8X16 => SseOpcode::Pmaxub,
types::I16X8 => SseOpcode::Pmaxuw,
types::I32X4 => SseOpcode::Pmaxud,
_ => panic!(
"Unable to find an instruction for {} for type: {}",
condcode, ty
),
};
let mins = |ty| match ty {
types::I8X16 => SseOpcode::Pminsb,
types::I16X8 => SseOpcode::Pminsw,
types::I32X4 => SseOpcode::Pminsd,
_ => panic!(
"Unable to find an instruction for {} for type: {}",
condcode, ty
),
};
let minu = |ty| match ty {
types::I8X16 => SseOpcode::Pminub,
types::I16X8 => SseOpcode::Pminuw,
types::I32X4 => SseOpcode::Pminud,
_ => panic!(
"Unable to find an instruction for {} for type: {}",
condcode, ty
),
};
// Here we decide which operand to use as the read/write `dst` (ModRM reg field) and
// which to use as the read `input` (ModRM r/m field). In the normal case we use
// Cranelift's first operand, the `lhs`, as `dst` but we flip the operands for the
// less-than cases so that we can reuse the greater-than implementation.
//
// In a surprising twist, the operands for i64x2 `gte`/`sle` must also be flipped
// from the normal order because of the special-case lowering for these instructions
// (i.e. we use PCMPGTQ with flipped operands and negate the result).
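                //
                // For example (illustrative): there is no PCMPLT* instruction, so
                // `icmp slt x, y` is computed as `y > x`: `y` is moved into `dst` and `x`
                // becomes the PCMPGT* input operand.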
let input = match condcode {
IntCC::SignedLessThanOrEqual if ty == types::I64X2 => {
let lhs = put_input_in_reg(ctx, inputs[0]);
let rhs = input_to_reg_mem(ctx, inputs[1]);
ctx.emit(Inst::gen_move(dst, lhs, ty));
rhs
}
IntCC::SignedGreaterThanOrEqual if ty == types::I64X2 => {
let lhs = input_to_reg_mem(ctx, inputs[0]);
let rhs = put_input_in_reg(ctx, inputs[1]);
ctx.emit(Inst::gen_move(dst, rhs, ty));
lhs
}
IntCC::SignedLessThan
| IntCC::SignedLessThanOrEqual
| IntCC::UnsignedLessThan
| IntCC::UnsignedLessThanOrEqual => {
let lhs = input_to_reg_mem(ctx, inputs[0]);
let rhs = put_input_in_reg(ctx, inputs[1]);
ctx.emit(Inst::gen_move(dst, rhs, ty));
lhs
}
_ => {
let lhs = put_input_in_reg(ctx, inputs[0]);
let rhs = input_to_reg_mem(ctx, inputs[1]);
ctx.emit(Inst::gen_move(dst, lhs, ty));
rhs
}
};
match condcode {
IntCC::Equal => ctx.emit(Inst::xmm_rm_r(eq(ty), input, dst)),
IntCC::NotEqual => {
ctx.emit(Inst::xmm_rm_r(eq(ty), input, dst));
// Emit all 1s into the `tmp` register.
let tmp = ctx.alloc_tmp(ty).only_reg().unwrap();
ctx.emit(Inst::xmm_rm_r(eq(ty), RegMem::from(tmp), tmp));
// Invert the result of the `PCMPEQ*`.
ctx.emit(Inst::xmm_rm_r(SseOpcode::Pxor, RegMem::from(tmp), dst));
}
IntCC::SignedGreaterThan | IntCC::SignedLessThan => {
ctx.emit(Inst::xmm_rm_r(gt(ty), input, dst))
}
IntCC::SignedGreaterThanOrEqual | IntCC::SignedLessThanOrEqual
if ty != types::I64X2 =>
{
ctx.emit(Inst::xmm_rm_r(mins(ty), input.clone(), dst));
ctx.emit(Inst::xmm_rm_r(eq(ty), input, dst))
}
IntCC::SignedGreaterThanOrEqual | IntCC::SignedLessThanOrEqual
if ty == types::I64X2 =>
{
// The PMINS* instruction is only available in AVX512VL/F so we must instead
// compare with flipped operands and negate the result (emitting one more
// instruction).
ctx.emit(Inst::xmm_rm_r(gt(ty), input, dst));
// Emit all 1s into the `tmp` register.
let tmp = ctx.alloc_tmp(ty).only_reg().unwrap();
ctx.emit(Inst::xmm_rm_r(eq(ty), RegMem::from(tmp), tmp));
// Invert the result of the `PCMPGT*`.
ctx.emit(Inst::xmm_rm_r(SseOpcode::Pxor, RegMem::from(tmp), dst));
}
IntCC::UnsignedGreaterThan | IntCC::UnsignedLessThan => {
ctx.emit(Inst::xmm_rm_r(maxu(ty), input.clone(), dst));
ctx.emit(Inst::xmm_rm_r(eq(ty), input, dst));
// Emit all 1s into the `tmp` register.
let tmp = ctx.alloc_tmp(ty).only_reg().unwrap();
ctx.emit(Inst::xmm_rm_r(eq(ty), RegMem::from(tmp), tmp));
// Invert the result of the `PCMPEQ*`.
ctx.emit(Inst::xmm_rm_r(SseOpcode::Pxor, RegMem::from(tmp), dst));
}
IntCC::UnsignedGreaterThanOrEqual | IntCC::UnsignedLessThanOrEqual => {
ctx.emit(Inst::xmm_rm_r(minu(ty), input.clone(), dst));
ctx.emit(Inst::xmm_rm_r(eq(ty), input, dst))
}
_ => unimplemented!("Unimplemented comparison code for icmp: {}", condcode),
}
}
}
Opcode::Fcmp => {
let cond_code = ctx.data(insn).fp_cond_code().unwrap();
let input_ty = ctx.input_ty(insn, 0);
if !input_ty.is_vector() {
// Unordered is returned by setting ZF, PF, CF <- 111
// Greater than by ZF, PF, CF <- 000
// Less than by ZF, PF, CF <- 001
// Equal by ZF, PF, CF <- 100
//
                // Checking the result of comiss is somewhat annoying because you don't have setcc
                // instructions that explicitly check simultaneously for the condition (i.e. eq, le,
                // gt, etc.) *and* orderedness.
                //
                // That might mean we need more than one setcc check and then a logical "and" or
                // "or" to determine both, in some cases. However, knowing that a set parity bit
                // means the result was unordered, and that whenever the parity bit is set the ZF
                // and CF flag bits are also set, we can get away with using one setcc for most
                // condition codes.
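                //
                // A sketch of the case split (an assumption about emit_fcmp's usual mapping,
                // not spelled out here): `eq` needs ZF=1 *and* PF=0 (two setcc's ANDed),
                // `ne` needs ZF=0 *or* PF=1 (two setcc's ORed), while `lt` can flip the
                // operands and use a single carry-based setcc.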
let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
match emit_fcmp(ctx, insn, cond_code, FcmpSpec::Normal) {
FcmpCondResult::Condition(cc) => {
ctx.emit(Inst::setcc(cc, dst));
}
FcmpCondResult::AndConditions(cc1, cc2) => {
let tmp = ctx.alloc_tmp(types::I32).only_reg().unwrap();
ctx.emit(Inst::setcc(cc1, tmp));
ctx.emit(Inst::setcc(cc2, dst));
ctx.emit(Inst::alu_rmi_r(
OperandSize::Size32,
AluRmiROpcode::And,
RegMemImm::reg(tmp.to_reg()),
dst,
));
}
FcmpCondResult::OrConditions(cc1, cc2) => {
let tmp = ctx.alloc_tmp(types::I32).only_reg().unwrap();
ctx.emit(Inst::setcc(cc1, tmp));
ctx.emit(Inst::setcc(cc2, dst));
ctx.emit(Inst::alu_rmi_r(
OperandSize::Size32,
AluRmiROpcode::Or,
RegMemImm::reg(tmp.to_reg()),
dst,
));
}
FcmpCondResult::InvertedEqualOrConditions(_, _) => unreachable!(),
}
} else {
let op = match input_ty {
types::F32X4 => SseOpcode::Cmpps,
types::F64X2 => SseOpcode::Cmppd,
_ => panic!("Bad input type to fcmp: {}", input_ty),
};
// Since some packed comparisons are not available, some of the condition codes
// must be inverted, with a corresponding `flip` of the operands.
let (imm, flip) = match cond_code {
FloatCC::GreaterThan => (FcmpImm::LessThan, true),
FloatCC::GreaterThanOrEqual => (FcmpImm::LessThanOrEqual, true),
FloatCC::UnorderedOrLessThan => (FcmpImm::UnorderedOrGreaterThan, true),
FloatCC::UnorderedOrLessThanOrEqual => {
(FcmpImm::UnorderedOrGreaterThanOrEqual, true)
}
FloatCC::OrderedNotEqual | FloatCC::UnorderedOrEqual => {
panic!("unsupported float condition code: {}", cond_code)
}
_ => (FcmpImm::from(cond_code), false),
};
// Determine the operands of the comparison, possibly by flipping them.
let (lhs, rhs) = if flip {
(
put_input_in_reg(ctx, inputs[1]),
input_to_reg_mem(ctx, inputs[0]),
)
} else {
(
put_input_in_reg(ctx, inputs[0]),
input_to_reg_mem(ctx, inputs[1]),
)
};
// Move the `lhs` to the same register as `dst`; this may not emit an actual move
// but ensures that the registers are the same to match x86's read-write operand
// encoding.
let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
ctx.emit(Inst::gen_move(dst, lhs, input_ty));
// Emit the comparison.
ctx.emit(Inst::xmm_rm_r_imm(
op,
rhs,
dst,
imm.encode(),
OperandSize::Size32,
));
}
}
Opcode::FallthroughReturn | Opcode::Return => {
for i in 0..ctx.num_inputs(insn) {
let src_reg = put_input_in_regs(ctx, inputs[i]);
let retval_reg = ctx.retval(i);
let ty = ctx.input_ty(insn, i);
assert!(src_reg.len() == retval_reg.len());
let (_, tys) = Inst::rc_for_type(ty)?;
for ((&src, &dst), &ty) in src_reg
.regs()
.iter()
.zip(retval_reg.regs().iter())
.zip(tys.iter())
{
ctx.emit(Inst::gen_move(dst, src, ty));
}
}
// N.B.: the Ret itself is generated by the ABI.
}
Opcode::Call | Opcode::CallIndirect => {
let caller_conv = ctx.abi().call_conv();
let (mut abi, inputs) = match op {
Opcode::Call => {
let (extname, dist) = ctx.call_target(insn).unwrap();
let sig = ctx.call_sig(insn).unwrap();
assert_eq!(inputs.len(), sig.params.len());
assert_eq!(outputs.len(), sig.returns.len());
(
X64ABICaller::from_func(sig, &extname, dist, caller_conv, flags)?,
&inputs[..],
)
}
Opcode::CallIndirect => {
let ptr = put_input_in_reg(ctx, inputs[0]);
let sig = ctx.call_sig(insn).unwrap();
assert_eq!(inputs.len() - 1, sig.params.len());
assert_eq!(outputs.len(), sig.returns.len());
(
X64ABICaller::from_ptr(sig, ptr, op, caller_conv, flags)?,
&inputs[1..],
)
}
_ => unreachable!(),
};
abi.emit_stack_pre_adjust(ctx);
assert_eq!(inputs.len(), abi.num_args());
for i in abi.get_copy_to_arg_order() {
let input = inputs[i];
let arg_regs = put_input_in_regs(ctx, input);
abi.emit_copy_regs_to_arg(ctx, i, arg_regs);
}
abi.emit_call(ctx);
for (i, output) in outputs.iter().enumerate() {
let retval_regs = get_output_reg(ctx, *output);
abi.emit_copy_retval_to_regs(ctx, i, retval_regs);
}
abi.emit_stack_post_adjust(ctx);
}
Opcode::Debugtrap => {
ctx.emit(Inst::Hlt);
}
Opcode::Trap | Opcode::ResumableTrap => {
let trap_code = ctx.data(insn).trap_code().unwrap();
ctx.emit_safepoint(Inst::Ud2 { trap_code });
}
Opcode::Trapif | Opcode::Trapff => {
let trap_code = ctx.data(insn).trap_code().unwrap();
if matches_input(ctx, inputs[0], Opcode::IaddIfcout).is_some() {
let cond_code = ctx.data(insn).cond_code().unwrap();
// The flags must not have been clobbered by any other instruction between the
// iadd_ifcout and this instruction, as verified by the CLIF validator; so we can
// simply use the flags here.
let cc = CC::from_intcc(cond_code);
ctx.emit_safepoint(Inst::TrapIf { trap_code, cc });
} else if op == Opcode::Trapif {
let cond_code = ctx.data(insn).cond_code().unwrap();
// Verification ensures that the input is always a single-def ifcmp.
let ifcmp = matches_input(ctx, inputs[0], Opcode::Ifcmp).unwrap();
let cond_code = emit_cmp(ctx, ifcmp, cond_code);
let cc = CC::from_intcc(cond_code);
ctx.emit_safepoint(Inst::TrapIf { trap_code, cc });
} else {
let cond_code = ctx.data(insn).fp_cond_code().unwrap();
// Verification ensures that the input is always a single-def ffcmp.
let ffcmp = matches_input(ctx, inputs[0], Opcode::Ffcmp).unwrap();
match emit_fcmp(ctx, ffcmp, cond_code, FcmpSpec::Normal) {
FcmpCondResult::Condition(cc) => {
ctx.emit_safepoint(Inst::TrapIf { trap_code, cc })
}
FcmpCondResult::AndConditions(cc1, cc2) => {
// A bit unfortunate, but materialize the flags in their own register, and
// check against this.
let tmp = ctx.alloc_tmp(types::I32).only_reg().unwrap();
let tmp2 = ctx.alloc_tmp(types::I32).only_reg().unwrap();
ctx.emit(Inst::setcc(cc1, tmp));
ctx.emit(Inst::setcc(cc2, tmp2));
ctx.emit(Inst::alu_rmi_r(
OperandSize::Size32,
AluRmiROpcode::And,
RegMemImm::reg(tmp.to_reg()),
tmp2,
));
ctx.emit_safepoint(Inst::TrapIf {
trap_code,
cc: CC::NZ,
});
}
FcmpCondResult::OrConditions(cc1, cc2) => {
ctx.emit_safepoint(Inst::TrapIf { trap_code, cc: cc1 });
ctx.emit_safepoint(Inst::TrapIf { trap_code, cc: cc2 });
}
FcmpCondResult::InvertedEqualOrConditions(_, _) => unreachable!(),
};
};
}
Opcode::F64const => {
unreachable!(
"implemented in ISLE: inst = `{}`, type = `{:?}`",
ctx.dfg().display_inst(insn),
ty
);
}
Opcode::F32const => {
unreachable!(
"implemented in ISLE: inst = `{}`, type = `{:?}`",
ctx.dfg().display_inst(insn),
ty
);
}
Opcode::WideningPairwiseDotProductS => {
let lhs = put_input_in_reg(ctx, inputs[0]);
let rhs = input_to_reg_mem(ctx, inputs[1]);
let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
let ty = ty.unwrap();
ctx.emit(Inst::gen_move(dst, lhs, ty));
if ty == types::I32X4 {
ctx.emit(Inst::xmm_rm_r(SseOpcode::Pmaddwd, rhs, dst));
} else {
panic!(
"Opcode::WideningPairwiseDotProductS: unsupported laneage: {:?}",
ty
);
}
}
Opcode::Fadd | Opcode::Fsub | Opcode::Fmul | Opcode::Fdiv => {
let lhs = put_input_in_reg(ctx, inputs[0]);
// We can't guarantee the RHS (if a load) is 128-bit aligned, so we
// must avoid merging a load here.
let rhs = RegMem::reg(put_input_in_reg(ctx, inputs[1]));
let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
let ty = ty.unwrap();
// Move the `lhs` to the same register as `dst`; this may not emit an actual move
// but ensures that the registers are the same to match x86's read-write operand
// encoding.
ctx.emit(Inst::gen_move(dst, lhs, ty));
// Note: min and max can't be handled here, because of the way Cranelift defines them:
// if any operand is a NaN, they must return the NaN operand, while the x86 machine
// instruction will return the second operand if either operand is a NaN.
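            // For example (illustrative): minss with a NaN in the first operand returns the
            // second operand (min(NaN, 1.0) = 1.0), whereas Cranelift's fmin must return the NaN.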
let sse_op = match ty {
types::F32 => match op {
Opcode::Fadd => SseOpcode::Addss,
Opcode::Fsub => SseOpcode::Subss,
Opcode::Fmul => SseOpcode::Mulss,
Opcode::Fdiv => SseOpcode::Divss,
_ => unreachable!(),
},
types::F64 => match op {
Opcode::Fadd => SseOpcode::Addsd,
Opcode::Fsub => SseOpcode::Subsd,
Opcode::Fmul => SseOpcode::Mulsd,
Opcode::Fdiv => SseOpcode::Divsd,
_ => unreachable!(),
},
types::F32X4 => match op {
Opcode::Fadd => SseOpcode::Addps,
Opcode::Fsub => SseOpcode::Subps,
Opcode::Fmul => SseOpcode::Mulps,
Opcode::Fdiv => SseOpcode::Divps,
_ => unreachable!(),
},
types::F64X2 => match op {
Opcode::Fadd => SseOpcode::Addpd,
Opcode::Fsub => SseOpcode::Subpd,
Opcode::Fmul => SseOpcode::Mulpd,
Opcode::Fdiv => SseOpcode::Divpd,
_ => unreachable!(),
},
_ => panic!(
"invalid type: expected one of [F32, F64, F32X4, F64X2], found {}",
ty
),
};
ctx.emit(Inst::xmm_rm_r(sse_op, rhs, dst));
}
Opcode::Fmin | Opcode::Fmax => {
let lhs = put_input_in_reg(ctx, inputs[0]);
let rhs = put_input_in_reg(ctx, inputs[1]);
let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
let is_min = op == Opcode::Fmin;
let output_ty = ty.unwrap();
ctx.emit(Inst::gen_move(dst, rhs, output_ty));
if !output_ty.is_vector() {
let op_size = match output_ty {
types::F32 => OperandSize::Size32,
types::F64 => OperandSize::Size64,
_ => panic!("unexpected type {:?} for fmin/fmax", output_ty),
};
ctx.emit(Inst::xmm_min_max_seq(op_size, is_min, lhs, dst));
} else {
                // X64's implementations of floating-point min and max do not propagate NaNs
                // and +0's in a way that is friendly to the SIMD spec. For the scalar approach
                // we use jumps to handle cases where NaN and +0 propagation is not consistent
                // with what is needed. However, for packed floating-point min and max we use a
                // different approach, to avoid the sequence of jumps that would be required on
                // a per-lane basis. Because we do not need to lower labels and jumps, but do
                // need ctx for creating temporaries, we implement the lowering here in lower.rs
                // instead of emit.rs, as is done in the scalar case.
                // The outline of the approach is as follows:
                //
                // First we perform the min/max in both directions. This is because when an
                // operand's lane contains a NaN, or when the lanes of the two operands contain
                // 0s with mismatched signs, x64 returns the second operand regardless of its
                // contents. So, to make sure we capture NaNs and normalize NaNs and 0 values,
                // we run the operation in both directions and merge the results. We then
                // normalize the results by building a mask of the lanes containing NaNs, and
                // use that mask to turn NaNs into quiet NaNs and to normalize the 0s.
//
                // The following sequence is generated for min:
                //
                // movap{s,d} %lhs, %tmp
                // minp{s,d} %dst, %tmp
                // minp{s,d} %lhs, %dst
                // orp{s,d} %dst, %tmp
                // cmpp{s,d} %tmp, %dst, $3
                // orp{s,d} %dst, %tmp
                // psrl{d,q} {$10, $13}, %dst
                // andnp{s,d} %tmp, %dst
                //
                // and for max the sequence is:
                //
                // movap{s,d} %lhs, %tmp
                // maxp{s,d} %dst, %tmp
                // maxp{s,d} %lhs, %dst
                // xorp{s,d} %tmp, %dst
                // orp{s,d} %dst, %tmp
                // subp{s,d} %dst, %tmp
                // cmpp{s,d} %tmp, %dst, $3
                // psrl{d,q} {$10, $13}, %dst
                // andnp{s,d} %tmp, %dst
if is_min {
let (mov_op, min_op, or_op, cmp_op, shift_op, shift_by, andn_op) =
match output_ty {
types::F32X4 => (
SseOpcode::Movaps,
SseOpcode::Minps,
SseOpcode::Orps,
SseOpcode::Cmpps,
SseOpcode::Psrld,
10,
SseOpcode::Andnps,
),
types::F64X2 => (
SseOpcode::Movapd,
SseOpcode::Minpd,
SseOpcode::Orpd,
SseOpcode::Cmppd,
SseOpcode::Psrlq,
13,
SseOpcode::Andnpd,
),
_ => unimplemented!("unsupported op type {:?}", output_ty),
};
// Copy lhs into tmp
let tmp_xmm1 = ctx.alloc_tmp(output_ty).only_reg().unwrap();
ctx.emit(Inst::xmm_mov(mov_op, RegMem::reg(lhs), tmp_xmm1));
// Perform min in reverse direction
ctx.emit(Inst::xmm_rm_r(min_op, RegMem::from(dst), tmp_xmm1));
// Perform min in original direction
ctx.emit(Inst::xmm_rm_r(min_op, RegMem::reg(lhs), dst));
                        // X64 handles propagation of -0's and NaNs differently between left and
                        // right operands. After doing the min in both directions, this OR will
                        // guarantee capture of -0's and NaNs in our tmp register.
ctx.emit(Inst::xmm_rm_r(or_op, RegMem::from(dst), tmp_xmm1));
// Compare unordered to create mask for lanes containing NaNs and then use
// that mask to saturate the NaN containing lanes in the tmp register with 1s.
// TODO: Would a check for NaN and then a jump be better here in the
// common case than continuing on to normalize NaNs that might not exist?
let cond = FcmpImm::from(FloatCC::Unordered);
ctx.emit(Inst::xmm_rm_r_imm(
cmp_op,
RegMem::reg(tmp_xmm1.to_reg()),
dst,
cond.encode(),
OperandSize::Size32,
));
ctx.emit(Inst::xmm_rm_r(or_op, RegMem::reg(dst.to_reg()), tmp_xmm1));
                        // The dst register holds a mask for lanes containing NaNs.
                        // We take that mask and shift it in preparation for creating a different
                        // mask to normalize NaNs (create a quiet NaN) by zeroing out the
                        // appropriate number of least significant bits. We shift right each lane
                        // by 10 bits (1 sign + 8 exp. + 1 MSB sig.) for F32X4 and by 13 bits
                        // (1 sign + 11 exp. + 1 MSB sig.) for F64X2.
ctx.emit(Inst::xmm_rmi_reg(shift_op, RegMemImm::imm(shift_by), dst));
// Finally we do a nand with the tmp register to produce the final results
// in the dst.
ctx.emit(Inst::xmm_rm_r(andn_op, RegMem::reg(tmp_xmm1.to_reg()), dst));
} else {
let (
mov_op,
max_op,
xor_op,
or_op,
sub_op,
cmp_op,
shift_op,
shift_by,
andn_op,
) = match output_ty {
types::F32X4 => (
SseOpcode::Movaps,
SseOpcode::Maxps,
SseOpcode::Xorps,
SseOpcode::Orps,
SseOpcode::Subps,
SseOpcode::Cmpps,
SseOpcode::Psrld,
10,
SseOpcode::Andnps,
),
types::F64X2 => (
SseOpcode::Movapd,
SseOpcode::Maxpd,
SseOpcode::Xorpd,
SseOpcode::Orpd,
SseOpcode::Subpd,
SseOpcode::Cmppd,
SseOpcode::Psrlq,
13,
SseOpcode::Andnpd,
),
_ => unimplemented!("unsupported op type {:?}", output_ty),
};
// Copy lhs into tmp.
let tmp_xmm1 = ctx.alloc_tmp(types::F32).only_reg().unwrap();
ctx.emit(Inst::xmm_mov(mov_op, RegMem::reg(lhs), tmp_xmm1));
// Perform max in reverse direction.
ctx.emit(Inst::xmm_rm_r(max_op, RegMem::reg(dst.to_reg()), tmp_xmm1));
// Perform max in original direction.
ctx.emit(Inst::xmm_rm_r(max_op, RegMem::reg(lhs), dst));
// Get the difference between the two results and store in tmp.
// Max uses a different approach than min to account for potential
// discrepancies with plus/minus 0.
ctx.emit(Inst::xmm_rm_r(xor_op, RegMem::reg(tmp_xmm1.to_reg()), dst));
                        // X64 handles propagation of -0's and NaNs differently between left and
                        // right operands. After doing the max in both directions, this OR will
                        // guarantee capture of 0's and NaNs in our tmp register.
ctx.emit(Inst::xmm_rm_r(or_op, RegMem::reg(dst.to_reg()), tmp_xmm1));
// Capture NaNs and sign discrepancies.
ctx.emit(Inst::xmm_rm_r(sub_op, RegMem::reg(dst.to_reg()), tmp_xmm1));
// Compare unordered to create mask for lanes containing NaNs and then use
// that mask to saturate the NaN containing lanes in the tmp register with 1s.
let cond = FcmpImm::from(FloatCC::Unordered);
ctx.emit(Inst::xmm_rm_r_imm(
cmp_op,
RegMem::reg(tmp_xmm1.to_reg()),
dst,
cond.encode(),
OperandSize::Size32,
));
                        // The dst register holds a mask for lanes containing NaNs.
                        // We take that mask and shift it in preparation for creating a different
                        // mask to normalize NaNs (create a quiet NaN) by zeroing out the
                        // appropriate number of least significant bits. We shift right each lane
                        // by 10 bits (1 sign + 8 exp. + 1 MSB sig.) for F32X4 and by 13 bits
                        // (1 sign + 11 exp. + 1 MSB sig.) for F64X2.
ctx.emit(Inst::xmm_rmi_reg(shift_op, RegMemImm::imm(shift_by), dst));
// Finally we do a nand with the tmp register to produce the final results
// in the dst.
ctx.emit(Inst::xmm_rm_r(andn_op, RegMem::reg(tmp_xmm1.to_reg()), dst));
}
}
}
Opcode::FminPseudo | Opcode::FmaxPseudo => {
// We can't guarantee the RHS (if a load) is 128-bit aligned, so we
// must avoid merging a load here.
let lhs = RegMem::reg(put_input_in_reg(ctx, inputs[0]));
let rhs = put_input_in_reg(ctx, inputs[1]);
let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
let ty = ty.unwrap();
ctx.emit(Inst::gen_move(dst, rhs, ty));
let sse_opcode = match (ty, op) {
(types::F32, Opcode::FminPseudo) => SseOpcode::Minss,
(types::F32, Opcode::FmaxPseudo) => SseOpcode::Maxss,
(types::F64, Opcode::FminPseudo) => SseOpcode::Minsd,
(types::F64, Opcode::FmaxPseudo) => SseOpcode::Maxsd,
(types::F32X4, Opcode::FminPseudo) => SseOpcode::Minps,
(types::F32X4, Opcode::FmaxPseudo) => SseOpcode::Maxps,
(types::F64X2, Opcode::FminPseudo) => SseOpcode::Minpd,
(types::F64X2, Opcode::FmaxPseudo) => SseOpcode::Maxpd,
_ => unimplemented!("unsupported type {} for {}", ty, op),
};
ctx.emit(Inst::xmm_rm_r(sse_opcode, lhs, dst));
}
Opcode::Sqrt => {
// We can't guarantee the RHS (if a load) is 128-bit aligned, so we
// must avoid merging a load here.
let src = RegMem::reg(put_input_in_reg(ctx, inputs[0]));
let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
let ty = ty.unwrap();
let sse_op = match ty {
types::F32 => SseOpcode::Sqrtss,
types::F64 => SseOpcode::Sqrtsd,
types::F32X4 => SseOpcode::Sqrtps,
types::F64X2 => SseOpcode::Sqrtpd,
_ => panic!(
"invalid type: expected one of [F32, F64, F32X4, F64X2], found {}",
ty
),
};
ctx.emit(Inst::xmm_unary_rm_r(sse_op, src, dst));
}
Opcode::Fpromote => {
// We can't guarantee the RHS (if a load) is 128-bit aligned, so we
// must avoid merging a load here.
let src = RegMem::reg(put_input_in_reg(ctx, inputs[0]));
let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
ctx.emit(Inst::xmm_unary_rm_r(SseOpcode::Cvtss2sd, src, dst));
}
Opcode::FvpromoteLow => {
let src = RegMem::reg(put_input_in_reg(ctx, inputs[0]));
let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
ctx.emit(Inst::xmm_unary_rm_r(
SseOpcode::Cvtps2pd,
RegMem::from(src),
dst,
));
}
Opcode::Fdemote => {
// We can't guarantee the RHS (if a load) is 128-bit aligned, so we
// must avoid merging a load here.
let src = RegMem::reg(put_input_in_reg(ctx, inputs[0]));
let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
ctx.emit(Inst::xmm_unary_rm_r(SseOpcode::Cvtsd2ss, src, dst));
}
Opcode::Fvdemote => {
let src = RegMem::reg(put_input_in_reg(ctx, inputs[0]));
let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
ctx.emit(Inst::xmm_unary_rm_r(
SseOpcode::Cvtpd2ps,
RegMem::from(src),
dst,
));
}
Opcode::FcvtFromSint => {
let output_ty = ty.unwrap();
if !output_ty.is_vector() {
let (ext_spec, src_size) = match ctx.input_ty(insn, 0) {
types::I8 | types::I16 => (Some(ExtSpec::SignExtendTo32), OperandSize::Size32),
types::I32 => (None, OperandSize::Size32),
types::I64 => (None, OperandSize::Size64),
_ => unreachable!(),
};
let src = match ext_spec {
Some(ext_spec) => RegMem::reg(extend_input_to_reg(ctx, inputs[0], ext_spec)),
None => RegMem::reg(put_input_in_reg(ctx, inputs[0])),
};
let opcode = if output_ty == types::F32 {
SseOpcode::Cvtsi2ss
} else {
assert_eq!(output_ty, types::F64);
SseOpcode::Cvtsi2sd
};
let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
ctx.emit(Inst::gpr_to_xmm(opcode, src, src_size, dst));
} else {
let ty = ty.unwrap();
let src = put_input_in_reg(ctx, inputs[0]);
let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
let opcode = match ctx.input_ty(insn, 0) {
types::I32X4 => SseOpcode::Cvtdq2ps,
_ => {
unimplemented!("unable to use type {} for op {}", ctx.input_ty(insn, 0), op)
}
};
ctx.emit(Inst::gen_move(dst, src, ty));
ctx.emit(Inst::xmm_rm_r(opcode, RegMem::from(dst), dst));
}
}
Opcode::FcvtLowFromSint => {
let src = RegMem::reg(put_input_in_reg(ctx, inputs[0]));
let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
ctx.emit(Inst::xmm_unary_rm_r(
SseOpcode::Cvtdq2pd,
RegMem::from(src),
dst,
));
}
Opcode::FcvtFromUint => {
let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
let ty = ty.unwrap();
let input_ty = ctx.input_ty(insn, 0);
let output_ty = ctx.output_ty(insn, 0);
if !ty.is_vector() {
match input_ty {
types::I8 | types::I16 | types::I32 => {
// Conversion from an unsigned int smaller than 64-bit is easy: zero-extend +
// do a signed conversion (which won't overflow).
let opcode = if ty == types::F32 {
SseOpcode::Cvtsi2ss
} else {
assert_eq!(ty, types::F64);
SseOpcode::Cvtsi2sd
};
let src = RegMem::reg(extend_input_to_reg(
ctx,
inputs[0],
ExtSpec::ZeroExtendTo64,
));
ctx.emit(Inst::gpr_to_xmm(opcode, src, OperandSize::Size64, dst));
}
types::I64 => {
let src = put_input_in_reg(ctx, inputs[0]);
let src_copy = ctx.alloc_tmp(types::I64).only_reg().unwrap();
ctx.emit(Inst::gen_move(src_copy, src, types::I64));
let tmp_gpr1 = ctx.alloc_tmp(types::I64).only_reg().unwrap();
let tmp_gpr2 = ctx.alloc_tmp(types::I64).only_reg().unwrap();
ctx.emit(Inst::cvt_u64_to_float_seq(
if ty == types::F64 {
OperandSize::Size64
} else {
OperandSize::Size32
},
src_copy,
tmp_gpr1,
tmp_gpr2,
dst,
));
}
_ => panic!("unexpected input type for FcvtFromUint: {:?}", input_ty),
};
} else if output_ty == types::F64X2 {
if let Some(uwiden) = matches_input(ctx, inputs[0], Opcode::UwidenLow) {
let uwiden_input = InsnInput {
insn: uwiden,
input: 0,
};
let src = put_input_in_reg(ctx, uwiden_input);
let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
let input_ty = ctx.input_ty(uwiden, 0);
                    // matches_input further obscures which Wasm instruction this is ultimately
                    // lowering. Check here that the types are as expected for F64x2ConvertLowI32x4U.
debug_assert!(input_ty == types::I32X4);
                    // The algorithm uses unpcklps to build a double that is equal to
                    // 0x1.0p52 + double(src). 0x1.0p52 is special because at this exponent
                    // every value of the mantissa represents a corresponding uint32 number.
                    // When we subtract 0x1.0p52 we are left with double(src).
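                    //
                    // For example (illustrative): for a lane holding 5, unpcklps with the
                    // 0x4330_0000 mask builds the double bit pattern 0x4330_0000_0000_0005,
                    // which is exactly 0x1.0p52 + 5.0; subtracting 0x1.0p52 leaves 5.0.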
let uint_mask = ctx.alloc_tmp(types::I32X4).only_reg().unwrap();
ctx.emit(Inst::gen_move(dst, src, types::I32X4));
static UINT_MASK: [u8; 16] = [
0x00, 0x00, 0x30, 0x43, 0x00, 0x00, 0x30, 0x43, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
];
let uint_mask_const =
ctx.use_constant(VCodeConstantData::WellKnown(&UINT_MASK));
ctx.emit(Inst::xmm_load_const(
uint_mask_const,
uint_mask,
types::I32X4,
));
// Creates 0x1.0p52 + double(src)
ctx.emit(Inst::xmm_rm_r(
SseOpcode::Unpcklps,
RegMem::from(uint_mask),
dst,
));
static UINT_MASK_HIGH: [u8; 16] = [
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x43, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x30, 0x43,
];
let uint_mask_high_const =
ctx.use_constant(VCodeConstantData::WellKnown(&UINT_MASK_HIGH));
let uint_mask_high = ctx.alloc_tmp(types::I32X4).only_reg().unwrap();
ctx.emit(Inst::xmm_load_const(
uint_mask_high_const,
uint_mask_high,
types::I32X4,
));
// 0x1.0p52 + double(src) - 0x1.0p52
ctx.emit(Inst::xmm_rm_r(
SseOpcode::Subpd,
RegMem::from(uint_mask_high),
dst,
));
} else {
panic!("Unsupported FcvtFromUint conversion types: {}", ty);
}
} else {
assert_eq!(ctx.input_ty(insn, 0), types::I32X4);
let src = put_input_in_reg(ctx, inputs[0]);
let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
if isa_flags.use_avx512vl_simd() && isa_flags.use_avx512f_simd() {
// When AVX512VL and AVX512F are available,
// `fcvt_from_uint` can be lowered to a single instruction.
ctx.emit(Inst::xmm_unary_rm_r_evex(
Avx512Opcode::Vcvtudq2ps,
RegMem::reg(src),
dst,
));
} else {
// Converting packed unsigned integers to packed floats
// requires a few steps. There is no single instruction
// lowering for converting unsigned floats but there is for
// converting packed signed integers to float (cvtdq2ps). In
// the steps below we isolate the upper half (16 bits) and
// lower half (16 bits) of each lane and then we convert
// each half separately using cvtdq2ps meant for signed
                        // integers. For this to work on the upper half bits we
                        // must first shift those bits right by 1 (divide by 2)
                        // so the most significant bit is 0 and the value is not
                        // treated as signed, and then double the value after
                        // the conversion. Finally we add the two converted
                        // halves, where the addition will round correctly.
//
// Sequence:
// -> A = 0xffffffff
// -> Ah = 0xffff0000
// -> Al = 0x0000ffff
// -> Convert(Al) // Convert int to float
                        // -> Ah = Ah >> 1 // Shift right 1 to ensure Ah's conversion isn't treated as signed
// -> Convert(Ah) // Convert .. with no loss of significant digits from previous shift
// -> Ah = Ah + Ah // Double Ah to account for shift right before the conversion.
// -> dst = Ah + Al // Add the two floats together
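                        //
                        // For example (illustrative): for the lane 0xffff_ffff, Al = 0x0000_ffff
                        // converts to 65535.0; Ah = 0xffff_0000 >> 1 = 0x7fff_8000 converts to
                        // 2147450880.0 and doubles to 4294901760.0; the final addps rounds
                        // 4294901760.0 + 65535.0 to 4294967296.0, the nearest f32 to 2^32 - 1.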
// Create a temporary register
let tmp = ctx.alloc_tmp(types::I32X4).only_reg().unwrap();
ctx.emit(Inst::xmm_unary_rm_r(
SseOpcode::Movapd,
RegMem::reg(src),
tmp,
));
ctx.emit(Inst::gen_move(dst, src, ty));
// Get the low 16 bits
ctx.emit(Inst::xmm_rmi_reg(SseOpcode::Pslld, RegMemImm::imm(16), tmp));
ctx.emit(Inst::xmm_rmi_reg(SseOpcode::Psrld, RegMemImm::imm(16), tmp));
// Get the high 16 bits
ctx.emit(Inst::xmm_rm_r(SseOpcode::Psubd, RegMem::from(tmp), dst));
// Convert the low 16 bits
ctx.emit(Inst::xmm_rm_r(SseOpcode::Cvtdq2ps, RegMem::from(tmp), tmp));
// Shift the high bits by 1, convert, and double to get the correct value.
ctx.emit(Inst::xmm_rmi_reg(SseOpcode::Psrld, RegMemImm::imm(1), dst));
ctx.emit(Inst::xmm_rm_r(SseOpcode::Cvtdq2ps, RegMem::from(dst), dst));
ctx.emit(Inst::xmm_rm_r(
SseOpcode::Addps,
RegMem::reg(dst.to_reg()),
dst,
));
// Add together the two converted values.
ctx.emit(Inst::xmm_rm_r(
SseOpcode::Addps,
RegMem::reg(tmp.to_reg()),
dst,
));
}
}
}
Opcode::FcvtToUint | Opcode::FcvtToUintSat | Opcode::FcvtToSint | Opcode::FcvtToSintSat => {
let src = put_input_in_reg(ctx, inputs[0]);
let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
let input_ty = ctx.input_ty(insn, 0);
if !input_ty.is_vector() {
let src_size = if input_ty == types::F32 {
OperandSize::Size32
} else {
assert_eq!(input_ty, types::F64);
OperandSize::Size64
};
let output_ty = ty.unwrap();
let dst_size = if output_ty == types::I32 {
OperandSize::Size32
} else {
assert_eq!(output_ty, types::I64);
OperandSize::Size64
};
let to_signed = op == Opcode::FcvtToSint || op == Opcode::FcvtToSintSat;
let is_sat = op == Opcode::FcvtToUintSat || op == Opcode::FcvtToSintSat;
let src_copy = ctx.alloc_tmp(input_ty).only_reg().unwrap();
ctx.emit(Inst::gen_move(src_copy, src, input_ty));
let tmp_xmm = ctx.alloc_tmp(input_ty).only_reg().unwrap();
let tmp_gpr = ctx.alloc_tmp(output_ty).only_reg().unwrap();
if to_signed {
ctx.emit(Inst::cvt_float_to_sint_seq(
src_size, dst_size, is_sat, src_copy, dst, tmp_gpr, tmp_xmm,
));
} else {
ctx.emit(Inst::cvt_float_to_uint_seq(
src_size, dst_size, is_sat, src_copy, dst, tmp_gpr, tmp_xmm,
));
}
} else {
if op == Opcode::FcvtToSintSat {
// Sets destination to zero if float is NaN
assert_eq!(types::F32X4, ctx.input_ty(insn, 0));
let tmp = ctx.alloc_tmp(types::I32X4).only_reg().unwrap();
ctx.emit(Inst::xmm_unary_rm_r(
SseOpcode::Movapd,
RegMem::reg(src),
tmp,
));
ctx.emit(Inst::gen_move(dst, src, input_ty));
let cond = FcmpImm::from(FloatCC::Equal);
ctx.emit(Inst::xmm_rm_r_imm(
SseOpcode::Cmpps,
RegMem::reg(tmp.to_reg()),
tmp,
cond.encode(),
OperandSize::Size32,
));
ctx.emit(Inst::xmm_rm_r(
SseOpcode::Andps,
RegMem::reg(tmp.to_reg()),
dst,
));
// Sets top bit of tmp if float is positive
// Setting up to set top bit on negative float values
ctx.emit(Inst::xmm_rm_r(
SseOpcode::Pxor,
RegMem::reg(dst.to_reg()),
tmp,
));
// Convert the packed float to packed doubleword.
ctx.emit(Inst::xmm_rm_r(
SseOpcode::Cvttps2dq,
RegMem::reg(dst.to_reg()),
dst,
));
// Set top bit only if < 0
// Saturate lane with sign (top) bit.
ctx.emit(Inst::xmm_rm_r(
SseOpcode::Pand,
RegMem::reg(dst.to_reg()),
tmp,
));
ctx.emit(Inst::xmm_rmi_reg(SseOpcode::Psrad, RegMemImm::imm(31), tmp));
// On overflow 0x80000000 is returned to a lane.
// Below sets positive overflow lanes to 0x7FFFFFFF
// Keeps negative overflow lanes as is.
ctx.emit(Inst::xmm_rm_r(
SseOpcode::Pxor,
RegMem::reg(tmp.to_reg()),
dst,
));
} else if op == Opcode::FcvtToUintSat {
                    // The algorithm for converting floats to unsigned ints is a little tricky. The
                    // complication arises because our conversion instruction (cvttps2dq) produces
                    // signed 32-bit ints, so it only covers the positive range 0..INT_MAX
                    // (0x0..0x7FFFFFFF), while the unsigned result also has the extended range
                    // (INT_MAX+1)..UINT_MAX (0x80000000..0xFFFFFFFF). That extended range needs to
                    // be accounted for as a special case, since cvttps2dq only converts values as
                    // high as INT_MAX (0x7FFFFFFF), but it conveniently sets underflows and
                    // overflows (smaller than MIN_INT or larger than MAX_INT) to INT_MAX+1
                    // (0x80000000). Noting that the range (INT_MAX+1)..UINT_MAX contains exactly
                    // INT_MAX+1 values, we can correctly convert every value in this range if we
                    // simply subtract INT_MAX+1 before doing the cvttps2dq conversion: after the
                    // subtraction, every value originally in (INT_MAX+1)..UINT_MAX is now in the
                    // range (0..INT_MAX). After the conversion we add INT_MAX+1 back to the
                    // converted value, noting again that the values we are trying to account for
                    // were already set to INT_MAX+1 during the original conversion. We simply have
                    // to create a mask and make sure we are adding together only the lanes that
                    // need to be accounted for. Digesting it all, the steps are:
//
                    // Step 1 - Account for NaN and negative floats by setting these src values to zero.
                    // Step 2 - Make a copy (tmp1) of the src value since we need to convert twice for
                    //          reasons described above.
                    // Step 3 - Convert the original src values. This will convert properly all floats up to INT_MAX.
                    // Step 4 - Subtract INT_MAX+1 from the copy set (tmp1). Note, all zero and negative values are those
                    //          values that were originally in the range (0..INT_MAX). This will come in handy during
                    //          step 7 when we zero negative lanes.
                    // Step 5 - Create a bit mask for tmp1 that will correspond to all lanes originally less than
                    //          UINT_MAX that are now less than INT_MAX thanks to the subtraction.
                    // Step 6 - Convert the second set of values (tmp1).
                    // Step 7 - Prep the converted second set by zeroing out negative lanes (these have already been
                    //          converted correctly with the first set) and by setting overflow lanes to 0x7FFFFFFF,
                    //          as this will allow us to properly saturate overflow lanes when adding to 0x80000000.
                    // Step 8 - Add the original converted src and the converted tmp1: float values originally less
                    //          than or equal to INT_MAX will be unchanged, float values originally between INT_MAX+1 and
                    //          UINT_MAX will add together as (INT_MAX+1) + (SRC - (INT_MAX+1)), and float values originally
                    //          greater than UINT_MAX will be saturated to UINT_MAX (0xFFFFFFFF) after adding (0x80000000 + 0x7FFFFFFF).
//
//
// The table below illustrates the result after each step where it matters for the converted set.
// Note the original value range (original src set) is the final dst in Step 8:
//
// Original src set:
// | Original Value Range | Step 1 | Step 3 | Step 8 |
// | -FLT_MIN..FLT_MAX | 0.0..FLT_MAX | 0..INT_MAX(w/overflow) | 0..UINT_MAX(w/saturation) |
//
// Copied src set (tmp1):
// | Step 2 | Step 4 |
// | 0.0..FLT_MAX | (0.0-(INT_MAX+1))..(FLT_MAX-(INT_MAX+1)) |
//
// | Step 6 | Step 7 |
// | (0-(INT_MAX+1))..(UINT_MAX-(INT_MAX+1))(w/overflow) | ((INT_MAX+1)-(INT_MAX+1))..(INT_MAX+1) |
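                    //
                    // Worked example (illustrative): for a lane holding 3e9 (exactly
                    // representable as f32), step 3 converts it to 0x80000000 (overflow);
                    // step 4 gives tmp1 = 3e9 - 2^31 = 852516352.0, which step 6 converts
                    // to 852516352; the step 5 mask is clear (3e9 < 2^32), so step 7 leaves
                    // the lane alone and step 8 adds 0x80000000 + 852516352 = 3000000000.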
// Create temporaries
assert_eq!(types::F32X4, ctx.input_ty(insn, 0));
let tmp1 = ctx.alloc_tmp(types::I32X4).only_reg().unwrap();
let tmp2 = ctx.alloc_tmp(types::I32X4).only_reg().unwrap();
// Converting to unsigned int so if float src is negative or NaN
// will first set to zero.
ctx.emit(Inst::xmm_rm_r(SseOpcode::Pxor, RegMem::from(tmp2), tmp2));
ctx.emit(Inst::gen_move(dst, src, input_ty));
ctx.emit(Inst::xmm_rm_r(SseOpcode::Maxps, RegMem::from(tmp2), dst));
                    // Set tmp2 to INT_MAX+1. It is important to note that although it looks like
                    // we are converting INT_MAX (0x7FFFFFFF), single-precision IEEE-754 floats can
                    // only accurately represent contiguous integers up to 2^24; outside that range
                    // values round to the closest integer the format can represent. In the case of
                    // INT_MAX, the value gets represented as 0x4f000000, which is the integer
                    // value INT_MAX+1.
ctx.emit(Inst::xmm_rm_r(SseOpcode::Pcmpeqd, RegMem::from(tmp2), tmp2));
ctx.emit(Inst::xmm_rmi_reg(SseOpcode::Psrld, RegMemImm::imm(1), tmp2));
ctx.emit(Inst::xmm_rm_r(
SseOpcode::Cvtdq2ps,
RegMem::from(tmp2),
tmp2,
));
// Make a copy of these lanes and then do the first conversion.
// Overflow lanes greater than the maximum allowed signed value will
// set to 0x80000000. Negative and NaN lanes will be 0x0
ctx.emit(Inst::xmm_mov(SseOpcode::Movaps, RegMem::from(dst), tmp1));
ctx.emit(Inst::xmm_rm_r(SseOpcode::Cvttps2dq, RegMem::from(dst), dst));
// Set lanes to src - max_signed_int
ctx.emit(Inst::xmm_rm_r(SseOpcode::Subps, RegMem::from(tmp2), tmp1));
                    // Create mask for all positive lanes to saturate (i.e. greater than
                    // or equal to the maximum allowable unsigned int).
let cond = FcmpImm::from(FloatCC::LessThanOrEqual);
ctx.emit(Inst::xmm_rm_r_imm(
SseOpcode::Cmpps,
RegMem::from(tmp1),
tmp2,
cond.encode(),
OperandSize::Size32,
));
// Convert those set of lanes that have the max_signed_int factored out.
ctx.emit(Inst::xmm_rm_r(
SseOpcode::Cvttps2dq,
RegMem::from(tmp1),
tmp1,
));
// Prepare converted lanes by zeroing negative lanes and prepping lanes
// that have positive overflow (based on the mask) by setting these lanes
// to 0x7FFFFFFF
ctx.emit(Inst::xmm_rm_r(SseOpcode::Pxor, RegMem::from(tmp2), tmp1));
ctx.emit(Inst::xmm_rm_r(SseOpcode::Pxor, RegMem::from(tmp2), tmp2));
ctx.emit(Inst::xmm_rm_r(SseOpcode::Pmaxsd, RegMem::from(tmp2), tmp1));
// Add this second set of converted lanes to the original to properly handle
// values greater than max signed int.
ctx.emit(Inst::xmm_rm_r(SseOpcode::Paddd, RegMem::from(tmp1), dst));
} else {
                    // Since this branch is also guarded by a check for vector types,
                    // neither Opcode::FcvtToUint nor Opcode::FcvtToSint can reach here,
                    // as vector variants of those opcodes do not exist. The first two
                    // branches cover all reachable cases.
unreachable!();
}
}
}
Opcode::IaddPairwise => {
if let (Some(swiden_low), Some(swiden_high)) = (
matches_input(ctx, inputs[0], Opcode::SwidenLow),
matches_input(ctx, inputs[1], Opcode::SwidenHigh),
) {
let swiden_input = &[
InsnInput {
insn: swiden_low,
input: 0,
},
InsnInput {
insn: swiden_high,
input: 0,
},
];
let input_ty = ctx.input_ty(swiden_low, 0);
let output_ty = ctx.output_ty(insn, 0);
let src0 = put_input_in_reg(ctx, swiden_input[0]);
let src1 = put_input_in_reg(ctx, swiden_input[1]);
let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
if src0 != src1 {
unimplemented!(
"iadd_pairwise not implemented for general case with different inputs"
);
}
match (input_ty, output_ty) {
(types::I8X16, types::I16X8) => {
static MUL_CONST: [u8; 16] = [0x01; 16];
let mul_const = ctx.use_constant(VCodeConstantData::WellKnown(&MUL_CONST));
let mul_const_reg = ctx.alloc_tmp(types::I8X16).only_reg().unwrap();
ctx.emit(Inst::xmm_load_const(mul_const, mul_const_reg, types::I8X16));
ctx.emit(Inst::xmm_mov(
SseOpcode::Movdqa,
RegMem::reg(mul_const_reg.to_reg()),
dst,
));
ctx.emit(Inst::xmm_rm_r(SseOpcode::Pmaddubsw, RegMem::reg(src0), dst));
}
(types::I16X8, types::I32X4) => {
static MUL_CONST: [u8; 16] = [
0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00,
0x01, 0x00, 0x01, 0x00,
];
let mul_const = ctx.use_constant(VCodeConstantData::WellKnown(&MUL_CONST));
let mul_const_reg = ctx.alloc_tmp(types::I16X8).only_reg().unwrap();
ctx.emit(Inst::xmm_load_const(mul_const, mul_const_reg, types::I16X8));
ctx.emit(Inst::xmm_mov(SseOpcode::Movdqa, RegMem::reg(src0), dst));
ctx.emit(Inst::xmm_rm_r(
SseOpcode::Pmaddwd,
RegMem::reg(mul_const_reg.to_reg()),
dst,
));
}
_ => {
unimplemented!("Type not supported for {:?}", op);
}
}
} else if let (Some(uwiden_low), Some(uwiden_high)) = (
matches_input(ctx, inputs[0], Opcode::UwidenLow),
matches_input(ctx, inputs[1], Opcode::UwidenHigh),
) {
let uwiden_input = &[
InsnInput {
insn: uwiden_low,
input: 0,
},
InsnInput {
insn: uwiden_high,
input: 0,
},
];
let input_ty = ctx.input_ty(uwiden_low, 0);
let output_ty = ctx.output_ty(insn, 0);
let src0 = put_input_in_reg(ctx, uwiden_input[0]);
let src1 = put_input_in_reg(ctx, uwiden_input[1]);
let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
if src0 != src1 {
unimplemented!(
"iadd_pairwise not implemented for general case with different inputs"
);
}
match (input_ty, output_ty) {
(types::I8X16, types::I16X8) => {
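// Same PMADDUBSW trick as the signed case above, but with the operands
// swapped: the input bytes sit in the (unsigned) destination operand
// and the all-ones multiplier is the (signed) source operand, so
// adjacent unsigned bytes are summed into words (255 + 255 fits easily).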
static MUL_CONST: [u8; 16] = [0x01; 16];
let mul_const = ctx.use_constant(VCodeConstantData::WellKnown(&MUL_CONST));
let mul_const_reg = ctx.alloc_tmp(types::I8X16).only_reg().unwrap();
ctx.emit(Inst::xmm_load_const(mul_const, mul_const_reg, types::I8X16));
ctx.emit(Inst::xmm_mov(SseOpcode::Movdqa, RegMem::reg(src0), dst));
ctx.emit(Inst::xmm_rm_r(
SseOpcode::Pmaddubsw,
RegMem::reg(mul_const_reg.to_reg()),
dst,
));
}
(types::I16X8, types::I32X4) => {
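// There is no unsigned-word PMADD instruction, so bias the input into
// signed range and undo the bias afterwards. Roughly:
//   PXOR    dst, [0x8000 splat]  ; x - 0x8000 per word, into signed range
//   PMADDWD dst, [1 splat]       ; signed pairwise add of biased words
//   PADDD   dst, [0x10000 splat] ; re-add the two 0x8000 biases per pair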
static PXOR_CONST: [u8; 16] = [
0x00, 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, 0x80,
0x00, 0x80, 0x00, 0x80,
];
let pxor_const =
ctx.use_constant(VCodeConstantData::WellKnown(&PXOR_CONST));
let pxor_const_reg = ctx.alloc_tmp(types::I16X8).only_reg().unwrap();
ctx.emit(Inst::xmm_load_const(
pxor_const,
pxor_const_reg,
types::I16X8,
));
ctx.emit(Inst::xmm_mov(SseOpcode::Movdqa, RegMem::reg(src0), dst));
ctx.emit(Inst::xmm_rm_r(
SseOpcode::Pxor,
RegMem::reg(pxor_const_reg.to_reg()),
dst,
));
static MADD_CONST: [u8; 16] = [
0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00,
0x01, 0x00, 0x01, 0x00,
];
let madd_const =
ctx.use_constant(VCodeConstantData::WellKnown(&MADD_CONST));
let madd_const_reg = ctx.alloc_tmp(types::I8X16).only_reg().unwrap();
ctx.emit(Inst::xmm_load_const(
madd_const,
madd_const_reg,
types::I16X8,
));
ctx.emit(Inst::xmm_rm_r(
SseOpcode::Pmaddwd,
RegMem::reg(madd_const_reg.to_reg()),
dst,
));
static ADDD_CONST2: [u8; 16] = [
0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00,
0x00, 0x00, 0x01, 0x00,
];
let addd_const2 =
ctx.use_constant(VCodeConstantData::WellKnown(&ADDD_CONST2));
let addd_const2_reg = ctx.alloc_tmp(types::I8X16).only_reg().unwrap();
ctx.emit(Inst::xmm_load_const(
addd_const2,
addd_const2_reg,
types::I16X8,
));
ctx.emit(Inst::xmm_rm_r(
SseOpcode::Paddd,
RegMem::reg(addd_const2_reg.to_reg()),
dst,
));
}
_ => {
unimplemented!("Type not supported for {:?}", op);
}
}
} else {
unimplemented!("Operands not supported for {:?}", op);
}
}
Opcode::UwidenHigh | Opcode::UwidenLow | Opcode::SwidenHigh | Opcode::SwidenLow => {
let input_ty = ctx.input_ty(insn, 0);
let output_ty = ctx.output_ty(insn, 0);
let src = put_input_in_reg(ctx, inputs[0]);
let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
if output_ty.is_vector() {
match op {
Opcode::SwidenLow => match (input_ty, output_ty) {
(types::I8X16, types::I16X8) => {
ctx.emit(Inst::xmm_mov(SseOpcode::Pmovsxbw, RegMem::reg(src), dst));
}
(types::I16X8, types::I32X4) => {
ctx.emit(Inst::xmm_mov(SseOpcode::Pmovsxwd, RegMem::reg(src), dst));
}
(types::I32X4, types::I64X2) => {
ctx.emit(Inst::xmm_mov(SseOpcode::Pmovsxdq, RegMem::reg(src), dst));
}
_ => unreachable!(),
},
Opcode::SwidenHigh => match (input_ty, output_ty) {
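// To widen the high half, first move it into the low half (PALIGNR by
// 8 bytes for the I8X16/I16X8 cases, PSHUFD with 0xEE for I32X4) and
// then sign-extend it with the matching PMOVSX* instruction.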
(types::I8X16, types::I16X8) => {
ctx.emit(Inst::gen_move(dst, src, output_ty));
ctx.emit(Inst::xmm_rm_r_imm(
SseOpcode::Palignr,
RegMem::reg(src),
dst,
8,
OperandSize::Size32,
));
ctx.emit(Inst::xmm_mov(SseOpcode::Pmovsxbw, RegMem::from(dst), dst));
}
(types::I16X8, types::I32X4) => {
ctx.emit(Inst::gen_move(dst, src, output_ty));
ctx.emit(Inst::xmm_rm_r_imm(
SseOpcode::Palignr,
RegMem::reg(src),
dst,
8,
OperandSize::Size32,
));
ctx.emit(Inst::xmm_mov(SseOpcode::Pmovsxwd, RegMem::from(dst), dst));
}
(types::I32X4, types::I64X2) => {
ctx.emit(Inst::xmm_rm_r_imm(
SseOpcode::Pshufd,
RegMem::reg(src),
dst,
0xEE,
OperandSize::Size32,
));
ctx.emit(Inst::xmm_mov(SseOpcode::Pmovsxdq, RegMem::from(dst), dst));
}
_ => unreachable!(),
},
Opcode::UwidenLow => match (input_ty, output_ty) {
(types::I8X16, types::I16X8) => {
ctx.emit(Inst::xmm_mov(SseOpcode::Pmovzxbw, RegMem::reg(src), dst));
}
(types::I16X8, types::I32X4) => {
ctx.emit(Inst::xmm_mov(SseOpcode::Pmovzxwd, RegMem::reg(src), dst));
}
(types::I32X4, types::I64X2) => {
ctx.emit(Inst::xmm_mov(SseOpcode::Pmovzxdq, RegMem::reg(src), dst));
}
_ => unreachable!(),
},
Opcode::UwidenHigh => match (input_ty, output_ty) {
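// Same PALIGNR/PSHUFD trick as SwidenHigh above, but zero-extending
// with PMOVZX* instead.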
(types::I8X16, types::I16X8) => {
ctx.emit(Inst::gen_move(dst, src, output_ty));
ctx.emit(Inst::xmm_rm_r_imm(
SseOpcode::Palignr,
RegMem::reg(src),
dst,
8,
OperandSize::Size32,
));
ctx.emit(Inst::xmm_mov(SseOpcode::Pmovzxbw, RegMem::from(dst), dst));
}
(types::I16X8, types::I32X4) => {
ctx.emit(Inst::gen_move(dst, src, output_ty));
ctx.emit(Inst::xmm_rm_r_imm(
SseOpcode::Palignr,
RegMem::reg(src),
dst,
8,
OperandSize::Size32,
));
ctx.emit(Inst::xmm_mov(SseOpcode::Pmovzxwd, RegMem::from(dst), dst));
}
(types::I32X4, types::I64X2) => {
ctx.emit(Inst::xmm_rm_r_imm(
SseOpcode::Pshufd,
RegMem::reg(src),
dst,
0xEE,
OperandSize::Size32,
));
ctx.emit(Inst::xmm_mov(SseOpcode::Pmovzxdq, RegMem::from(dst), dst));
}
_ => unreachable!(),
},
_ => unreachable!(),
}
} else {
panic!("Unsupported non-vector type for widen instruction {:?}", ty);
}
}
Opcode::Snarrow | Opcode::Unarrow => {
let input_ty = ctx.input_ty(insn, 0);
let output_ty = ctx.output_ty(insn, 0);
let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
if output_ty.is_vector() {
match op {
Opcode::Snarrow => match (input_ty, output_ty) {
(types::I16X8, types::I8X16) => {
let src1 = put_input_in_reg(ctx, inputs[0]);
let src2 = put_input_in_reg(ctx, inputs[1]);
ctx.emit(Inst::gen_move(dst, src1, input_ty));
ctx.emit(Inst::xmm_rm_r(SseOpcode::Packsswb, RegMem::reg(src2), dst));
}
(types::I32X4, types::I16X8) => {
let src1 = put_input_in_reg(ctx, inputs[0]);
let src2 = put_input_in_reg(ctx, inputs[1]);
ctx.emit(Inst::gen_move(dst, src1, input_ty));
ctx.emit(Inst::xmm_rm_r(SseOpcode::Packssdw, RegMem::reg(src2), dst));
}
// TODO: The type we are expecting as input is actually an F64X2, but the instruction is only
// defined for integers, so here we use I64X2. This is a separate issue that needs to be fixed
// in instruction.rs.
(types::I64X2, types::I32X4) => {
if let Some(fcvt_inst) =
matches_input(ctx, inputs[0], Opcode::FcvtToSintSat)
{
//y = i32x4.trunc_sat_f64x2_s_zero(x) is lowered to:
//MOVE xmm_tmp, xmm_x
//CMPEQPD xmm_tmp, xmm_x
//MOVE xmm_y, xmm_x
//ANDPS xmm_tmp, [wasm_f64x2_splat(2147483647.0)]
//MINPD xmm_y, xmm_tmp
//CVTTPD2DQ xmm_y, xmm_y
let fcvt_input = InsnInput {
insn: fcvt_inst,
input: 0,
};
let src = put_input_in_reg(ctx, fcvt_input);
ctx.emit(Inst::gen_move(dst, src, input_ty));
let tmp1 = ctx.alloc_tmp(output_ty).only_reg().unwrap();
ctx.emit(Inst::gen_move(tmp1, src, input_ty));
let cond = FcmpImm::from(FloatCC::Equal);
ctx.emit(Inst::xmm_rm_r_imm(
SseOpcode::Cmppd,
RegMem::reg(src),
tmp1,
cond.encode(),
OperandSize::Size32,
));
// 2147483647.0 is equivalent to 0x41DFFFFFFFC00000
static UMAX_MASK: [u8; 16] = [
0x00, 0x00, 0xC0, 0xFF, 0xFF, 0xFF, 0xDF, 0x41, 0x00, 0x00,
0xC0, 0xFF, 0xFF, 0xFF, 0xDF, 0x41,
];
let umax_const =
ctx.use_constant(VCodeConstantData::WellKnown(&UMAX_MASK));
let umax_mask = ctx.alloc_tmp(types::F64X2).only_reg().unwrap();
ctx.emit(Inst::xmm_load_const(umax_const, umax_mask, types::F64X2));
//ANDPS xmm_tmp, [wasm_f64x2_splat(2147483647.0)]
ctx.emit(Inst::xmm_rm_r(
SseOpcode::Andps,
RegMem::from(umax_mask),
tmp1,
));
ctx.emit(Inst::xmm_rm_r(SseOpcode::Minpd, RegMem::from(tmp1), dst));
ctx.emit(Inst::xmm_rm_r(
SseOpcode::Cvttpd2dq,
RegMem::from(dst),
dst,
));
} else {
unreachable!();
}
}
_ => unreachable!(),
},
Opcode::Unarrow => match (input_ty, output_ty) {
(types::I16X8, types::I8X16) => {
let src1 = put_input_in_reg(ctx, inputs[0]);
let src2 = put_input_in_reg(ctx, inputs[1]);
ctx.emit(Inst::gen_move(dst, src1, input_ty));
ctx.emit(Inst::xmm_rm_r(SseOpcode::Packuswb, RegMem::reg(src2), dst));
}
(types::I32X4, types::I16X8) => {
let src1 = put_input_in_reg(ctx, inputs[0]);
let src2 = put_input_in_reg(ctx, inputs[1]);
ctx.emit(Inst::gen_move(dst, src1, input_ty));
ctx.emit(Inst::xmm_rm_r(SseOpcode::Packusdw, RegMem::reg(src2), dst));
}
_ => unreachable!(),
},
_ => unreachable!(),
}
} else {
panic!("Unsupported non-vector type for widen instruction {:?}", ty);
}
}
Opcode::Bitcast => {
let input_ty = ctx.input_ty(insn, 0);
let output_ty = ctx.output_ty(insn, 0);
match (input_ty, output_ty) {
(types::F32, types::I32) => {
let src = put_input_in_reg(ctx, inputs[0]);
let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
ctx.emit(Inst::xmm_to_gpr(
SseOpcode::Movd,
src,
dst,
OperandSize::Size32,
));
}
(types::I32, types::F32) => {
let src = input_to_reg_mem(ctx, inputs[0]);
let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
ctx.emit(Inst::gpr_to_xmm(
SseOpcode::Movd,
src,
OperandSize::Size32,
dst,
));
}
(types::F64, types::I64) => {
let src = put_input_in_reg(ctx, inputs[0]);
let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
ctx.emit(Inst::xmm_to_gpr(
SseOpcode::Movq,
src,
dst,
OperandSize::Size64,
));
}
(types::I64, types::F64) => {
let src = input_to_reg_mem(ctx, inputs[0]);
let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
ctx.emit(Inst::gpr_to_xmm(
SseOpcode::Movq,
src,
OperandSize::Size64,
dst,
));
}
_ => unreachable!("invalid bitcast from {:?} to {:?}", input_ty, output_ty),
}
}
Opcode::Fabs | Opcode::Fneg => {
let src = RegMem::reg(put_input_in_reg(ctx, inputs[0]));
let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
// In both cases, generate a constant and apply a single binary instruction:
// - to compute the absolute value, set all bits to 1 but the MSB to 0, and bit-AND the
// src with it.
// - to compute the negated value, set all bits to 0 but the MSB to 1, and bit-XOR the
// src with it.
let output_ty = ty.unwrap();
if !output_ty.is_vector() {
let (val, opcode): (u64, _) = match output_ty {
types::F32 => match op {
Opcode::Fabs => (0x7fffffff, SseOpcode::Andps),
Opcode::Fneg => (0x80000000, SseOpcode::Xorps),
_ => unreachable!(),
},
types::F64 => match op {
Opcode::Fabs => (0x7fffffffffffffff, SseOpcode::Andpd),
Opcode::Fneg => (0x8000000000000000, SseOpcode::Xorpd),
_ => unreachable!(),
},
_ => panic!("unexpected type {:?} for Fabs", output_ty),
};
for inst in Inst::gen_constant(ValueRegs::one(dst), val as u128, output_ty, |ty| {
ctx.alloc_tmp(ty).only_reg().unwrap()
}) {
ctx.emit(inst);
}
ctx.emit(Inst::xmm_rm_r(opcode, src, dst));
} else {
// Eventually vector constants should be available in `gen_constant` and this block
// can be merged with the one above (TODO).
if output_ty.bits() == 128 {
// Move the `lhs` to the same register as `dst`; this may not emit an actual move
// but ensures that the registers are the same to match x86's read-write operand
// encoding.
let src = put_input_in_reg(ctx, inputs[0]);
ctx.emit(Inst::gen_move(dst, src, output_ty));
// Generate an all-1s constant in an XMM register. This uses CMPPS, but CMPPD
// would have the same effect. Note that we zero the temp we allocate because,
// if we don't, the register we use could be initialized with NaN, in which
// case the CMPPS would fail since NaN != NaN.
let tmp = ctx.alloc_tmp(output_ty).only_reg().unwrap();
ctx.emit(Inst::xmm_rm_r(SseOpcode::Xorps, RegMem::from(tmp), tmp));
let cond = FcmpImm::from(FloatCC::Equal);
let cmpps = Inst::xmm_rm_r_imm(
SseOpcode::Cmpps,
RegMem::reg(tmp.to_reg()),
tmp,
cond.encode(),
OperandSize::Size32,
);
ctx.emit(cmpps);
// Shift the all 1s constant to generate the mask.
let lane_bits = output_ty.lane_bits();
let (shift_opcode, opcode, shift_by) = match (op, lane_bits) {
(Opcode::Fabs, _) => {
unreachable!(
"implemented in ISLE: inst = `{}`, type = `{:?}`",
ctx.dfg().display_inst(insn),
ty
);
}
(Opcode::Fneg, 32) => (SseOpcode::Pslld, SseOpcode::Xorps, 31),
(Opcode::Fneg, 64) => (SseOpcode::Psllq, SseOpcode::Xorpd, 63),
_ => unreachable!(
"unexpected opcode and lane size: {:?}, {} bits",
op, lane_bits
),
};
let shift = Inst::xmm_rmi_reg(shift_opcode, RegMemImm::imm(shift_by), tmp);
ctx.emit(shift);
// Apply shifted mask (XOR or AND).
let mask = Inst::xmm_rm_r(opcode, RegMem::reg(tmp.to_reg()), dst);
ctx.emit(mask);
} else {
panic!("unexpected type {:?} for Fabs", output_ty);
}
}
}
Opcode::Fcopysign => {
let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
let lhs = put_input_in_reg(ctx, inputs[0]);
let rhs = put_input_in_reg(ctx, inputs[1]);
let ty = ty.unwrap();
// We're going to generate the following sequence:
//
// movabs $INT_MIN, tmp_gpr1
// mov{d,q} tmp_gpr1, tmp_xmm1
// movap{s,d} tmp_xmm1, dst
// andnp{s,d} src_1, dst
// movap{s,d} src_2, tmp_xmm2
// andp{s,d} tmp_xmm1, tmp_xmm2
// orp{s,d} tmp_xmm2, dst
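// That is: tmp_xmm1 holds the sign-bit mask; ANDNPS clears the sign bit
// of the first operand (keeping its magnitude), ANDPS isolates the sign
// bit of the second operand, and ORPS combines the two.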
let tmp_xmm1 = ctx.alloc_tmp(types::F32).only_reg().unwrap();
let tmp_xmm2 = ctx.alloc_tmp(types::F32).only_reg().unwrap();
let (sign_bit_cst, mov_op, and_not_op, and_op, or_op) = match ty {
types::F32 => (
0x8000_0000,
SseOpcode::Movaps,
SseOpcode::Andnps,
SseOpcode::Andps,
SseOpcode::Orps,
),
types::F64 => (
0x8000_0000_0000_0000,
SseOpcode::Movapd,
SseOpcode::Andnpd,
SseOpcode::Andpd,
SseOpcode::Orpd,
),
_ => {
panic!("unexpected type {:?} for copysign", ty);
}
};
for inst in Inst::gen_constant(ValueRegs::one(tmp_xmm1), sign_bit_cst, ty, |ty| {
ctx.alloc_tmp(ty).only_reg().unwrap()
}) {
ctx.emit(inst);
}
ctx.emit(Inst::xmm_mov(mov_op, RegMem::reg(tmp_xmm1.to_reg()), dst));
ctx.emit(Inst::xmm_rm_r(and_not_op, RegMem::reg(lhs), dst));
ctx.emit(Inst::xmm_mov(mov_op, RegMem::reg(rhs), tmp_xmm2));
ctx.emit(Inst::xmm_rm_r(
and_op,
RegMem::reg(tmp_xmm1.to_reg()),
tmp_xmm2,
));
ctx.emit(Inst::xmm_rm_r(or_op, RegMem::reg(tmp_xmm2.to_reg()), dst));
}
Opcode::Ceil | Opcode::Floor | Opcode::Nearest | Opcode::Trunc => {
let ty = ty.unwrap();
if isa_flags.use_sse41() {
let mode = match op {
Opcode::Ceil => RoundImm::RoundUp,
Opcode::Floor => RoundImm::RoundDown,
Opcode::Nearest => RoundImm::RoundNearest,
Opcode::Trunc => RoundImm::RoundZero,
_ => panic!("unexpected opcode {:?} in Ceil/Floor/Nearest/Trunc", op),
};
let op = match ty {
types::F32 => SseOpcode::Roundss,
types::F64 => SseOpcode::Roundsd,
types::F32X4 => SseOpcode::Roundps,
types::F64X2 => SseOpcode::Roundpd,
_ => panic!("unexpected type {:?} in Ceil/Floor/Nearest/Trunc", ty),
};
let src = input_to_reg_mem(ctx, inputs[0]);
let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
ctx.emit(Inst::xmm_rm_r_imm(
op,
src,
dst,
mode.encode(),
OperandSize::Size32,
));
} else {
// Lower to VM calls when there's no access to SSE4.1.
// Note that for vector types on platforms without SSE4.1 support,
// execution will panic here: no libcall variants exist for the
// vector roundings.
let libcall = match (op, ty) {
(Opcode::Ceil, types::F32) => LibCall::CeilF32,
(Opcode::Ceil, types::F64) => LibCall::CeilF64,
(Opcode::Floor, types::F32) => LibCall::FloorF32,
(Opcode::Floor, types::F64) => LibCall::FloorF64,
(Opcode::Nearest, types::F32) => LibCall::NearestF32,
(Opcode::Nearest, types::F64) => LibCall::NearestF64,
(Opcode::Trunc, types::F32) => LibCall::TruncF32,
(Opcode::Trunc, types::F64) => LibCall::TruncF64,
_ => panic!(
"unexpected type/opcode {:?}/{:?} in Ceil/Floor/Nearest/Trunc",
ty, op
),
};
emit_vm_call(ctx, flags, triple, libcall, insn, inputs, outputs)?;
}
}
Opcode::Load
| Opcode::Uload8
| Opcode::Sload8
| Opcode::Uload16
| Opcode::Sload16
| Opcode::Uload32
| Opcode::Sload32
| Opcode::LoadComplex
| Opcode::Uload8Complex
| Opcode::Sload8Complex
| Opcode::Uload16Complex
| Opcode::Sload16Complex
| Opcode::Uload32Complex
| Opcode::Sload32Complex
| Opcode::Sload8x8
| Opcode::Uload8x8
| Opcode::Sload16x4
| Opcode::Uload16x4
| Opcode::Sload32x2
| Opcode::Uload32x2 => {
let offset = ctx.data(insn).load_store_offset().unwrap();
let elem_ty = match op {
Opcode::Sload8 | Opcode::Uload8 | Opcode::Sload8Complex | Opcode::Uload8Complex => {
types::I8
}
Opcode::Sload16
| Opcode::Uload16
| Opcode::Sload16Complex
| Opcode::Uload16Complex => types::I16,
Opcode::Sload32
| Opcode::Uload32
| Opcode::Sload32Complex
| Opcode::Uload32Complex => types::I32,
Opcode::Sload8x8
| Opcode::Uload8x8
| Opcode::Sload8x8Complex
| Opcode::Uload8x8Complex => types::I8X8,
Opcode::Sload16x4
| Opcode::Uload16x4
| Opcode::Sload16x4Complex
| Opcode::Uload16x4Complex => types::I16X4,
Opcode::Sload32x2
| Opcode::Uload32x2
| Opcode::Sload32x2Complex
| Opcode::Uload32x2Complex => types::I32X2,
Opcode::Load | Opcode::LoadComplex => ctx.output_ty(insn, 0),
_ => unimplemented!(),
};
let ext_mode = ExtMode::new(elem_ty.bits(), 64);
let sign_extend = match op {
Opcode::Sload8
| Opcode::Sload8Complex
| Opcode::Sload16
| Opcode::Sload16Complex
| Opcode::Sload32
| Opcode::Sload32Complex
| Opcode::Sload8x8
| Opcode::Sload8x8Complex
| Opcode::Sload16x4
| Opcode::Sload16x4Complex
| Opcode::Sload32x2
| Opcode::Sload32x2Complex => true,
_ => false,
};
let amode = match op {
Opcode::Load
| Opcode::Uload8
| Opcode::Sload8
| Opcode::Uload16
| Opcode::Sload16
| Opcode::Uload32
| Opcode::Sload32
| Opcode::Sload8x8
| Opcode::Uload8x8
| Opcode::Sload16x4
| Opcode::Uload16x4
| Opcode::Sload32x2
| Opcode::Uload32x2 => {
assert_eq!(inputs.len(), 1, "only one input for load operands");
lower_to_amode(ctx, inputs[0], offset)
}
Opcode::LoadComplex
| Opcode::Uload8Complex
| Opcode::Sload8Complex
| Opcode::Uload16Complex
| Opcode::Sload16Complex
| Opcode::Uload32Complex
| Opcode::Sload32Complex
| Opcode::Sload8x8Complex
| Opcode::Uload8x8Complex
| Opcode::Sload16x4Complex
| Opcode::Uload16x4Complex
| Opcode::Sload32x2Complex
| Opcode::Uload32x2Complex => {
assert_eq!(
    inputs.len(),
    2,
    "expected two inputs (base, index) in complex load"
);
let base = put_input_in_reg(ctx, inputs[0]);
let index = put_input_in_reg(ctx, inputs[1]);
let shift = 0;
let flags = ctx.memflags(insn).expect("load should have memflags");
Amode::imm_reg_reg_shift(offset as u32, base, index, shift).with_flags(flags)
}
_ => unreachable!(),
};
if elem_ty == types::I128 {
let dsts = get_output_reg(ctx, outputs[0]);
ctx.emit(Inst::mov64_m_r(amode.clone(), dsts.regs()[0]));
ctx.emit(Inst::mov64_m_r(amode.offset(8), dsts.regs()[1]));
} else {
let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
let is_xmm = elem_ty.is_float() || elem_ty.is_vector();
match (sign_extend, is_xmm) {
(true, false) => {
// The load is sign-extended only when the output size is smaller than 64
// bits, so `ext_mode` is guaranteed to be defined in this case.
ctx.emit(Inst::movsx_rm_r(ext_mode.unwrap(), RegMem::mem(amode), dst));
}
(false, false) => {
if elem_ty.bytes() == 8 {
// Use a plain load.
ctx.emit(Inst::mov64_m_r(amode, dst))
} else {
// Use a zero-extended load.
ctx.emit(Inst::movzx_rm_r(ext_mode.unwrap(), RegMem::mem(amode), dst))
}
}
(_, true) => {
ctx.emit(match elem_ty {
types::F32 => Inst::xmm_mov(SseOpcode::Movss, RegMem::mem(amode), dst),
types::F64 => Inst::xmm_mov(SseOpcode::Movsd, RegMem::mem(amode), dst),
types::I8X8 => {
if sign_extend {
Inst::xmm_mov(SseOpcode::Pmovsxbw, RegMem::mem(amode), dst)
} else {
Inst::xmm_mov(SseOpcode::Pmovzxbw, RegMem::mem(amode), dst)
}
}
types::I16X4 => {
if sign_extend {
Inst::xmm_mov(SseOpcode::Pmovsxwd, RegMem::mem(amode), dst)
} else {
Inst::xmm_mov(SseOpcode::Pmovzxwd, RegMem::mem(amode), dst)
}
}
types::I32X2 => {
if sign_extend {
Inst::xmm_mov(SseOpcode::Pmovsxdq, RegMem::mem(amode), dst)
} else {
Inst::xmm_mov(SseOpcode::Pmovzxdq, RegMem::mem(amode), dst)
}
}
_ if elem_ty.is_vector() && elem_ty.bits() == 128 => {
Inst::xmm_mov(SseOpcode::Movups, RegMem::mem(amode), dst)
}
// TODO Specialize for different types: MOVUPD, MOVDQU
_ => unreachable!(
"unexpected type for load: {:?} - {:?}",
elem_ty,
elem_ty.bits()
),
});
}
}
}
}
Opcode::Store
| Opcode::Istore8
| Opcode::Istore16
| Opcode::Istore32
| Opcode::StoreComplex
| Opcode::Istore8Complex
| Opcode::Istore16Complex
| Opcode::Istore32Complex => {
let offset = ctx.data(insn).load_store_offset().unwrap();
let elem_ty = match op {
Opcode::Istore8 | Opcode::Istore8Complex => types::I8,
Opcode::Istore16 | Opcode::Istore16Complex => types::I16,
Opcode::Istore32 | Opcode::Istore32Complex => types::I32,
Opcode::Store | Opcode::StoreComplex => ctx.input_ty(insn, 0),
_ => unreachable!(),
};
let addr = match op {
Opcode::Store | Opcode::Istore8 | Opcode::Istore16 | Opcode::Istore32 => {
assert_eq!(inputs.len(), 2, "two inputs (value, address) for store operands");
lower_to_amode(ctx, inputs[1], offset)
}
Opcode::StoreComplex
| Opcode::Istore8Complex
| Opcode::Istore16Complex
| Opcode::Istore32Complex => {
assert_eq!(
    inputs.len(),
    3,
    "expected three inputs (value, base, index) in complex store"
);
let base = put_input_in_reg(ctx, inputs[1]);
let index = put_input_in_reg(ctx, inputs[2]);
let shift = 0;
let flags = ctx.memflags(insn).expect("store should have memflags");
Amode::imm_reg_reg_shift(offset as u32, base, index, shift).with_flags(flags)
}
_ => unreachable!(),
};
if elem_ty == types::I128 {
let srcs = put_input_in_regs(ctx, inputs[0]);
ctx.emit(Inst::store(types::I64, srcs.regs()[0], addr.clone()));
ctx.emit(Inst::store(types::I64, srcs.regs()[1], addr.offset(8)));
} else {
let src = put_input_in_reg(ctx, inputs[0]);
ctx.emit(Inst::store(elem_ty, src, addr));
}
}
Opcode::AtomicRmw => {
// This is a simple, general-case atomic update, based on a loop involving
// `cmpxchg`. Note that we could do much better than this in the case where the old
// value at the location (that is to say, the SSA `Value` computed by this CLIF
// instruction) is not required. In that case, we could instead implement this
// using a single `lock`-prefixed x64 read-modify-write instruction. Also, even in
// the case where the old value is required, for the `add` and `sub` cases, we can
// use the single instruction `lock xadd`. However, those improvements have been
// left for another day.
// TODO: filed as https://github.com/bytecodealliance/wasmtime/issues/2153
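// As a sketch (the exact sequence lives in the emission code for
// `AtomicRmwSeq`), the pseudo-instruction below expands to a retry loop
// of roughly this shape:
//   mov          (%r9), %rax
// again:
//   mov          %rax, %r11
//   <op>         %r10, %r11
//   lock cmpxchg %r11, (%r9)
//   jnz          again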
let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
let mut addr = put_input_in_reg(ctx, inputs[0]);
let mut arg2 = put_input_in_reg(ctx, inputs[1]);
let ty_access = ty.unwrap();
assert!(is_valid_atomic_transaction_ty(ty_access));
// Make sure that both args are in virtual regs, since in effect we have to do a
// parallel copy to get them safely to the AtomicRmwSeq input regs, and that's not
// guaranteed safe if either is in a real reg.
addr = ctx.ensure_in_vreg(addr, types::I64);
arg2 = ctx.ensure_in_vreg(arg2, types::I64);
// Move the args to the preordained AtomicRMW input regs. Note that `AtomicRmwSeq`
// operates at whatever width is specified by `ty`, so there's no need to
// zero-extend `arg2` in the case of `ty` being I8/I16/I32.
ctx.emit(Inst::gen_move(
Writable::from_reg(regs::r9()),
addr,
types::I64,
));
ctx.emit(Inst::gen_move(
Writable::from_reg(regs::r10()),
arg2,
types::I64,
));
// Now the AtomicRmwSeq (pseudo-) instruction itself
let op = inst_common::AtomicRmwOp::from(ctx.data(insn).atomic_rmw_op().unwrap());
ctx.emit(Inst::AtomicRmwSeq {
ty: ty_access,
op,
address: regs::r9(),
operand: regs::r10(),
temp: Writable::from_reg(regs::r11()),
dst_old: Writable::from_reg(regs::rax()),
});
// And finally, copy the preordained AtomicRmwSeq output reg to its destination.
ctx.emit(Inst::gen_move(dst, regs::rax(), types::I64));
}
Opcode::AtomicCas => {
// This is very similar to, but not identical to, the `AtomicRmw` case. As with
// `AtomicRmw`, there's no need to zero-extend narrow values here.
let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
let addr = lower_to_amode(ctx, inputs[0], 0);
let expected = put_input_in_reg(ctx, inputs[1]);
let replacement = put_input_in_reg(ctx, inputs[2]);
let ty_access = ty.unwrap();
assert!(is_valid_atomic_transaction_ty(ty_access));
// Move the expected value into %rax. Because there's only one fixed register on
// the input side, we don't have to use `ensure_in_vreg`, as is necessary in the
// `AtomicRmw` case.
ctx.emit(Inst::gen_move(
Writable::from_reg(regs::rax()),
expected,
types::I64,
));
ctx.emit(Inst::LockCmpxchg {
ty: ty_access,
mem: addr.into(),
replacement,
expected: regs::rax(),
dst_old: Writable::from_reg(regs::rax()),
});
// And finally, copy the old value at the location to its destination reg.
ctx.emit(Inst::gen_move(dst, regs::rax(), types::I64));
}
Opcode::AtomicLoad => {
// This is a normal load. The x86-TSO memory model provides sufficient sequencing
// to satisfy the CLIF synchronisation requirements for `AtomicLoad` without the
// need for any fence instructions.
let data = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
let addr = lower_to_amode(ctx, inputs[0], 0);
let ty_access = ty.unwrap();
assert!(is_valid_atomic_transaction_ty(ty_access));
let rm = RegMem::mem(addr);
if ty_access == types::I64 {
ctx.emit(Inst::mov64_rm_r(rm, data));
} else {
let ext_mode = ExtMode::new(ty_access.bits(), 64).unwrap_or_else(|| {
panic!(
"invalid extension during AtomicLoad: {} -> {}",
ty_access.bits(),
64
)
});
ctx.emit(Inst::movzx_rm_r(ext_mode, rm, data));
}
}
Opcode::AtomicStore => {
// This is a normal store, followed by an `mfence` instruction.
let data = put_input_in_reg(ctx, inputs[0]);
let addr = lower_to_amode(ctx, inputs[1], 0);
let ty_access = ctx.input_ty(insn, 0);
assert!(is_valid_atomic_transaction_ty(ty_access));
ctx.emit(Inst::store(ty_access, data, addr));
ctx.emit(Inst::Fence {
kind: FenceKind::MFence,
});
}
Opcode::Fence => {
ctx.emit(Inst::Fence {
kind: FenceKind::MFence,
});
}
Opcode::FuncAddr => {
let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
let (extname, _) = ctx.call_target(insn).unwrap();
let extname = extname.clone();
ctx.emit(Inst::LoadExtName {
dst,
name: Box::new(extname),
offset: 0,
});
}
Opcode::SymbolValue => {
let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
let (extname, _, offset) = ctx.symbol_value(insn).unwrap();
let extname = extname.clone();
ctx.emit(Inst::LoadExtName {
dst,
name: Box::new(extname),
offset,
});
}
Opcode::StackAddr => {
let (stack_slot, offset) = match *ctx.data(insn) {
InstructionData::StackLoad {
opcode: Opcode::StackAddr,
stack_slot,
offset,
} => (stack_slot, offset),
_ => unreachable!(),
};
let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
let offset: i32 = offset.into();
let inst = ctx
.abi()
.stackslot_addr(stack_slot, u32::try_from(offset).unwrap(), dst);
ctx.emit(inst);
}
Opcode::Select => {
let flag_input = inputs[0];
if let Some(fcmp) = matches_input(ctx, flag_input, Opcode::Fcmp) {
let cond_code = ctx.data(fcmp).fp_cond_code().unwrap();
// For equal, we flip the operands, because we can't test a conjunction of
// CPU flags with a single cmove; see InvertedEqualOrConditions doc comment.
let (lhs_input, rhs_input) = match cond_code {
FloatCC::Equal => (inputs[2], inputs[1]),
_ => (inputs[1], inputs[2]),
};
let ty = ctx.output_ty(insn, 0);
let rhs = put_input_in_regs(ctx, rhs_input);
let dst = get_output_reg(ctx, outputs[0]);
let lhs = put_input_in_regs(ctx, lhs_input);
// We request inversion of Equal to NotEqual here: taking LHS if equal would mean
// take it if both CC::NP and CC::Z are set, the conjunction of which can't be
// modeled with a single cmov instruction. Instead, we'll swap LHS and RHS in the
// select operation, and invert the equal to a not-equal here.
let fcmp_results = emit_fcmp(ctx, fcmp, cond_code, FcmpSpec::InvertEqual);
if let FcmpCondResult::InvertedEqualOrConditions(_, _) = &fcmp_results {
// Keep this sync'd with the lowering of the select inputs above.
assert_eq!(cond_code, FloatCC::Equal);
}
emit_moves(ctx, dst, rhs, ty);
let operand_size = if ty == types::F64 {
OperandSize::Size64
} else {
OperandSize::Size32
};
match fcmp_results {
FcmpCondResult::Condition(cc) => {
if is_int_or_ref_ty(ty) || ty == types::I128 || ty == types::B128 {
let size = ty.bytes() as u8;
emit_cmoves(ctx, size, cc, lhs, dst);
} else {
ctx.emit(Inst::xmm_cmove(
operand_size,
cc,
RegMem::reg(lhs.only_reg().unwrap()),
dst.only_reg().unwrap(),
));
}
}
FcmpCondResult::AndConditions(_, _) => {
unreachable!(
"can't AND with select; see above comment about inverting equal"
);
}
FcmpCondResult::InvertedEqualOrConditions(cc1, cc2)
| FcmpCondResult::OrConditions(cc1, cc2) => {
if is_int_or_ref_ty(ty) || ty == types::I128 {
let size = ty.bytes() as u8;
emit_cmoves(ctx, size, cc1, lhs.clone(), dst);
emit_cmoves(ctx, size, cc2, lhs, dst);
} else {
ctx.emit(Inst::xmm_cmove(
operand_size,
cc1,
RegMem::reg(lhs.only_reg().unwrap()),
dst.only_reg().unwrap(),
));
ctx.emit(Inst::xmm_cmove(
operand_size,
cc2,
RegMem::reg(lhs.only_reg().unwrap()),
dst.only_reg().unwrap(),
));
}
}
}
} else {
let ty = ty.unwrap();
let size = ty.bytes() as u8;
let lhs = put_input_in_regs(ctx, inputs[1]);
let rhs = put_input_in_regs(ctx, inputs[2]);
let dst = get_output_reg(ctx, outputs[0]);
let cc = if let Some(icmp) = matches_input(ctx, flag_input, Opcode::Icmp) {
let cond_code = ctx.data(icmp).cond_code().unwrap();
let cond_code = emit_cmp(ctx, icmp, cond_code);
CC::from_intcc(cond_code)
} else {
let sel_ty = ctx.input_ty(insn, 0);
let size = OperandSize::from_ty(ctx.input_ty(insn, 0));
let test = put_input_in_reg(ctx, flag_input);
let test_input = if sel_ty == types::B1 {
// The input is a boolean value; test the LSB for nonzero with:
// test reg, 1
RegMemImm::imm(1)
} else {
// The input is an integer; test the whole value for
// nonzero with:
// test reg, reg
//
// (It doesn't make sense to have a boolean wider than
// one bit here -- which bit would cause us to select an
// input?)
assert!(!is_bool_ty(sel_ty));
RegMemImm::reg(test)
};
ctx.emit(Inst::test_rmi_r(size, test_input, test));
CC::NZ
};
// This doesn't affect the flags.
emit_moves(ctx, dst, rhs, ty);
if is_int_or_ref_ty(ty) || ty == types::I128 {
emit_cmoves(ctx, size, cc, lhs, dst);
} else {
debug_assert!(
ty == types::F32
|| ty == types::F64
|| (ty.is_vector() && ty.bits() == 128)
);
ctx.emit(Inst::xmm_cmove(
if ty == types::F64 {
OperandSize::Size64
} else {
OperandSize::Size32
},
cc,
RegMem::reg(lhs.only_reg().unwrap()),
dst.only_reg().unwrap(),
));
}
}
}
Opcode::Selectif | Opcode::SelectifSpectreGuard => {
let lhs = put_input_in_regs(ctx, inputs[1]);
let rhs = put_input_in_regs(ctx, inputs[2]);
let dst = get_output_reg(ctx, outputs[0]);
let ty = ctx.output_ty(insn, 0);
// Verification ensures that the input is always a single-def ifcmp.
let cmp_insn = ctx
.get_input_as_source_or_const(inputs[0].insn, inputs[0].input)
.inst
.unwrap()
.0;
debug_assert_eq!(ctx.data(cmp_insn).opcode(), Opcode::Ifcmp);
let cond_code = ctx.data(insn).cond_code().unwrap();
let cond_code = emit_cmp(ctx, cmp_insn, cond_code);
let cc = CC::from_intcc(cond_code);
if is_int_or_ref_ty(ty) || ty == types::I128 {
let size = ty.bytes() as u8;
emit_moves(ctx, dst, rhs, ty);
emit_cmoves(ctx, size, cc, lhs, dst);
} else {
debug_assert!(ty == types::F32 || ty == types::F64);
emit_moves(ctx, dst, rhs, ty);
ctx.emit(Inst::xmm_cmove(
if ty == types::F64 {
OperandSize::Size64
} else {
OperandSize::Size32
},
cc,
RegMem::reg(lhs.only_reg().unwrap()),
dst.only_reg().unwrap(),
));
}
}
Opcode::Udiv | Opcode::Urem | Opcode::Sdiv | Opcode::Srem => {
let kind = match op {
Opcode::Udiv => DivOrRemKind::UnsignedDiv,
Opcode::Sdiv => DivOrRemKind::SignedDiv,
Opcode::Urem => DivOrRemKind::UnsignedRem,
Opcode::Srem => DivOrRemKind::SignedRem,
_ => unreachable!(),
};
let is_div = kind.is_div();
let input_ty = ctx.input_ty(insn, 0);
let size = OperandSize::from_ty(input_ty);
let dividend = put_input_in_reg(ctx, inputs[0]);
let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
ctx.emit(Inst::gen_move(
Writable::from_reg(regs::rax()),
dividend,
input_ty,
));
// Always do explicit checks for `srem`: otherwise INT_MIN % -1 is not handled
// properly, since the hardware `idiv` faults on that overflowing quotient
// while `srem` must return 0.
if flags.avoid_div_traps() || op == Opcode::Srem {
// A vcode meta-instruction is used to lower the inline checks, since they embed
// pc-relative offsets that must not change, thus requiring regalloc to not
// interfere by introducing spills and reloads.
//
// Note it keeps the result in $rax (for divide) or $rdx (for rem), so that
// regalloc is aware of the coalescing opportunity between rax/rdx and the
// destination register.
let divisor = put_input_in_reg(ctx, inputs[1]);
let divisor_copy = ctx.alloc_tmp(types::I64).only_reg().unwrap();
ctx.emit(Inst::gen_move(divisor_copy, divisor, types::I64));
let tmp = if op == Opcode::Sdiv && size == OperandSize::Size64 {
Some(ctx.alloc_tmp(types::I64).only_reg().unwrap())
} else {
None
};
// TODO use xor
ctx.emit(Inst::imm(
OperandSize::Size32,
0,
Writable::from_reg(regs::rdx()),
));
ctx.emit(Inst::checked_div_or_rem_seq(kind, size, divisor_copy, tmp));
} else {
// We don't want more than one trap record for a single instruction,
// so let's not allow the "mem" case (load-op merging) here; force
// divisor into a register instead.
let divisor = RegMem::reg(put_input_in_reg(ctx, inputs[1]));
// Fill in the high parts:
if kind.is_signed() {
// sign-extend the sign-bit of al into ah for size 1, or rax into rdx, for
// signed opcodes.
ctx.emit(Inst::sign_extend_data(size));
} else if input_ty == types::I8 {
ctx.emit(Inst::movzx_rm_r(
ExtMode::BL,
RegMem::reg(regs::rax()),
Writable::from_reg(regs::rax()),
));
} else {
// zero for unsigned opcodes.
ctx.emit(Inst::imm(
OperandSize::Size64,
0,
Writable::from_reg(regs::rdx()),
));
}
// Emit the actual idiv.
ctx.emit(Inst::div(size, kind.is_signed(), divisor));
}
// Move the result back into the destination reg.
if is_div {
// The quotient is in rax.
ctx.emit(Inst::gen_move(dst, regs::rax(), input_ty));
} else {
if size == OperandSize::Size8 {
// The remainder is in AH. Right-shift by 8 bits then move from rax.
ctx.emit(Inst::shift_r(
OperandSize::Size64,
ShiftKind::ShiftRightLogical,
Some(8),
Writable::from_reg(regs::rax()),
));
ctx.emit(Inst::gen_move(dst, regs::rax(), input_ty));
} else {
// The remainder is in rdx.
ctx.emit(Inst::gen_move(dst, regs::rdx(), input_ty));
}
}
}
Opcode::Umulhi | Opcode::Smulhi => {
let input_ty = ctx.input_ty(insn, 0);
let lhs = put_input_in_reg(ctx, inputs[0]);
let rhs = input_to_reg_mem(ctx, inputs[1]);
let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
// Move lhs into %rax.
ctx.emit(Inst::gen_move(
Writable::from_reg(regs::rax()),
lhs,
input_ty,
));
// Emit the actual mul or imul.
let signed = op == Opcode::Smulhi;
ctx.emit(Inst::mul_hi(OperandSize::from_ty(input_ty), signed, rhs));
// Read the result from the high part (stored in %rdx).
ctx.emit(Inst::gen_move(dst, regs::rdx(), input_ty));
}
Opcode::GetPinnedReg => {
let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
ctx.emit(Inst::gen_move(dst, regs::pinned_reg(), types::I64));
}
Opcode::SetPinnedReg => {
let src = put_input_in_reg(ctx, inputs[0]);
ctx.emit(Inst::gen_move(
Writable::from_reg(regs::pinned_reg()),
src,
types::I64,
));
}
Opcode::Vconst => {
let used_constant = if let &InstructionData::UnaryConst {
constant_handle, ..
} = ctx.data(insn)
{
ctx.use_constant(VCodeConstantData::Pool(
constant_handle,
ctx.get_constant_data(constant_handle).clone(),
))
} else {
unreachable!("vconst should always have unary_const format")
};
// TODO use Inst::gen_constant() instead.
let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
let ty = ty.unwrap();
ctx.emit(Inst::xmm_load_const(used_constant, dst, ty));
}
Opcode::RawBitcast => {
// A raw_bitcast is just a mechanism for correcting the type of V128 values (see
// https://github.com/bytecodealliance/wasmtime/issues/1147). As such, this IR
// instruction should emit no machine code but a move is necessary to give the register
// allocator a definition for the output virtual register.
let src = put_input_in_reg(ctx, inputs[0]);
let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
let ty = ty.unwrap();
ctx.emit(Inst::gen_move(dst, src, ty));
}
Opcode::Shuffle => {
let ty = ty.unwrap();
let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
let lhs_ty = ctx.input_ty(insn, 0);
let lhs = put_input_in_reg(ctx, inputs[0]);
let rhs = put_input_in_reg(ctx, inputs[1]);
let mask = match ctx.get_immediate(insn) {
Some(DataValue::V128(bytes)) => bytes.to_vec(),
_ => unreachable!("shuffle should always have a 16-byte immediate"),
};
// A mask-building helper: in 128-bit SIMD, 0-15 indicate which lane to read from and a
// 1 in the most significant position zeroes the lane.
let zero_unknown_lane_index = |b: u8| if b > 15 { 0b10000000 } else { b };
ctx.emit(Inst::gen_move(dst, rhs, ty));
if rhs == lhs {
// If `lhs` and `rhs` are the same we can use a single PSHUFB to shuffle the XMM
// register. We statically build `constructed_mask` to zero out any unknown lane
// indices (this may not be strictly necessary: the verifier could reject invalid
// mask values) and fix the indexes to all point to the `dst` vector.
let constructed_mask = mask
.iter()
// Mask values greater than 15 refer to a lane in the second operand,
// which here is the same vector, so wrap them back into the 0..15 range.
.map(|&b| if b > 15 { b.wrapping_sub(16) } else { b })
.map(zero_unknown_lane_index)
.collect();
let constant = ctx.use_constant(VCodeConstantData::Generated(constructed_mask));
let tmp = ctx.alloc_tmp(types::I8X16).only_reg().unwrap();
ctx.emit(Inst::xmm_load_const(constant, tmp, ty));
// After loading the constructed mask in a temporary register, we use this to
// shuffle the `dst` register (remember that, in this case, it is the same as
// `src` so we disregard this register).
ctx.emit(Inst::xmm_rm_r(SseOpcode::Pshufb, RegMem::from(tmp), dst));
} else {
if isa_flags.use_avx512vl_simd() && isa_flags.use_avx512vbmi_simd() {
assert!(
mask.iter().all(|b| *b < 32),
"shuffle mask values must be between 0 and 31"
);
// Load the mask into the destination register.
let constant = ctx.use_constant(VCodeConstantData::Generated(mask.into()));
ctx.emit(Inst::xmm_load_const(constant, dst, ty));
// VPERMI2B has the exact semantics of Wasm's shuffle:
// permute the bytes in `src1` and `src2` using byte indexes
// in `dst` and store the byte results in `dst`.
ctx.emit(Inst::xmm_rm_r_evex(
Avx512Opcode::Vpermi2b,
RegMem::reg(rhs),
lhs,
dst,
));
} else {
// If `lhs` and `rhs` are different, we must shuffle each separately and then OR
// them together. This is necessary due to PSHUFB semantics. As in the case above,
// we build the `constructed_mask` for each case statically.
// PSHUFB the `lhs` argument into `tmp0`, placing zeroes for unused lanes.
let tmp0 = ctx.alloc_tmp(lhs_ty).only_reg().unwrap();
ctx.emit(Inst::gen_move(tmp0, lhs, lhs_ty));
let constructed_mask =
mask.iter().cloned().map(zero_unknown_lane_index).collect();
let constant = ctx.use_constant(VCodeConstantData::Generated(constructed_mask));
let tmp1 = ctx.alloc_tmp(types::I8X16).only_reg().unwrap();
ctx.emit(Inst::xmm_load_const(constant, tmp1, ty));
ctx.emit(Inst::xmm_rm_r(SseOpcode::Pshufb, RegMem::from(tmp1), tmp0));
// PSHUFB the second argument, placing zeroes for unused lanes.
let constructed_mask = mask
.iter()
.map(|b| b.wrapping_sub(16))
.map(zero_unknown_lane_index)
.collect();
let constant = ctx.use_constant(VCodeConstantData::Generated(constructed_mask));
let tmp2 = ctx.alloc_tmp(types::I8X16).only_reg().unwrap();
ctx.emit(Inst::xmm_load_const(constant, tmp2, ty));
ctx.emit(Inst::xmm_rm_r(SseOpcode::Pshufb, RegMem::from(tmp2), dst));
// OR the shuffled registers (the mechanism and lane-size for OR-ing the registers
// is not important).
ctx.emit(Inst::xmm_rm_r(SseOpcode::Orps, RegMem::from(tmp0), dst));
}
}
}
Opcode::Swizzle => {
// SIMD swizzle; the following inefficient implementation is due to the Wasm SIMD spec
// requiring mask indexes greater than 15 to have the same semantics as a 0 index. For
// the spec discussion, see https://github.com/WebAssembly/simd/issues/93. The CLIF
// semantics match the Wasm SIMD semantics for this instruction.
// The instruction format maps to variables like: %dst = swizzle %src, %mask
let ty = ty.unwrap();
let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
let src = put_input_in_reg(ctx, inputs[0]);
let swizzle_mask = put_input_in_reg(ctx, inputs[1]);
// Inform the register allocator that `src` and `dst` should be in the same register.
ctx.emit(Inst::gen_move(dst, src, ty));
// Create a mask for zeroing out-of-bounds lanes of the swizzle mask.
let zero_mask = ctx.alloc_tmp(types::I8X16).only_reg().unwrap();
static ZERO_MASK_VALUE: [u8; 16] = [
0x70, 0x70, 0x70, 0x70, 0x70, 0x70, 0x70, 0x70, 0x70, 0x70, 0x70, 0x70, 0x70, 0x70,
0x70, 0x70,
];
let constant = ctx.use_constant(VCodeConstantData::WellKnown(&ZERO_MASK_VALUE));
ctx.emit(Inst::xmm_load_const(constant, zero_mask, ty));
// Use the `zero_mask` on a writable `swizzle_mask`.
let swizzle_mask = Writable::from_reg(swizzle_mask);
ctx.emit(Inst::xmm_rm_r(
SseOpcode::Paddusb,
RegMem::from(zero_mask),
swizzle_mask,
));
// Shuffle `dst` using the fixed-up `swizzle_mask`.
ctx.emit(Inst::xmm_rm_r(
SseOpcode::Pshufb,
RegMem::from(swizzle_mask),
dst,
));
}
Opcode::Insertlane => {
unreachable!(
"implemented in ISLE: inst = `{}`, type = `{:?}`",
ctx.dfg().display_inst(insn),
ty
);
}
Opcode::Extractlane => {
// The instruction format maps to variables like: %dst = extractlane %src, %lane
let ty = ty.unwrap();
let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
let src_ty = ctx.input_ty(insn, 0);
assert_eq!(src_ty.bits(), 128);
let src = put_input_in_reg(ctx, inputs[0]);
let lane = if let InstructionData::BinaryImm8 { imm, .. } = ctx.data(insn) {
*imm
} else {
unreachable!();
};
debug_assert!(lane < src_ty.lane_count() as u8);
emit_extract_lane(ctx, src, dst, lane, ty);
}
Opcode::ScalarToVector => {
// When moving a scalar value to a vector register, we must handle several
// situations:
// 1. a scalar float is already in an XMM register, so we simply move it
// 2. a scalar of any other type resides in a GPR register: MOVD moves the bits to an
// XMM register and zeroes the upper bits
// 3. a scalar (float or otherwise) that has previously been loaded from memory (e.g.
// the default lowering of Wasm's `load[32|64]_zero`) can be lowered to a single
// MOVSS/MOVSD instruction; to do this, we rely on `input_to_reg_mem` to sink the
// unused load.
let src = input_to_reg_mem(ctx, inputs[0]);
let src_ty = ctx.input_ty(insn, 0);
let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
let dst_ty = ty.unwrap();
assert!(src_ty == dst_ty.lane_type() && dst_ty.bits() == 128);
match src {
RegMem::Reg { reg } => {
if src_ty.is_float() {
// Case 1: when moving a scalar float, we simply move from one XMM register
// to another, expecting the register allocator to elide this. Here we
// assume that the upper bits of a scalar float have not been munged
// (the same assumption the old backend makes).
ctx.emit(Inst::gen_move(dst, reg, dst_ty));
} else {
// Case 2: when moving a scalar value of any other type, use MOVD to zero
// the upper lanes.
let src_size = match src_ty.bits() {
32 => OperandSize::Size32,
64 => OperandSize::Size64,
_ => unimplemented!("invalid source size for type: {}", src_ty),
};
ctx.emit(Inst::gpr_to_xmm(SseOpcode::Movd, src, src_size, dst));
}
}
RegMem::Mem { .. } => {
// Case 3: when presented with `load + scalar_to_vector`, coalesce into a single
// MOVSS/MOVSD instruction.
let opcode = match src_ty.bits() {
32 => SseOpcode::Movss,
64 => SseOpcode::Movsd,
_ => unimplemented!("unable to move scalar to vector for type: {}", src_ty),
};
ctx.emit(Inst::xmm_mov(opcode, src, dst));
}
}
}
Opcode::Splat => {
let ty = ty.unwrap();
assert_eq!(ty.bits(), 128);
let src_ty = ctx.input_ty(insn, 0);
assert!(src_ty.bits() < 128);
let src = input_to_reg_mem(ctx, inputs[0]);
let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
// We know that splat will overwrite all of the lanes of `dst` but it takes several
// instructions to do so. Because of the multiple instructions, there is no good way to
// declare `dst` a `def` except with the following pseudo-instruction.
ctx.emit(Inst::xmm_uninit_value(dst));
// TODO: eventually many of these sequences could be optimized with AVX's VBROADCAST*
// and VPBROADCAST*.
match ty.lane_bits() {
8 => {
emit_insert_lane(ctx, src, dst, 0, ty.lane_type());
// Initialize a register with all 0s.
let tmp = ctx.alloc_tmp(ty).only_reg().unwrap();
ctx.emit(Inst::xmm_rm_r(SseOpcode::Pxor, RegMem::from(tmp), tmp));
// Shuffle the lowest byte lane to all other lanes.
ctx.emit(Inst::xmm_rm_r(SseOpcode::Pshufb, RegMem::from(tmp), dst))
}
16 => {
emit_insert_lane(ctx, src.clone(), dst, 0, ty.lane_type());
emit_insert_lane(ctx, src, dst, 1, ty.lane_type());
// Shuffle the lowest two lanes to all other lanes.
ctx.emit(Inst::xmm_rm_r_imm(
SseOpcode::Pshufd,
RegMem::from(dst),
dst,
0,
OperandSize::Size32,
))
}
32 => {
emit_insert_lane(ctx, src, dst, 0, ty.lane_type());
// Shuffle the lowest lane to all other lanes.
ctx.emit(Inst::xmm_rm_r_imm(
SseOpcode::Pshufd,
RegMem::from(dst),
dst,
0,
OperandSize::Size32,
))
}
64 => {
emit_insert_lane(ctx, src.clone(), dst, 0, ty.lane_type());
emit_insert_lane(ctx, src, dst, 1, ty.lane_type());
}
_ => panic!("Invalid type to splat: {}", ty),
}
}
Opcode::VanyTrue => {
let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
let src_ty = ctx.input_ty(insn, 0);
assert_eq!(src_ty.bits(), 128);
let src = put_input_in_reg(ctx, inputs[0]);
// Set the ZF if the result is all zeroes.
ctx.emit(Inst::xmm_cmp_rm_r(SseOpcode::Ptest, RegMem::reg(src), src));
// If the ZF is not set, place a 1 in `dst`.
ctx.emit(Inst::setcc(CC::NZ, dst));
}
Opcode::VallTrue => {
let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
let src_ty = ctx.input_ty(insn, 0);
assert_eq!(src_ty.bits(), 128);
let src = input_to_reg_mem(ctx, inputs[0]);
let eq = |ty: Type| match ty.lane_bits() {
8 => SseOpcode::Pcmpeqb,
16 => SseOpcode::Pcmpeqw,
32 => SseOpcode::Pcmpeqd,
64 => SseOpcode::Pcmpeqq,
_ => panic!("Unable to find an instruction for {} for type: {}", op, ty),
};
// Initialize a register with all 0s.
let tmp = ctx.alloc_tmp(src_ty).only_reg().unwrap();
ctx.emit(Inst::xmm_rm_r(SseOpcode::Pxor, RegMem::from(tmp), tmp));
// Compare to see what lanes are filled with all 1s.
ctx.emit(Inst::xmm_rm_r(eq(src_ty), src, tmp));
// Set the ZF if the result is all zeroes.
ctx.emit(Inst::xmm_cmp_rm_r(
SseOpcode::Ptest,
RegMem::from(tmp),
tmp.to_reg(),
));
// If the ZF is set, place a 1 in `dst`.
ctx.emit(Inst::setcc(CC::Z, dst));
}
Opcode::VhighBits => {
let src = put_input_in_reg(ctx, inputs[0]);
let src_ty = ctx.input_ty(insn, 0);
debug_assert!(src_ty.is_vector() && src_ty.bits() == 128);
let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
debug_assert!(dst.to_reg().get_class() == RegClass::I64);
// The Intel specification allows using both 32-bit and 64-bit GPRs as destination for
// the "move mask" instructions. This is controlled by the REX.R bit: "In 64-bit mode,
// the instruction can access additional registers when used with a REX.R prefix. The
// default operand size is 64-bit in 64-bit mode" (PMOVMSKB in IA Software Development
// Manual, vol. 2). This being the case, we will always clear REX.W since its use is
// unnecessary (`OperandSize` is used for setting/clearing REX.W).
let size = OperandSize::Size32;
match src_ty {
types::I8X16 | types::B8X16 => {
ctx.emit(Inst::xmm_to_gpr(SseOpcode::Pmovmskb, src, dst, size))
}
types::I32X4 | types::B32X4 | types::F32X4 => {
ctx.emit(Inst::xmm_to_gpr(SseOpcode::Movmskps, src, dst, size))
}
types::I64X2 | types::B64X2 | types::F64X2 => {
ctx.emit(Inst::xmm_to_gpr(SseOpcode::Movmskpd, src, dst, size))
}
types::I16X8 | types::B16X8 => {
// There is no x86 instruction for extracting the high bit of 16-bit lanes so
// here we:
// - duplicate the 16-bit lanes of `src` into 8-bit lanes:
// PACKSSWB([x1, x2, ...], [x1, x2, ...]) = [x1', x2', ..., x1', x2', ...]
// - use PMOVMSKB to gather the high bits; now we have duplicates, though
// - shift away the bottom 8 high bits to remove the duplicates.
let tmp = ctx.alloc_tmp(src_ty).only_reg().unwrap();
ctx.emit(Inst::gen_move(tmp, src, src_ty));
ctx.emit(Inst::xmm_rm_r(SseOpcode::Packsswb, RegMem::reg(src), tmp));
ctx.emit(Inst::xmm_to_gpr(
SseOpcode::Pmovmskb,
tmp.to_reg(),
dst,
size,
));
ctx.emit(Inst::shift_r(
OperandSize::Size64,
ShiftKind::ShiftRightLogical,
Some(8),
dst,
));
}
_ => unimplemented!("unknown input type {} for {}", src_ty, op),
}
}
Opcode::Iconcat => {
let ty = ctx.output_ty(insn, 0);
assert_eq!(
ty,
types::I128,
"Iconcat not expected to be used for non-128-bit type"
);
assert_eq!(ctx.input_ty(insn, 0), types::I64);
assert_eq!(ctx.input_ty(insn, 1), types::I64);
let lo = put_input_in_reg(ctx, inputs[0]);
let hi = put_input_in_reg(ctx, inputs[1]);
let dst = get_output_reg(ctx, outputs[0]);
ctx.emit(Inst::gen_move(dst.regs()[0], lo, types::I64));
ctx.emit(Inst::gen_move(dst.regs()[1], hi, types::I64));
}
Opcode::Isplit => {
let ty = ctx.input_ty(insn, 0);
assert_eq!(
ty,
types::I128,
"Iconcat not expected to be used for non-128-bit type"
);
assert_eq!(ctx.output_ty(insn, 0), types::I64);
assert_eq!(ctx.output_ty(insn, 1), types::I64);
let src = put_input_in_regs(ctx, inputs[0]);
let dst_lo = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
let dst_hi = get_output_reg(ctx, outputs[1]).only_reg().unwrap();
ctx.emit(Inst::gen_move(dst_lo, src.regs()[0], types::I64));
ctx.emit(Inst::gen_move(dst_hi, src.regs()[1], types::I64));
}
Opcode::TlsValue => match flags.tls_model() {
TlsModel::ElfGd => {
let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
let (name, _, _) = ctx.symbol_value(insn).unwrap();
let symbol = name.clone();
ctx.emit(Inst::ElfTlsGetAddr { symbol });
ctx.emit(Inst::gen_move(dst, regs::rax(), types::I64));
}
TlsModel::Macho => {
let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
let (name, _, _) = ctx.symbol_value(insn).unwrap();
let symbol = name.clone();
ctx.emit(Inst::MachOTlsGetAddr { symbol });
ctx.emit(Inst::gen_move(dst, regs::rax(), types::I64));
}
_ => {
todo!(
"Unimplemented TLS model in x64 backend: {:?}",
flags.tls_model()
);
}
},
Opcode::SqmulRoundSat => {
// Lane-wise saturating rounding multiplication in Q15 format
// Optimal lowering taken from instruction proposal https://github.com/WebAssembly/simd/pull/365
// y = i16x8.q15mulr_sat_s(a, b) is lowered to:
//MOVDQA xmm_y, xmm_a
//MOVDQA xmm_tmp, wasm_i16x8_splat(0x8000)
//PMULHRSW xmm_y, xmm_b
//PCMPEQW xmm_tmp, xmm_y
//PXOR xmm_y, xmm_tmp
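// The PCMPEQW/PXOR pair fixes up the one overflowing case: PMULHRSW of
// 0x8000 * 0x8000 produces 0x8000 (i.e. -1.0 * -1.0 == -1.0 in Q15), so
// lanes equal to the 0x8000 splat are flipped to the saturated 0x7FFF.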
let input_ty = ctx.input_ty(insn, 0);
let src1 = put_input_in_reg(ctx, inputs[0]);
let src2 = put_input_in_reg(ctx, inputs[1]);
let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
ctx.emit(Inst::gen_move(dst, src1, input_ty));
static SAT_MASK: [u8; 16] = [
0x00, 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, 0x80,
0x00, 0x80,
];
let mask_const = ctx.use_constant(VCodeConstantData::WellKnown(&SAT_MASK));
let mask = ctx.alloc_tmp(types::I16X8).only_reg().unwrap();
ctx.emit(Inst::xmm_load_const(mask_const, mask, types::I16X8));
ctx.emit(Inst::xmm_rm_r(SseOpcode::Pmulhrsw, RegMem::reg(src2), dst));
ctx.emit(Inst::xmm_rm_r(
SseOpcode::Pcmpeqw,
RegMem::reg(dst.to_reg()),
mask,
));
ctx.emit(Inst::xmm_rm_r(
SseOpcode::Pxor,
RegMem::reg(mask.to_reg()),
dst,
));
}
Opcode::Uunarrow => {
if let Some(fcvt_inst) = matches_input(ctx, inputs[0], Opcode::FcvtToUintSat) {
//y = i32x4.trunc_sat_f64x2_u_zero(x) is lowered to:
//MOVAPD xmm_y, xmm_x
//XORPD xmm_tmp, xmm_tmp
//MAXPD xmm_y, xmm_tmp
//MINPD xmm_y, [wasm_f64x2_splat(4294967295.0)]
//ROUNDPD xmm_y, xmm_y, 0x0B
//ADDPD xmm_y, [wasm_f64x2_splat(0x1.0p+52)]
//SHUFPS xmm_y, xmm_tmp, 0x88
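// The ADDPD of 0x1.0p+52 is the classic double-to-int trick: adding 2^52
// forces the (already truncated, in-range) value into the low mantissa
// bits, so the low dword of each lane holds the converted integer; the
// final SHUFPS then gathers those low dwords and zeroes the high lanes.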
let fcvt_input = InsnInput {
insn: fcvt_inst,
input: 0,
};
let input_ty = ctx.input_ty(fcvt_inst, 0);
let output_ty = ctx.output_ty(insn, 0);
let src = put_input_in_reg(ctx, fcvt_input);
let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
ctx.emit(Inst::gen_move(dst, src, input_ty));
let tmp1 = ctx.alloc_tmp(output_ty).only_reg().unwrap();
ctx.emit(Inst::xmm_rm_r(SseOpcode::Xorpd, RegMem::from(tmp1), tmp1));
ctx.emit(Inst::xmm_rm_r(SseOpcode::Maxpd, RegMem::from(tmp1), dst));
// 4294967295.0 is equivalent to 0x41EFFFFFFFE00000
static UMAX_MASK: [u8; 16] = [
0x00, 0x00, 0xE0, 0xFF, 0xFF, 0xFF, 0xEF, 0x41, 0x00, 0x00, 0xE0, 0xFF, 0xFF,
0xFF, 0xEF, 0x41,
];
let umax_const = ctx.use_constant(VCodeConstantData::WellKnown(&UMAX_MASK));
let umax_mask = ctx.alloc_tmp(types::F64X2).only_reg().unwrap();
ctx.emit(Inst::xmm_load_const(umax_const, umax_mask, types::F64X2));
//MINPD xmm_y, [wasm_f64x2_splat(4294967295.0)]
ctx.emit(Inst::xmm_rm_r(
SseOpcode::Minpd,
RegMem::from(umax_mask),
dst,
));
//ROUNDPD xmm_y, xmm_y, 0x0B
ctx.emit(Inst::xmm_rm_r_imm(
SseOpcode::Roundpd,
RegMem::reg(dst.to_reg()),
dst,
RoundImm::RoundZero.encode(),
OperandSize::Size32,
));
//ADDPD xmm_y, [wasm_f64x2_splat(0x1.0p+52)]
static UINT_MASK: [u8; 16] = [
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x43, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x30, 0x43,
];
let uint_mask_const = ctx.use_constant(VCodeConstantData::WellKnown(&UINT_MASK));
let uint_mask = ctx.alloc_tmp(types::F64X2).only_reg().unwrap();
ctx.emit(Inst::xmm_load_const(
uint_mask_const,
uint_mask,
types::F64X2,
));
ctx.emit(Inst::xmm_rm_r(
SseOpcode::Addpd,
RegMem::from(uint_mask),
dst,
));
//SHUFPS xmm_y, xmm_tmp, 0x88
ctx.emit(Inst::xmm_rm_r_imm(
SseOpcode::Shufps,
RegMem::reg(tmp1.to_reg()),
dst,
0x88,
OperandSize::Size32,
));
} else {
println!("Did not match fcvt input!");
}
}
// Unimplemented opcodes below. These are not currently used by Wasm
// lowering or other known embeddings, but should be either supported or
// removed eventually.
Opcode::Uload8x8Complex
| Opcode::Sload8x8Complex
| Opcode::Uload16x4Complex
| Opcode::Sload16x4Complex
| Opcode::Uload32x2Complex
| Opcode::Sload32x2Complex => {
unimplemented!("Vector load {:?} not implemented", op);
}
Opcode::Cls => unimplemented!("Cls not supported"),
Opcode::Fma => unimplemented!("Fma not supported"),
Opcode::BorNot | Opcode::BxorNot => {
unimplemented!("or-not / xor-not opcodes not implemented");
}
Opcode::Bmask => unimplemented!("Bmask not implemented"),
Opcode::Trueif | Opcode::Trueff => unimplemented!("trueif / trueff not implemented"),
Opcode::ConstAddr => unimplemented!("ConstAddr not implemented"),
Opcode::Vsplit | Opcode::Vconcat => {
unimplemented!("Vector split/concat ops not implemented.");
}
// Opcodes that should be removed by legalization. These should
// eventually be removed if/when we replace in-situ legalization with
// something better.
Opcode::Ifcmp | Opcode::Ffcmp => {
panic!("Should never reach ifcmp/ffcmp as isel root!");
}
Opcode::IaddImm
| Opcode::ImulImm
| Opcode::UdivImm
| Opcode::SdivImm
| Opcode::UremImm
| Opcode::SremImm
| Opcode::IrsubImm
| Opcode::IaddCin
| Opcode::IaddIfcin
| Opcode::IaddCout
| Opcode::IaddCarry
| Opcode::IaddIfcarry
| Opcode::IsubBin
| Opcode::IsubIfbin
| Opcode::IsubBout
| Opcode::IsubIfbout
| Opcode::IsubBorrow
| Opcode::IsubIfborrow
| Opcode::BandImm
| Opcode::BorImm
| Opcode::BxorImm
| Opcode::RotlImm
| Opcode::RotrImm
| Opcode::IshlImm
| Opcode::UshrImm
| Opcode::SshrImm
| Opcode::IcmpImm
| Opcode::IfcmpImm => {
panic!("ALU+imm and ALU+carry ops should not appear here!");
}
Opcode::StackLoad | Opcode::StackStore => {
panic!("Direct stack memory access not supported; should have been legalized");
}
Opcode::GlobalValue => {
panic!("global_value should have been removed by legalization!");
}
Opcode::HeapAddr => {
panic!("heap_addr should have been removed by legalization!");
}
Opcode::TableAddr => {
panic!("table_addr should have been removed by legalization!");
}
Opcode::IfcmpSp | Opcode::Copy => {
panic!("Unused opcode should not be encountered.");
}
Opcode::Trapz | Opcode::Trapnz | Opcode::ResumableTrapnz => {
panic!("trapz / trapnz / resumable_trapnz should have been removed by legalization!");
}
Opcode::Jump
| Opcode::Brz
| Opcode::Brnz
| Opcode::BrIcmp
| Opcode::Brif
| Opcode::Brff
| Opcode::BrTable => {
panic!("Branch opcode reached non-branch lowering logic!");
}
Opcode::Nop => {
// Nothing.
}
}
Ok(())
}
//=============================================================================
// Lowering-backend trait implementation.
impl LowerBackend for X64Backend {
type MInst = Inst;
fn lower<C: LowerCtx<I = Inst>>(&self, ctx: &mut C, ir_inst: IRInst) -> CodegenResult<()> {
lower_insn_to_regs(ctx, ir_inst, &self.flags, &self.x64_flags, &self.triple)
}
fn lower_branch_group<C: LowerCtx<I = Inst>>(
&self,
ctx: &mut C,
branches: &[IRInst],
targets: &[MachLabel],
) -> CodegenResult<()> {
// A block should end with at most two branches. The first may be a
// conditional branch; a conditional branch can be followed only by an
// unconditional branch or fallthrough. Otherwise, if only one branch,
// it may be an unconditional branch, a fallthrough, a return, or a
// trap. These conditions are verified by `is_ebb_basic()` during the
// verifier pass.
assert!(branches.len() <= 2);
if branches.len() == 2 {
// Must be a conditional branch followed by an unconditional branch.
let op0 = ctx.data(branches[0]).opcode();
let op1 = ctx.data(branches[1]).opcode();
trace!(
"lowering two-branch group: opcodes are {:?} and {:?}",
op0,
op1
);
assert!(op1 == Opcode::Jump);
let taken = targets[0];
// not_taken target is the target of the second branch.
let not_taken = targets[1];
match op0 {
Opcode::Brz | Opcode::Brnz => {
let flag_input = InsnInput {
insn: branches[0],
input: 0,
};
let src_ty = ctx.input_ty(branches[0], 0);
if let Some(icmp) = matches_input(ctx, flag_input, Opcode::Icmp) {
let cond_code = ctx.data(icmp).cond_code().unwrap();
let cond_code = emit_cmp(ctx, icmp, cond_code);
let cond_code = if op0 == Opcode::Brz {
cond_code.inverse()
} else {
cond_code
};
let cc = CC::from_intcc(cond_code);
ctx.emit(Inst::jmp_cond(cc, taken, not_taken));
} else if let Some(fcmp) = matches_input(ctx, flag_input, Opcode::Fcmp) {
let cond_code = ctx.data(fcmp).fp_cond_code().unwrap();
let cond_code = if op0 == Opcode::Brz {
cond_code.inverse()
} else {
cond_code
};
match emit_fcmp(ctx, fcmp, cond_code, FcmpSpec::Normal) {
FcmpCondResult::Condition(cc) => {
ctx.emit(Inst::jmp_cond(cc, taken, not_taken));
}
FcmpCondResult::AndConditions(cc1, cc2) => {
ctx.emit(Inst::jmp_if(cc1.invert(), not_taken));
ctx.emit(Inst::jmp_cond(cc2.invert(), not_taken, taken));
}
FcmpCondResult::OrConditions(cc1, cc2) => {
ctx.emit(Inst::jmp_if(cc1, taken));
ctx.emit(Inst::jmp_cond(cc2, taken, not_taken));
}
FcmpCondResult::InvertedEqualOrConditions(_, _) => unreachable!(),
}
} else if src_ty == types::I128 {
let src = put_input_in_regs(
ctx,
InsnInput {
insn: branches[0],
input: 0,
},
);
let (half_cc, comb_op) = match op0 {
Opcode::Brz => (CC::Z, AluRmiROpcode::And8),
Opcode::Brnz => (CC::NZ, AluRmiROpcode::Or8),
_ => unreachable!(),
};
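                        // Compare each 64-bit half against zero, capture each
                        // half's result as a byte with setcc, then combine the
                        // bytes (AND for brz, OR for brnz) and branch on NZ.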
let tmp1 = ctx.alloc_tmp(types::I64).only_reg().unwrap();
let tmp2 = ctx.alloc_tmp(types::I64).only_reg().unwrap();
ctx.emit(Inst::cmp_rmi_r(
OperandSize::Size64,
RegMemImm::imm(0),
src.regs()[0],
));
ctx.emit(Inst::setcc(half_cc, tmp1));
ctx.emit(Inst::cmp_rmi_r(
OperandSize::Size64,
RegMemImm::imm(0),
src.regs()[1],
));
ctx.emit(Inst::setcc(half_cc, tmp2));
ctx.emit(Inst::alu_rmi_r(
OperandSize::Size32,
comb_op,
RegMemImm::reg(tmp1.to_reg()),
tmp2,
));
ctx.emit(Inst::jmp_cond(CC::NZ, taken, not_taken));
} else if is_int_or_ref_ty(src_ty) || is_bool_ty(src_ty) {
let src = put_input_in_reg(
ctx,
InsnInput {
insn: branches[0],
input: 0,
},
);
let cc = match op0 {
Opcode::Brz => CC::Z,
Opcode::Brnz => CC::NZ,
_ => unreachable!(),
};
// See case for `Opcode::Select` above re: testing the
// boolean input.
let test_input = if src_ty == types::B1 {
// test src, 1
RegMemImm::imm(1)
} else {
assert!(!is_bool_ty(src_ty));
// test src, src
RegMemImm::reg(src)
};
ctx.emit(Inst::test_rmi_r(
OperandSize::from_ty(src_ty),
test_input,
src,
));
ctx.emit(Inst::jmp_cond(cc, taken, not_taken));
} else {
unimplemented!("brz/brnz with non-int type {:?}", src_ty);
}
}
Opcode::BrIcmp => {
let src_ty = ctx.input_ty(branches[0], 0);
if is_int_or_ref_ty(src_ty) || is_bool_ty(src_ty) {
let lhs = put_input_in_reg(
ctx,
InsnInput {
insn: branches[0],
input: 0,
},
);
let rhs = input_to_reg_mem_imm(
ctx,
InsnInput {
insn: branches[0],
input: 1,
},
);
let cc = CC::from_intcc(ctx.data(branches[0]).cond_code().unwrap());
// Cranelift's icmp semantics want to compare lhs - rhs, while Intel gives
// us dst - src at the machine instruction level, so invert operands.
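                        // E.g. (AT&T syntax, illustrative): `br_icmp slt v0, v1`
                        // lowers to `cmp %v1, %v0` (computing v0 - v1), `jl taken`.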
ctx.emit(Inst::cmp_rmi_r(OperandSize::from_ty(src_ty), rhs, lhs));
ctx.emit(Inst::jmp_cond(cc, taken, not_taken));
} else {
unimplemented!("bricmp with non-int type {:?}", src_ty);
}
}
Opcode::Brif => {
let flag_input = InsnInput {
insn: branches[0],
input: 0,
};
if let Some(ifcmp) = matches_input(ctx, flag_input, Opcode::Ifcmp) {
let cond_code = ctx.data(branches[0]).cond_code().unwrap();
let cond_code = emit_cmp(ctx, ifcmp, cond_code);
let cc = CC::from_intcc(cond_code);
ctx.emit(Inst::jmp_cond(cc, taken, not_taken));
} else if let Some(ifcmp_sp) = matches_input(ctx, flag_input, Opcode::IfcmpSp) {
let operand = put_input_in_reg(
ctx,
InsnInput {
insn: ifcmp_sp,
input: 0,
},
);
let ty = ctx.input_ty(ifcmp_sp, 0);
ctx.emit(Inst::cmp_rmi_r(
OperandSize::from_ty(ty),
RegMemImm::reg(regs::rsp()),
operand,
));
let cond_code = ctx.data(branches[0]).cond_code().unwrap();
let cc = CC::from_intcc(cond_code);
ctx.emit(Inst::jmp_cond(cc, taken, not_taken));
} else {
// Should be disallowed by flags checks in verifier.
unimplemented!("Brif with non-ifcmp input");
}
}
Opcode::Brff => {
let flag_input = InsnInput {
insn: branches[0],
input: 0,
};
if let Some(ffcmp) = matches_input(ctx, flag_input, Opcode::Ffcmp) {
let cond_code = ctx.data(branches[0]).fp_cond_code().unwrap();
match emit_fcmp(ctx, ffcmp, cond_code, FcmpSpec::Normal) {
FcmpCondResult::Condition(cc) => {
ctx.emit(Inst::jmp_cond(cc, taken, not_taken));
}
FcmpCondResult::AndConditions(cc1, cc2) => {
ctx.emit(Inst::jmp_if(cc1.invert(), not_taken));
ctx.emit(Inst::jmp_cond(cc2.invert(), not_taken, taken));
}
FcmpCondResult::OrConditions(cc1, cc2) => {
ctx.emit(Inst::jmp_if(cc1, taken));
ctx.emit(Inst::jmp_cond(cc2, taken, not_taken));
}
FcmpCondResult::InvertedEqualOrConditions(_, _) => unreachable!(),
}
} else {
// Should be disallowed by flags checks in verifier.
unimplemented!("Brff with input not from ffcmp");
}
}
_ => panic!("unexpected branch opcode: {:?}", op0),
}
} else {
assert_eq!(branches.len(), 1);
// Must be an unconditional branch or trap.
let op = ctx.data(branches[0]).opcode();
match op {
Opcode::Jump => {
ctx.emit(Inst::jmp_known(targets[0]));
}
Opcode::BrTable => {
let jt_size = targets.len() - 1;
assert!(jt_size <= u32::MAX as usize);
let jt_size = jt_size as u32;
let ty = ctx.input_ty(branches[0], 0);
let ext_spec = match ty {
types::I128 => panic!("BrTable unimplemented for I128"),
types::I64 => ExtSpec::ZeroExtendTo64,
_ => ExtSpec::ZeroExtendTo32,
};
let idx = extend_input_to_reg(
ctx,
InsnInput {
insn: branches[0],
input: 0,
},
ext_spec,
);
// Bounds-check (compute flags from idx - jt_size) and branch to default.
                    // We only support u32::MAX entries, but we compare the full 64-bit register
// when doing the bounds check.
let cmp_size = if ty == types::I64 {
OperandSize::Size64
} else {
OperandSize::Size32
};
ctx.emit(Inst::cmp_rmi_r(cmp_size, RegMemImm::imm(jt_size), idx));
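                    // The JmpTableSeq emitted below consumes these flags: when
                    // `idx >= jt_size` (unsigned), it jumps to the default
                    // target instead of indexing into the table.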
// Emit the compound instruction that does:
//
// lea $jt, %rA
// movsbl [%rA, %rIndex, 2], %rB
// add %rB, %rA
// j *%rA
// [jt entries]
//
// This must be *one* instruction in the vcode because we cannot allow regalloc
// to insert any spills/fills in the middle of the sequence; otherwise, the
// lea PC-rel offset to the jumptable would be incorrect. (The alternative
// is to introduce a relocation pass for inlined jumptables, which is much
// worse.)
// This temporary is used as a signed integer of 64-bits (to hold addresses).
let tmp1 = ctx.alloc_tmp(types::I64).only_reg().unwrap();
// This temporary is used as a signed integer of 32-bits (for the wasm-table
// index) and then 64-bits (address addend). The small lie about the I64 type
// is benign, since the temporary is dead after this instruction (and its
// Cranelift type is thus unused).
let tmp2 = ctx.alloc_tmp(types::I64).only_reg().unwrap();
let targets_for_term: Vec<MachLabel> = targets.to_vec();
let default_target = targets[0];
let jt_targets: Vec<MachLabel> = targets.iter().skip(1).cloned().collect();
ctx.emit(Inst::JmpTableSeq {
idx,
tmp1,
tmp2,
default_target,
targets: jt_targets,
targets_for_term,
});
}
_ => panic!("Unknown branch type {:?}", op),
}
}
Ok(())
}
fn maybe_pinned_reg(&self) -> Option<Reg> {
Some(regs::pinned_reg())
}
}
|
{
put_input_in_regs(ctx, spec)
.only_reg()
.expect("Multi-register value not expected")
}
|
nvme_passthru.rs
|
use alloc::vec::Vec;
use core::mem::MaybeUninit;
use bitflags::bitflags;
use core::{
fmt::{Debug, Display, Formatter},
ptr::null_mut,
slice,
};
use uefi::{
data_types::unsafe_guid,
newtype_enum,
proto::{device_path::DevicePath, Protocol},
Event, Status,
};
#[unsafe_guid("52c78312-8edc-4233-98f2-1a1aa5e388a5")]
#[derive(Protocol)]
#[repr(C)]
pub struct NvmExpressPassthru {
mode: *const Mode,
pass_thru: unsafe extern "efiapi" fn(
this: &NvmExpressPassthru,
namespace_id: u32,
packet: &mut CommandPacket,
event: Event,
) -> Status,
get_next_namespace:
unsafe extern "efiapi" fn(this: &NvmExpressPassthru, namespace_id: &mut u32) -> Status,
build_device_path: unsafe extern "efiapi" fn(
this: &NvmExpressPassthru,
namespace_id: u32,
device_path: &mut *mut DevicePath,
) -> Status,
get_namespace: unsafe extern "efiapi" fn(
this: &NvmExpressPassthru,
device_path: &DevicePath,
namespace_id: &mut u32,
) -> Status,
}
impl NvmExpressPassthru {
pub fn mode(&self) -> &Mode {
unsafe { &*self.mode }
}
pub unsafe fn send<'a, 'b: 'a>(
&'a mut self,
target: SendTarget,
packet: &mut CommandPacket<'b>,
) -> uefi::Result<NvmeCompletion> {
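        // A zeroed `Event` is a null event, which makes the firmware execute
        // the command synchronously (blocking I/O) per the UEFI spec.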
self.send_async(target, packet, core::mem::zeroed())
}
pub unsafe fn
|
<'a, 'b: 'a>(
&'a mut self,
target: SendTarget,
        packet: &mut CommandPacket<'b>,
event: Event,
) -> uefi::Result<NvmeCompletion> {
let id = match target {
SendTarget::Controller => 0,
SendTarget::Namespace(id) => id.to_u32(),
SendTarget::AllNamespaces => 0xFFFFFFFF,
};
let mut completion = Default::default();
packet.completion = Some(&mut completion);
(self.pass_thru)(self, id, packet, event).into_with_val(|| completion)
}
    /// Shorthand that assumes there is always at least one namespace on the NVMe controller.
pub fn first_namespace(&self) -> uefi::Result<NamespaceId> {
let mut namespace_id = 0xFFFFFFFF;
unsafe { (self.get_next_namespace)(self, &mut namespace_id) }
.into_with_val(|| NamespaceId(namespace_id))
}
pub fn list_namespaces(&self) -> uefi::Result<Vec<NamespaceId>> {
let mut namespace_id = 0xFFFFFFFF;
let mut result = Vec::new();
loop {
let status = unsafe { (self.get_next_namespace)(self, &mut namespace_id) };
if status.is_success() {
result.push(NamespaceId(namespace_id));
} else if status == Status::NOT_FOUND {
break Status::SUCCESS.into_with_val(|| result);
} else {
break Err(status.into());
}
}
}
pub fn build_device_path(&self, namespace_id: NamespaceId) -> uefi::Result<&mut DevicePath> {
let mut device_path = null_mut();
unsafe { (self.build_device_path)(self, namespace_id.to_u32(), &mut device_path) }
.into_with_val(|| unsafe { device_path.as_mut() }.unwrap())
}
pub fn get_namespace(&self, device_path: &DevicePath) -> uefi::Result<NamespaceId> {
let mut namespace_id = 0;
unsafe { (self.get_namespace)(self, device_path, &mut namespace_id) }
.into_with_val(|| unsafe { NamespaceId::new(namespace_id) })
}
}
#[derive(Debug)]
pub enum SendTarget {
    /// Send the command to the controller itself (namespace ID 0).
    Controller,
    /// Send the command to a single namespace.
    Namespace(NamespaceId),
    /// Broadcast the command to all namespaces (namespace ID 0xFFFFFFFF).
    AllNamespaces,
}
newtype_enum! {
#[must_use]
pub enum QueueType: u8 => {
ADMIN = 0,
IO = 1,
}
}
bitflags! {
#[repr(transparent)]
pub struct Attributes: u32 {
const PHYSICAL = 0x01;
const LOGICAL = 0x02;
const NONBLOCKIO = 0x04;
const CMD_SET_NVM = 0x08;
}
}
#[derive(Copy, Clone)]
#[repr(C)]
pub struct Version {
data: [u8; 4],
}
impl Version {
pub fn major(&self) -> u16 {
(self.data[0] as u16) << 8 | self.data[1] as u16
}
pub fn minor(&self) -> u8 {
self.data[2]
}
pub fn tertiary(&self) -> u8 {
self.data[3]
}
}
impl Debug for Version {
fn fmt(&self, f: &mut Formatter) -> core::fmt::Result {
f.debug_struct("Version")
.field("major", &self.major())
.field("minor", &self.major())
.field("tertiary", &self.tertiary())
.finish()
}
}
impl Display for Version {
fn fmt(&self, f: &mut Formatter) -> core::fmt::Result {
write!(f, "{}.{}.{}", self.major(), self.minor(), self.tertiary())
}
}
#[derive(Debug)]
#[repr(C)]
pub struct Mode {
pub attributes: Attributes,
pub io_align: u32,
pub version: Version,
}
#[derive(Debug, Ord, PartialOrd, Eq, PartialEq, Copy, Clone)]
#[repr(transparent)]
pub struct NamespaceId(u32);
impl NamespaceId {
    /// # Safety
    /// This constructor allows building arbitrary, unchecked `NamespaceId`s.
    ///
    /// It is `unsafe` so that the only safe way to obtain one is to actually
    /// query the device, and because the all-zero and all-one bit patterns are
    /// not valid namespace IDs.
pub const unsafe fn new(id: u32) -> NamespaceId {
NamespaceId(id)
}
pub const fn to_u32(self) -> u32 {
self.0
}
}
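// A hedged sketch of the intended flow: obtain namespace IDs by querying the
// device rather than constructing them, e.g.
//
//     let nsid = passthru.first_namespace()?;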
/// Generic command timeout, in 100 ns units per UEFI (5_000_000 = 500 ms).
pub const NVME_GENERIC_TIMEOUT: u64 = 5_000_000;
bitflags! {
#[repr(transparent)]
struct CdwValidityFlags: u8 {
const CDW_2 = 0x01;
const CDW_3 = 0x02;
const CDW_10 = 0x04;
const CDW_11 = 0x08;
const CDW_12 = 0x10;
const CDW_13 = 0x20;
const CDW_14 = 0x40;
const CDW_15 = 0x80;
}
}
#[repr(C)]
#[derive(Copy, Clone, Debug)]
struct Cdw0 {
opcode: u8, // UINT32 Opcode:8;
fused: u8, // UINT32 FusedOperation:2;
_res1: u8, //
_res2: u8, // UINT32 Reserved:22;
}
#[repr(C)]
#[derive(Debug)]
pub struct Command {
cdw_0: Cdw0,
flags: CdwValidityFlags,
pub nsid: u32,
pub cdw_2: u32,
pub cdw_3: u32,
pub cdw_10: u32,
pub cdw_11: u32,
pub cdw_12: u32,
pub cdw_13: u32,
pub cdw_14: u32,
pub cdw_15: u32,
}
macro_rules! cdws {
($($name:ident: $flag:ident;)*) => {
$(
pub const fn $name(mut self, $name: u32) -> Self {
self.$name = $name;
self.flags.bits |= CdwValidityFlags::$flag.bits;
self
}
)*
};
}
impl Command {
pub const fn new(opcode: u8) -> Command {
Command {
cdw_0: Cdw0 {
opcode,
fused: 0,
_res1: 0,
_res2: 0,
},
flags: CdwValidityFlags::empty(),
nsid: 0,
cdw_2: 0,
cdw_3: 0,
cdw_10: 0,
cdw_11: 0,
cdw_12: 0,
cdw_13: 0,
cdw_14: 0,
cdw_15: 0,
}
}
    pub const fn fused_first(mut self) -> Self {
        // FUSE occupies the low two bits of this byte (bits 9:8 of CDW0);
        // 01b marks the first command of a fused pair.
        self.cdw_0.fused |= 0b01;
        self
    }
    pub const fn fused_second(mut self) -> Self {
        // 10b marks the second command of a fused pair.
        self.cdw_0.fused |= 0b10;
        self
    }
pub const fn ns(mut self, namespace: NamespaceId) -> Self {
self.nsid = namespace.to_u32();
self
}
cdws! {
cdw_2: CDW_2;
cdw_3: CDW_3;
cdw_10: CDW_10;
cdw_11: CDW_11;
cdw_12: CDW_12;
cdw_13: CDW_13;
cdw_14: CDW_14;
cdw_15: CDW_15;
}
}
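// Example (illustrative): an NVMe Identify Controller admin command is
// opcode 0x06 with CNS = 1 in CDW10:
//
//     let identify = Command::new(0x06).cdw_10(1);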
#[repr(C)]
#[derive(Debug, Default, Clone)]
pub struct NvmeCompletion {
pub dw_0: u32,
pub dw_1: u32,
pub dw_2: u32,
pub dw_3: u32,
}
#[repr(C)]
pub struct CommandPacket<'a> {
pub timeout: u64,
transfer_buffer: *mut MaybeUninit<u8>,
transfer_length: u32,
metadata_buffer: *mut MaybeUninit<u8>,
metadata_length: u32,
pub queue_type: QueueType,
pub cmd: &'a Command,
pub completion: Option<*mut NvmeCompletion>,
}
impl Debug for CommandPacket<'_> {
    fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
        // Passing a null pointer to `slice::from_raw_parts` is undefined
        // behavior, so absent buffers are rendered as empty slices.
        let transfer: &[MaybeUninit<u8>] = if self.transfer_buffer.is_null() {
            &[]
        } else {
            unsafe { slice::from_raw_parts(self.transfer_buffer, self.transfer_length as usize) }
        };
        let metadata: &[MaybeUninit<u8>] = if self.metadata_buffer.is_null() {
            &[]
        } else {
            unsafe { slice::from_raw_parts(self.metadata_buffer, self.metadata_length as usize) }
        };
        f.debug_struct("CommandPacket")
            .field("timeout", &self.timeout)
            .field("transfer_buffer", &transfer)
            .field("metadata_buffer", &metadata)
            .field("queue_type", &self.queue_type)
            .field("cmd", &self.cmd)
            .finish()
    }
}
impl<'a> CommandPacket<'a> {
pub fn new(
timeout: u64,
transfer_buffer: Option<&mut [MaybeUninit<u8>]>,
metadata_buffer: Option<&mut [MaybeUninit<u8>]>,
queue_type: QueueType,
cmd: &'a Command,
) -> Self {
let (transfer_buffer, transfer_length) = transfer_buffer
.map(|it| (it.as_mut_ptr(), it.len() as u32))
.unwrap_or((null_mut(), 0));
let (metadata_buffer, metadata_length) = metadata_buffer
.map(|it| (it.as_mut_ptr(), it.len() as u32))
.unwrap_or((null_mut(), 0));
Self {
timeout,
transfer_buffer,
transfer_length,
metadata_buffer,
metadata_length,
queue_type,
cmd,
completion: None,
}
}
}
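// Putting it together (sketch; opcode, buffer size, and queue follow the
// Identify example above):
//
//     let mut buf = [MaybeUninit::<u8>::uninit(); 4096];
//     let cmd = Command::new(0x06).cdw_10(1);
//     let mut packet = CommandPacket::new(
//         NVME_GENERIC_TIMEOUT, Some(&mut buf), None, QueueType::ADMIN, &cmd);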
|
send_async
|
error.rs
|
//! jsonrpc errors
use serde::de::{Deserialize, Deserializer};
use serde::ser::{Serialize, Serializer};
use super::Value;
/// JSONRPC error code
#[derive(Debug, PartialEq, Clone)]
pub enum ErrorCode {
/// Invalid JSON was received by the server.
/// An error occurred on the server while parsing the JSON text.
ParseError,
/// The JSON sent is not a valid Request object.
InvalidRequest,
/// The method does not exist / is not available.
MethodNotFound,
/// Invalid method parameter(s).
InvalidParams,
/// Internal JSON-RPC error.
InternalError,
/// Reserved for implementation-defined server-errors.
ServerError(i64)
}
impl ErrorCode {
/// Returns integer code value
pub fn code(&self) -> i64 {
match *self {
ErrorCode::ParseError => -32700,
ErrorCode::InvalidRequest => -32600,
ErrorCode::MethodNotFound => -32601,
ErrorCode::InvalidParams => -32602,
ErrorCode::InternalError => -32603,
ErrorCode::ServerError(code) => code
}
}
/// Returns human-readable description
pub fn description(&self) -> String {
let desc = match *self {
ErrorCode::ParseError => "Parse error",
ErrorCode::InvalidRequest => "Invalid request",
ErrorCode::MethodNotFound => "Method not found",
ErrorCode::InvalidParams => "Invalid params",
ErrorCode::InternalError => "Internal error",
ErrorCode::ServerError(_) => "Server error",
};
desc.to_string()
}
}
impl<'de> Deserialize<'de> for ErrorCode {
    fn deserialize<D>(deserializer: D) -> Result<ErrorCode, D::Error>
    where D: Deserializer<'de> {
        let v: Value = Deserialize::deserialize(deserializer)?;
        match v.as_i64() {
            Some(-32700) => Ok(ErrorCode::ParseError),
            Some(-32600) => Ok(ErrorCode::InvalidRequest),
            Some(-32601) => Ok(ErrorCode::MethodNotFound),
            Some(-32602) => Ok(ErrorCode::InvalidParams),
            Some(-32603) => Ok(ErrorCode::InternalError),
            Some(code) => Ok(ErrorCode::ServerError(code)),
            // A non-integer code is malformed input, not a programmer error:
            // report it instead of panicking.
            None => Err(serde::de::Error::custom("error code must be an integer")),
        }
    }
}
impl Serialize for ErrorCode {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where S: Serializer {
serializer.serialize_i64(self.code())
}
}
/// Error object as defined in Spec
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]
pub struct Error {
/// Code
pub code: ErrorCode,
/// Message
pub message: String,
/// Optional data
#[serde(skip_serializing_if = "Option::is_none")]
pub data: Option<Value>
}
impl Error {
/// Wraps given `ErrorCode`
pub fn new(code: ErrorCode) -> Self {
Error {
message: code.description(),
            code,
data: None
}
}
/// Creates new `ParseError`
pub fn parse_error() -> Self
|
/// Creates new `InvalidRequest`
pub fn invalid_request() -> Self {
Self::new(ErrorCode::InvalidRequest)
}
/// Creates new `MethodNotFound`
pub fn method_not_found() -> Self {
Self::new(ErrorCode::MethodNotFound)
}
/// Creates new `InvalidParams`
pub fn invalid_params<M>(message: M) -> Self where
M: Into<String>,
{
Error {
code: ErrorCode::InvalidParams,
message: message.into(),
data: None,
}
}
/// Creates new `InternalError`
pub fn internal_error() -> Self {
Self::new(ErrorCode::InternalError)
}
/// Creates new `InvalidRequest` with invalid version description
pub fn invalid_version() -> Self {
Error {
code: ErrorCode::InvalidRequest,
message: "Unsupported JSON-RPC protocol version".to_owned(),
data: None,
}
}
}
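// For reference: `Error::method_not_found()` serializes as
// `{"code":-32601,"message":"Method not found"}`; `data` is omitted when it is
// `None` via `skip_serializing_if`.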
|
{
Self::new(ErrorCode::ParseError)
}
|
darts.py
|
def
|
(x, y):
    # Compare the squared distance from the origin against squared radii,
    # avoiding a needless sqrt.
    distance_squared = x ** 2 + y ** 2
    return 10 if distance_squared <= 1 ** 2 else 5 if distance_squared <= 5 ** 2 else 1 if distance_squared <= 10 ** 2 else 0
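# Example: score(0, 0) == 10, score(0, 7) == 1, score(9, 9) == 0.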
|
score
|
files.go
|
package main
import (
    "github.com/dresswithpockets/go-vgui"
    "github.com/faiface/pixel"
    "image"
    // Blank-import decoders so image.Decode can recognize the asset formats;
    // without them Decode returns image.ErrFormat. (PNG/JPEG assumed here.)
    _ "image/jpeg"
    _ "image/png"
    "os"
)
type PictureProvider interface {
getPicture(path string) (pixel.Picture, error)
}
type VguiProvider interface {
getObject(path string) (*vgui.Object, error)
}
type SourcePictureProvider struct {
pictures map[string]pixel.Picture
fileSourceProvider vgui.FileSourceProvider
}
type SourceVguiProvider struct {
roots map[string]*vgui.Object
fileSourceProvider vgui.FileSourceProvider
}
// getPicture returns a pixel.Picture from the path, and caches it in pictures.
// If the pixel.Picture is not already loaded & mapped to the path, it will load it according to fileSourceProvider
func (p *SourcePictureProvider) getPicture(path string) (pixel.Picture, error) {
abs, err := p.fileSourceProvider.GetAbsolute(path)
if err != nil {
return nil, err
}
if pic, ok := p.pictures[abs]; ok {
return pic, nil
}
pic, err := loadPicture(abs)
if err != nil {
return nil, err
}
p.pictures[abs] = pic
return pic, nil
}
func (p *SourceVguiProvider) getObject(path string) (*vgui.Object, error) {
return vgui.FromFileSourceProvider(path, p.fileSourceProvider)
}
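// Construction sketch (illustrative): both providers memoize against a shared
// vgui.FileSourceProvider, e.g.
//
//	pics := &SourcePictureProvider{pictures: map[string]pixel.Picture{}, fileSourceProvider: fsp}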
func
|
(path string) (pixel.Picture, error) {
file, err := os.Open(path)
if err != nil {
return nil, err
}
defer file.Close()
img, _, err := image.Decode(file)
if err != nil {
return nil, err
}
return pixel.PictureDataFromImage(img), nil
}
|
loadPicture
|
post.tsx
|
import React from "react"
import styled from 'styled-components'
import LatestPostsBase from "../components/LatestPosts"
import { MDXProvider } from "@mdx-js/react"
import CodeWindow from "../components/CodeWindow"
import thing from '../images/thing.svg'
import { graphql, Link as GatsbyLink } from 'gatsby'
import { MDXRenderer } from "gatsby-plugin-mdx"
import Layout from "../components/Layout"
import Seo from "../components/Seo"
import Tags from "../components/Tags"
import { helmetJsonLdProp } from "react-schemaorg";
import { BlogPosting } from "schema-dts";
const StyledCodeWindow = styled(CodeWindow)`
margin: 48px auto;
`
function
|
({ children }) {
const { props: { children: source, className: classLanguage, title } } = children
const language = classLanguage ? classLanguage.replace(/language-/, '') : ''
return <StyledCodeWindow language={language} source={source} title={title} />
}
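// Note (sketch): gatsby-plugin-mdx renders a fenced code block as
// <pre><code className="language-xxx">…</code></pre>; the destructuring above
// unpacks that shape (the `title` prop assumes a remark plugin forwards it).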
const StyledP = styled.p`
margin: 36px auto 0;
font-size: 18px;
line-height: 1.9;
code {
font-family: Inconsolata, monospace;
padding: 3px;
border: 1px solid #aaa;
border-radius: 6px;
}
`
const StyledH1 = styled.h1`
font-size: 60px;
background: linear-gradient(266.96deg, #3FC5FF -21.03%, #FF3EC9 100%);
-webkit-background-clip: text;
-webkit-text-fill-color: transparent;
letter-spacing: -0.04em;
margin: 30px auto 0;
font-weight: 900;
line-height: 1;
padding-bottom: 1rem;
`
const StyledH2 = styled.h2`
font-size: 30px;
margin: 127px auto 0;
position: relative;
::before {
width: 7px;
height: 72px;
content: '';
left: 3px;
position: absolute;
bottom: -20px;
        top: -98px;
background: url(${thing});
}
`
const StyledH3 = styled.h3`
font-size: 24px;
margin: 48px auto 0;
`
const StyledUl = styled.div`
margin: 48px 0 0;
`
const StyledLi = styled.li`
margin: 12px 0 0;
line-height: 1.9;
`
const components = {
p: props => <StyledP {...props}>{props.children}</StyledP>,
h1: props => <StyledH1 {...props}>{props.children}</StyledH1>,
h2: props => <StyledH2 level="2" {...props}>{props.children}</StyledH2>,
h3: props => <StyledH3 level="3" {...props}>{props.children}</StyledH3>,
pre: props => <MDXCodeBlock {...props} />,
ul: props => <StyledUl {...props} />,
li: props => <StyledLi {...props} />,
}
const LatestPostHeading = styled.h3`
background: linear-gradient(266.96deg, #3FC5FF -21.03%, #FF3EC9 100%);
-webkit-background-clip: text;
-webkit-text-fill-color: transparent;
letter-spacing: -0.04em;
margin: 54px 0 0;
`
const LatestPosts = styled(LatestPostsBase)`
margin: 24px 0 0;
`
const PostInfo = styled.div`
display: flex;
align-items: center;
justify-content: space-between;
font-size: 12px;
`
const DatePublished = styled.div`
`
const TimeToRead = styled.p`
`
const LayoutLink = styled(GatsbyLink)`
text-decoration: none;
font-size: 12px;
`
const StyledTags = styled(Tags)`
`
const Wrapper = styled.div`
max-width: 768px;
margin: 0 auto;
`
const PostLayout = (props) => {
const { mdx: { body, frontmatter, parent, timeToRead, slug }, site: {siteMetadata} } = props.data
const jsonLd = helmetJsonLdProp<BlogPosting>({
"@context": "https://schema.org",
"@type": "BlogPosting",
"headline": frontmatter.title,
"datePublished": frontmatter.date,
"dateModified": parent.modifiedTime,
"url": `${siteMetadata.url}/blog/${slug}`,
"author": [{
"@type": "Person",
"name": "Cesar Varela",
"url": siteMetadata.url,
}]
})
return <Layout content={<LayoutLink to="/blog">Blog</LayoutLink>}>
<Seo title={frontmatter.title} script={jsonLd} />
<Wrapper>
<StyledH1>{frontmatter.title}</StyledH1>
<PostInfo>
<DatePublished>{frontmatter.date}</DatePublished>
<TimeToRead>{timeToRead} min.</TimeToRead>
</PostInfo>
<StyledTags tags={frontmatter.tags} />
<MDXProvider components={components}>
<MDXRenderer>
{body}
</MDXRenderer>
</MDXProvider>
<LatestPostHeading>Latest posts</LatestPostHeading>
<LatestPosts />
</Wrapper>
    </Layout>
}
export const query = graphql`
query($slug: String!) {
mdx(slug: {eq: $slug}) {
body
frontmatter {
date(formatString: "MMMM DD, YYYY")
title
tags
}
slug
timeToRead
parent {
... on File {
id
name
modifiedTime(formatString: "MMMM DD, YYYY")
}
}
}
site {
siteMetadata {
url
}
}
}`
export default PostLayout
|
MDXCodeBlock
|
json_deser.rs
|
// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
pub fn parse_http_generic_error(
response: &http::Response<bytes::Bytes>,
) -> Result<aws_smithy_types::Error, aws_smithy_json::deserialize::Error> {
crate::json_errors::parse_generic_error(response.body(), response.headers())
}
pub fn deser_structure_crate_error_access_denied_exception_json_err(
value: &[u8],
mut builder: crate::error::access_denied_exception::Builder,
) -> Result<crate::error::access_denied_exception::Builder, aws_smithy_json::deserialize::Error> {
let mut tokens_owned =
aws_smithy_json::deserialize::json_token_iter(crate::json_deser::or_empty_doc(value))
.peekable();
let tokens = &mut tokens_owned;
aws_smithy_json::deserialize::token::expect_start_object(tokens.next())?;
loop {
match tokens.next().transpose()? {
Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break,
Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => {
match key.to_unescaped()?.as_ref() {
"Message" => {
builder = builder.set_message(
aws_smithy_json::deserialize::token::expect_string_or_null(
tokens.next(),
)?
.map(|s| s.to_unescaped().map(|u| u.into_owned()))
.transpose()?,
);
}
_ => aws_smithy_json::deserialize::token::skip_value(tokens)?,
}
}
other => {
return Err(aws_smithy_json::deserialize::Error::custom(format!(
"expected object key or end object, found: {:?}",
other
)))
}
}
}
if tokens.next().is_some() {
return Err(aws_smithy_json::deserialize::Error::custom(
"found more JSON tokens after completing parsing",
));
}
Ok(builder)
}
pub fn deser_structure_crate_error_conflict_exception_json_err(
value: &[u8],
mut builder: crate::error::conflict_exception::Builder,
) -> Result<crate::error::conflict_exception::Builder, aws_smithy_json::deserialize::Error> {
let mut tokens_owned =
aws_smithy_json::deserialize::json_token_iter(crate::json_deser::or_empty_doc(value))
.peekable();
let tokens = &mut tokens_owned;
aws_smithy_json::deserialize::token::expect_start_object(tokens.next())?;
loop {
match tokens.next().transpose()? {
Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break,
Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => {
match key.to_unescaped()?.as_ref() {
"Message" => {
builder = builder.set_message(
aws_smithy_json::deserialize::token::expect_string_or_null(
tokens.next(),
)?
.map(|s| s.to_unescaped().map(|u| u.into_owned()))
.transpose()?,
);
}
_ => aws_smithy_json::deserialize::token::skip_value(tokens)?,
}
}
other => {
return Err(aws_smithy_json::deserialize::Error::custom(format!(
"expected object key or end object, found: {:?}",
other
)))
}
}
}
if tokens.next().is_some() {
return Err(aws_smithy_json::deserialize::Error::custom(
"found more JSON tokens after completing parsing",
));
}
Ok(builder)
}
pub fn deser_structure_crate_error_internal_server_exception_json_err(
value: &[u8],
mut builder: crate::error::internal_server_exception::Builder,
) -> Result<crate::error::internal_server_exception::Builder, aws_smithy_json::deserialize::Error> {
let mut tokens_owned =
aws_smithy_json::deserialize::json_token_iter(crate::json_deser::or_empty_doc(value))
.peekable();
let tokens = &mut tokens_owned;
aws_smithy_json::deserialize::token::expect_start_object(tokens.next())?;
loop {
match tokens.next().transpose()? {
Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break,
Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => {
match key.to_unescaped()?.as_ref() {
"Message" => {
builder = builder.set_message(
aws_smithy_json::deserialize::token::expect_string_or_null(
tokens.next(),
)?
.map(|s| s.to_unescaped().map(|u| u.into_owned()))
.transpose()?,
);
}
_ => aws_smithy_json::deserialize::token::skip_value(tokens)?,
}
}
other => {
return Err(aws_smithy_json::deserialize::Error::custom(format!(
"expected object key or end object, found: {:?}",
other
)))
}
}
}
if tokens.next().is_some() {
return Err(aws_smithy_json::deserialize::Error::custom(
"found more JSON tokens after completing parsing",
));
}
Ok(builder)
}
pub fn deser_structure_crate_error_resource_not_found_exception_json_err(
value: &[u8],
mut builder: crate::error::resource_not_found_exception::Builder,
) -> Result<crate::error::resource_not_found_exception::Builder, aws_smithy_json::deserialize::Error>
{
let mut tokens_owned =
aws_smithy_json::deserialize::json_token_iter(crate::json_deser::or_empty_doc(value))
.peekable();
let tokens = &mut tokens_owned;
aws_smithy_json::deserialize::token::expect_start_object(tokens.next())?;
loop {
match tokens.next().transpose()? {
Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break,
Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => {
match key.to_unescaped()?.as_ref() {
"Message" => {
builder = builder.set_message(
aws_smithy_json::deserialize::token::expect_string_or_null(
tokens.next(),
)?
.map(|s| s.to_unescaped().map(|u| u.into_owned()))
.transpose()?,
);
}
_ => aws_smithy_json::deserialize::token::skip_value(tokens)?,
}
}
other => {
return Err(aws_smithy_json::deserialize::Error::custom(format!(
"expected object key or end object, found: {:?}",
other
)))
}
}
}
if tokens.next().is_some() {
return Err(aws_smithy_json::deserialize::Error::custom(
"found more JSON tokens after completing parsing",
));
}
Ok(builder)
}
pub fn deser_structure_crate_error_validation_exception_json_err(
value: &[u8],
mut builder: crate::error::validation_exception::Builder,
) -> Result<crate::error::validation_exception::Builder, aws_smithy_json::deserialize::Error> {
let mut tokens_owned =
aws_smithy_json::deserialize::json_token_iter(crate::json_deser::or_empty_doc(value))
.peekable();
let tokens = &mut tokens_owned;
aws_smithy_json::deserialize::token::expect_start_object(tokens.next())?;
loop {
match tokens.next().transpose()? {
Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break,
Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => {
match key.to_unescaped()?.as_ref() {
"Message" => {
builder = builder.set_message(
aws_smithy_json::deserialize::token::expect_string_or_null(
tokens.next(),
)?
.map(|s| s.to_unescaped().map(|u| u.into_owned()))
.transpose()?,
);
}
_ => aws_smithy_json::deserialize::token::skip_value(tokens)?,
}
}
other => {
return Err(aws_smithy_json::deserialize::Error::custom(format!(
"expected object key or end object, found: {:?}",
other
)))
}
}
}
if tokens.next().is_some() {
return Err(aws_smithy_json::deserialize::Error::custom(
"found more JSON tokens after completing parsing",
));
}
Ok(builder)
}
pub fn deser_operation_crate_operation_create_endpoint(
value: &[u8],
mut builder: crate::output::create_endpoint_output::Builder,
) -> Result<crate::output::create_endpoint_output::Builder, aws_smithy_json::deserialize::Error> {
let mut tokens_owned =
aws_smithy_json::deserialize::json_token_iter(crate::json_deser::or_empty_doc(value))
.peekable();
let tokens = &mut tokens_owned;
aws_smithy_json::deserialize::token::expect_start_object(tokens.next())?;
loop {
match tokens.next().transpose()? {
Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break,
Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => {
match key.to_unescaped()?.as_ref() {
"EndpointArn" => {
builder = builder.set_endpoint_arn(
aws_smithy_json::deserialize::token::expect_string_or_null(
tokens.next(),
)?
.map(|s| s.to_unescaped().map(|u| u.into_owned()))
.transpose()?,
);
}
_ => aws_smithy_json::deserialize::token::skip_value(tokens)?,
}
}
other => {
return Err(aws_smithy_json::deserialize::Error::custom(format!(
"expected object key or end object, found: {:?}",
other
)))
}
}
}
if tokens.next().is_some() {
return Err(aws_smithy_json::deserialize::Error::custom(
"found more JSON tokens after completing parsing",
));
}
Ok(builder)
}
pub fn deser_operation_crate_operation_list_endpoints(
value: &[u8],
mut builder: crate::output::list_endpoints_output::Builder,
) -> Result<crate::output::list_endpoints_output::Builder, aws_smithy_json::deserialize::Error> {
let mut tokens_owned =
aws_smithy_json::deserialize::json_token_iter(crate::json_deser::or_empty_doc(value))
.peekable();
let tokens = &mut tokens_owned;
aws_smithy_json::deserialize::token::expect_start_object(tokens.next())?;
loop {
match tokens.next().transpose()? {
Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break,
Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => {
match key.to_unescaped()?.as_ref() {
"Endpoints" => {
builder = builder.set_endpoints(
crate::json_deser::deser_list_com_amazonaws_s3outposts_endpoints(
tokens,
)?,
);
}
"NextToken" => {
builder = builder.set_next_token(
aws_smithy_json::deserialize::token::expect_string_or_null(
tokens.next(),
)?
.map(|s| s.to_unescaped().map(|u| u.into_owned()))
.transpose()?,
);
}
_ => aws_smithy_json::deserialize::token::skip_value(tokens)?,
}
}
other => {
return Err(aws_smithy_json::deserialize::Error::custom(format!(
"expected object key or end object, found: {:?}",
other
)))
}
}
}
if tokens.next().is_some() {
return Err(aws_smithy_json::deserialize::Error::custom(
"found more JSON tokens after completing parsing",
));
}
Ok(builder)
}
pub fn deser_operation_crate_operation_list_shared_endpoints(
value: &[u8],
mut builder: crate::output::list_shared_endpoints_output::Builder,
) -> Result<crate::output::list_shared_endpoints_output::Builder, aws_smithy_json::deserialize::Error>
{
let mut tokens_owned =
aws_smithy_json::deserialize::json_token_iter(crate::json_deser::or_empty_doc(value))
.peekable();
let tokens = &mut tokens_owned;
aws_smithy_json::deserialize::token::expect_start_object(tokens.next())?;
loop {
match tokens.next().transpose()? {
Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break,
Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => {
match key.to_unescaped()?.as_ref() {
"Endpoints" => {
builder = builder.set_endpoints(
crate::json_deser::deser_list_com_amazonaws_s3outposts_endpoints(
tokens,
)?,
);
}
"NextToken" => {
builder = builder.set_next_token(
aws_smithy_json::deserialize::token::expect_string_or_null(
tokens.next(),
)?
.map(|s| s.to_unescaped().map(|u| u.into_owned()))
.transpose()?,
);
}
_ => aws_smithy_json::deserialize::token::skip_value(tokens)?,
}
}
other => {
return Err(aws_smithy_json::deserialize::Error::custom(format!(
"expected object key or end object, found: {:?}",
other
)))
}
}
}
if tokens.next().is_some() {
return Err(aws_smithy_json::deserialize::Error::custom(
"found more JSON tokens after completing parsing",
));
}
Ok(builder)
}
pub fn or_empty_doc(data: &[u8]) -> &[u8] {
if data.is_empty() {
b"{}"
} else {
data
}
}
#[allow(clippy::type_complexity, non_snake_case)]
pub fn deser_list_com_amazonaws_s3outposts_endpoints<'a, I>(
tokens: &mut std::iter::Peekable<I>,
) -> Result<Option<std::vec::Vec<crate::model::Endpoint>>, aws_smithy_json::deserialize::Error>
where
I: Iterator<
Item = Result<aws_smithy_json::deserialize::Token<'a>, aws_smithy_json::deserialize::Error>,
>,
{
match tokens.next().transpose()? {
Some(aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None),
Some(aws_smithy_json::deserialize::Token::StartArray { .. }) => {
let mut items = Vec::new();
loop {
match tokens.peek() {
Some(Ok(aws_smithy_json::deserialize::Token::EndArray { .. })) => {
tokens.next().transpose().unwrap();
break;
}
_ => {
let value =
crate::json_deser::deser_structure_crate_model_endpoint(tokens)?;
if let Some(value) = value {
items.push(value);
}
}
}
}
Ok(Some(items))
}
_ => Err(aws_smithy_json::deserialize::Error::custom(
"expected start array or null",
)),
}
}
pub fn deser_structure_crate_model_endpoint<'a, I>(
tokens: &mut std::iter::Peekable<I>,
) -> Result<Option<crate::model::Endpoint>, aws_smithy_json::deserialize::Error>
where
I: Iterator<
Item = Result<aws_smithy_json::deserialize::Token<'a>, aws_smithy_json::deserialize::Error>,
>,
{
match tokens.next().transpose()? {
Some(aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None),
Some(aws_smithy_json::deserialize::Token::StartObject { .. }) => {
#[allow(unused_mut)]
let mut builder = crate::model::Endpoint::builder();
loop {
match tokens.next().transpose()? {
Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break,
Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => {
match key.to_unescaped()?.as_ref() {
"EndpointArn" => {
builder = builder.set_endpoint_arn(
aws_smithy_json::deserialize::token::expect_string_or_null(
tokens.next(),
)?
.map(|s| s.to_unescaped().map(|u| u.into_owned()))
.transpose()?,
);
}
"OutpostsId" => {
builder = builder.set_outposts_id(
aws_smithy_json::deserialize::token::expect_string_or_null(
tokens.next(),
)?
.map(|s| s.to_unescaped().map(|u| u.into_owned()))
.transpose()?,
);
}
"CidrBlock" => {
builder = builder.set_cidr_block(
aws_smithy_json::deserialize::token::expect_string_or_null(
tokens.next(),
)?
.map(|s| s.to_unescaped().map(|u| u.into_owned()))
.transpose()?,
);
}
"Status" => {
builder = builder.set_status(
aws_smithy_json::deserialize::token::expect_string_or_null(
tokens.next(),
)?
.map(|s| {
s.to_unescaped()
.map(|u| crate::model::EndpointStatus::from(u.as_ref()))
})
.transpose()?,
);
}
"CreationTime" => {
builder = builder.set_creation_time(
aws_smithy_json::deserialize::token::expect_timestamp_or_null(
tokens.next(),
aws_smithy_types::date_time::Format::EpochSeconds,
)?,
);
}
"NetworkInterfaces" => {
builder = builder.set_network_interfaces(
crate::json_deser::deser_list_com_amazonaws_s3outposts_network_interfaces(tokens)?
);
}
"VpcId" => {
builder = builder.set_vpc_id(
aws_smithy_json::deserialize::token::expect_string_or_null(
tokens.next(),
)?
.map(|s| s.to_unescaped().map(|u| u.into_owned()))
.transpose()?,
);
}
"SubnetId" => {
builder = builder.set_subnet_id(
aws_smithy_json::deserialize::token::expect_string_or_null(
tokens.next(),
)?
.map(|s| s.to_unescaped().map(|u| u.into_owned()))
.transpose()?,
);
}
"SecurityGroupId" => {
builder = builder.set_security_group_id(
aws_smithy_json::deserialize::token::expect_string_or_null(
tokens.next(),
)?
.map(|s| s.to_unescaped().map(|u| u.into_owned()))
.transpose()?,
);
}
"AccessType" => {
builder = builder.set_access_type(
aws_smithy_json::deserialize::token::expect_string_or_null(
tokens.next(),
)?
.map(|s| {
s.to_unescaped().map(|u| {
crate::model::EndpointAccessType::from(u.as_ref())
})
})
.transpose()?,
);
}
"CustomerOwnedIpv4Pool" => {
builder = builder.set_customer_owned_ipv4_pool(
aws_smithy_json::deserialize::token::expect_string_or_null(
tokens.next(),
)?
.map(|s| s.to_unescaped().map(|u| u.into_owned()))
.transpose()?,
);
}
_ => aws_smithy_json::deserialize::token::skip_value(tokens)?,
}
}
other => {
return Err(aws_smithy_json::deserialize::Error::custom(format!(
"expected object key or end object, found: {:?}",
other
)))
}
}
}
Ok(Some(builder.build()))
}
_ => Err(aws_smithy_json::deserialize::Error::custom(
"expected start object or null",
)),
}
}
#[allow(clippy::type_complexity, non_snake_case)]
pub fn deser_list_com_amazonaws_s3outposts_network_interfaces<'a, I>(
tokens: &mut std::iter::Peekable<I>,
) -> Result<
Option<std::vec::Vec<crate::model::NetworkInterface>>,
aws_smithy_json::deserialize::Error,
>
where
I: Iterator<
Item = Result<aws_smithy_json::deserialize::Token<'a>, aws_smithy_json::deserialize::Error>,
>,
{
match tokens.next().transpose()? {
Some(aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None),
Some(aws_smithy_json::deserialize::Token::StartArray { .. }) => {
let mut items = Vec::new();
loop {
match tokens.peek() {
Some(Ok(aws_smithy_json::deserialize::Token::EndArray { .. })) => {
tokens.next().transpose().unwrap();
break;
}
_ => {
let value =
crate::json_deser::deser_structure_crate_model_network_interface(
tokens,
)?;
if let Some(value) = value {
items.push(value);
}
}
}
}
Ok(Some(items))
}
_ => Err(aws_smithy_json::deserialize::Error::custom(
"expected start array or null",
)),
}
}
pub fn deser_structure_crate_model_network_interface<'a, I>(
tokens: &mut std::iter::Peekable<I>,
) -> Result<Option<crate::model::NetworkInterface>, aws_smithy_json::deserialize::Error>
where
I: Iterator<
Item = Result<aws_smithy_json::deserialize::Token<'a>, aws_smithy_json::deserialize::Error>,
>,
|
{
match tokens.next().transpose()? {
Some(aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None),
Some(aws_smithy_json::deserialize::Token::StartObject { .. }) => {
#[allow(unused_mut)]
let mut builder = crate::model::NetworkInterface::builder();
loop {
match tokens.next().transpose()? {
Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break,
Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => {
match key.to_unescaped()?.as_ref() {
"NetworkInterfaceId" => {
builder = builder.set_network_interface_id(
aws_smithy_json::deserialize::token::expect_string_or_null(
tokens.next(),
)?
.map(|s| s.to_unescaped().map(|u| u.into_owned()))
.transpose()?,
);
}
_ => aws_smithy_json::deserialize::token::skip_value(tokens)?,
}
}
other => {
return Err(aws_smithy_json::deserialize::Error::custom(format!(
"expected object key or end object, found: {:?}",
other
)))
}
}
}
Ok(Some(builder.build()))
}
_ => Err(aws_smithy_json::deserialize::Error::custom(
"expected start object or null",
)),
}
}
|
|
inventory.py
|
# MIT License
#
# (C) Copyright [2022] Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
"""Create Nornir Inventory from SLS."""
import json
import click
from canu.utils.sls import pull_sls_hardware, pull_sls_networks
def inventory(username, password, network, sls_json=None):
"""Build Nornir inventory from sls_input."""
inventory = {"groups": "shasta", "hosts": {}}
if sls_json:
try:
input_json = json.load(sls_json)
except (json.JSONDecodeError, UnicodeDecodeError):
click.secho(
f"The file {sls_json.name} is not valid JSON.",
fg="red",
)
return
|
sls_variables = pull_sls_networks()
sls_hardware = pull_sls_hardware()
for k in sls_variables[network + "_IPs"]:
if "sw" in k:
inventory["hosts"].update(
{
k: {
"hostname": str(sls_variables[network + "_IPs"][k]),
"platform": "",
"username": username,
"password": password,
"data": {"type": ""},
},
},
)
# pull in the platform type from sls hardware data
for x in sls_hardware:
if (
x["Type"] == "comptype_hl_switch"
or x["Type"] == "comptype_mgmt_switch"
or x["Type"] == "comptype_cdu_mgmt_switch"
):
for host in inventory["hosts"]:
if host == x["ExtraProperties"]["Aliases"][0]:
if x["ExtraProperties"]["Brand"] == "Aruba":
inventory["hosts"][host]["platform"] = "aruba_os"
elif x["ExtraProperties"]["Brand"] == "Dell":
inventory["hosts"][host]["platform"] = "dell_os10"
elif x["ExtraProperties"]["Brand"] == "Mellanox":
inventory["hosts"][host]["platform"] = "mellanox"
else:
inventory["hosts"][host]["platform"] = "generic"
if "sw-leaf-bmc" in host:
inventory["hosts"][host]["data"]["type"] = "leaf-bmc"
elif "sw-leaf" in host:
inventory["hosts"][host]["data"]["type"] = "leaf"
elif "sw-spine" in host:
inventory["hosts"][host]["data"]["type"] = "spine"
elif "sw-cdu" in host:
inventory["hosts"][host]["data"]["type"] = "cdu"
inventory = {
"plugin": "DictInventory",
"options": {
"hosts": inventory["hosts"],
"groups": {},
"defaults": {},
},
}
return inventory
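# A minimal usage sketch (credentials illustrative): the returned dict plugs
# straight into Nornir, e.g.
#
#     from nornir import InitNornir
#     nr = InitNornir(inventory=inventory("admin", "password", "NMN"))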
|
sls_variables = pull_sls_networks(input_json)
sls_hardware = pull_sls_hardware(input_json)
else:
|
lib.rs
|
#![feature(nll)]
#![feature(static_nobundle)]
#![doc(html_root_url = "https://doc.rust-lang.org/nightly/")]
// NOTE: This crate only exists to allow linking on mingw targets.
/// Initialize targets enabled by the build script via `cfg(llvm_component = "...")`.
/// N.B., this function can't be moved to `rustc_codegen_llvm` because of the `cfg`s.
pub fn initialize_available_targets()
|
{
macro_rules! init_target(
($cfg:meta, $($method:ident),*) => { {
#[cfg($cfg)]
fn init() {
extern {
$(fn $method();)*
}
unsafe {
$($method();)*
}
}
#[cfg(not($cfg))]
fn init() { }
init();
} }
);
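    // For example, the "x86" invocation below expands (when
    // `llvm_component = "x86"` is set by the build script) to an `init` fn
    // that declares each `LLVMInitializeX86*` symbol in an `extern` block and
    // calls it; when the component is absent, `init` is an empty no-op.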
init_target!(llvm_component = "x86",
LLVMInitializeX86TargetInfo,
LLVMInitializeX86Target,
LLVMInitializeX86TargetMC,
LLVMInitializeX86AsmPrinter,
LLVMInitializeX86AsmParser);
init_target!(llvm_component = "arm",
LLVMInitializeARMTargetInfo,
LLVMInitializeARMTarget,
LLVMInitializeARMTargetMC,
LLVMInitializeARMAsmPrinter,
LLVMInitializeARMAsmParser);
init_target!(llvm_component = "aarch64",
LLVMInitializeAArch64TargetInfo,
LLVMInitializeAArch64Target,
LLVMInitializeAArch64TargetMC,
LLVMInitializeAArch64AsmPrinter,
LLVMInitializeAArch64AsmParser);
init_target!(llvm_component = "amdgpu",
LLVMInitializeAMDGPUTargetInfo,
LLVMInitializeAMDGPUTarget,
LLVMInitializeAMDGPUTargetMC,
LLVMInitializeAMDGPUAsmPrinter,
LLVMInitializeAMDGPUAsmParser);
init_target!(llvm_component = "ducky",
LLVMInitializeDuckyTargetInfo,
LLVMInitializeDuckyTarget,
LLVMInitializeDuckyTargetMC,
LLVMInitializeDuckyAsmPrinter,
LLVMInitializeDuckyAsmParser);
init_target!(llvm_component = "mips",
LLVMInitializeMipsTargetInfo,
LLVMInitializeMipsTarget,
LLVMInitializeMipsTargetMC,
LLVMInitializeMipsAsmPrinter,
LLVMInitializeMipsAsmParser);
init_target!(llvm_component = "powerpc",
LLVMInitializePowerPCTargetInfo,
LLVMInitializePowerPCTarget,
LLVMInitializePowerPCTargetMC,
LLVMInitializePowerPCAsmPrinter,
LLVMInitializePowerPCAsmParser);
init_target!(llvm_component = "systemz",
LLVMInitializeSystemZTargetInfo,
LLVMInitializeSystemZTarget,
LLVMInitializeSystemZTargetMC,
LLVMInitializeSystemZAsmPrinter,
LLVMInitializeSystemZAsmParser);
init_target!(llvm_component = "jsbackend",
LLVMInitializeJSBackendTargetInfo,
LLVMInitializeJSBackendTarget,
LLVMInitializeJSBackendTargetMC);
init_target!(llvm_component = "msp430",
LLVMInitializeMSP430TargetInfo,
LLVMInitializeMSP430Target,
LLVMInitializeMSP430TargetMC,
LLVMInitializeMSP430AsmPrinter);
init_target!(all(llvm_component = "msp430", llvm_has_msp430_asm_parser),
LLVMInitializeMSP430AsmParser);
init_target!(llvm_component = "riscv",
LLVMInitializeRISCVTargetInfo,
LLVMInitializeRISCVTarget,
LLVMInitializeRISCVTargetMC,
LLVMInitializeRISCVAsmPrinter,
LLVMInitializeRISCVAsmParser);
init_target!(llvm_component = "sparc",
LLVMInitializeSparcTargetInfo,
LLVMInitializeSparcTarget,
LLVMInitializeSparcTargetMC,
LLVMInitializeSparcAsmPrinter,
LLVMInitializeSparcAsmParser);
init_target!(llvm_component = "nvptx",
LLVMInitializeNVPTXTargetInfo,
LLVMInitializeNVPTXTarget,
LLVMInitializeNVPTXTargetMC,
LLVMInitializeNVPTXAsmPrinter);
init_target!(llvm_component = "hexagon",
LLVMInitializeHexagonTargetInfo,
LLVMInitializeHexagonTarget,
LLVMInitializeHexagonTargetMC,
LLVMInitializeHexagonAsmPrinter,
LLVMInitializeHexagonAsmParser);
init_target!(llvm_component = "webassembly",
LLVMInitializeWebAssemblyTargetInfo,
LLVMInitializeWebAssemblyTarget,
LLVMInitializeWebAssemblyTargetMC,
LLVMInitializeWebAssemblyAsmPrinter);
}
|
|
signal.rs
|
// Portions of this file are Copyright 2014 The Rust Project Developers.
// See http://rust-lang.org/COPYRIGHT.
//! Operating system signals.
use libc;
use {Error, Result};
use errno::Errno;
use std::mem;
use std::fmt;
use std::str::FromStr;
#[cfg(any(target_os = "dragonfly", target_os = "freebsd"))]
use std::os::unix::io::RawFd;
use std::ptr;
#[cfg(not(target_os = "openbsd"))]
pub use self::sigevent::*;
libc_enum!{
// Currently there is only one definition of c_int in libc, as well as only one
// type for signal constants.
// We would prefer to use the libc::c_int alias in the repr attribute. Unfortunately
// this is not (yet) possible.
#[repr(i32)]
pub enum Signal {
SIGHUP,
SIGINT,
SIGQUIT,
SIGILL,
SIGTRAP,
SIGABRT,
SIGBUS,
SIGFPE,
SIGKILL,
SIGUSR1,
SIGSEGV,
SIGUSR2,
SIGPIPE,
SIGALRM,
SIGTERM,
#[cfg(all(any(target_os = "android", target_os = "emscripten", target_os = "linux"),
not(any(target_arch = "mips", target_arch = "mips64", target_arch = "sparc64"))))]
SIGSTKFLT,
SIGCHLD,
SIGCONT,
SIGSTOP,
SIGTSTP,
SIGTTIN,
SIGTTOU,
SIGURG,
SIGXCPU,
SIGXFSZ,
SIGVTALRM,
SIGPROF,
SIGWINCH,
SIGIO,
#[cfg(any(target_os = "android", target_os = "emscripten", target_os = "linux"))]
SIGPWR,
SIGSYS,
#[cfg(not(any(target_os = "android", target_os = "emscripten", target_os = "linux")))]
SIGEMT,
#[cfg(not(any(target_os = "android", target_os = "emscripten", target_os = "linux")))]
SIGINFO,
}
}
impl FromStr for Signal {
type Err = Error;
fn from_str(s: &str) -> Result<Signal> {
Ok(match s {
"SIGHUP" => Signal::SIGHUP,
"SIGINT" => Signal::SIGINT,
"SIGQUIT" => Signal::SIGQUIT,
"SIGILL" => Signal::SIGILL,
"SIGTRAP" => Signal::SIGTRAP,
"SIGABRT" => Signal::SIGABRT,
"SIGBUS" => Signal::SIGBUS,
"SIGFPE" => Signal::SIGFPE,
"SIGKILL" => Signal::SIGKILL,
"SIGUSR1" => Signal::SIGUSR1,
"SIGSEGV" => Signal::SIGSEGV,
"SIGUSR2" => Signal::SIGUSR2,
"SIGPIPE" => Signal::SIGPIPE,
"SIGALRM" => Signal::SIGALRM,
"SIGTERM" => Signal::SIGTERM,
#[cfg(all(any(target_os = "android", target_os = "emscripten", target_os = "linux"),
not(any(target_arch = "mips", target_arch = "mips64", target_arch = "sparc64"))))]
"SIGSTKFLT" => Signal::SIGSTKFLT,
"SIGCHLD" => Signal::SIGCHLD,
"SIGCONT" => Signal::SIGCONT,
"SIGSTOP" => Signal::SIGSTOP,
"SIGTSTP" => Signal::SIGTSTP,
"SIGTTIN" => Signal::SIGTTIN,
"SIGTTOU" => Signal::SIGTTOU,
"SIGURG" => Signal::SIGURG,
"SIGXCPU" => Signal::SIGXCPU,
"SIGXFSZ" => Signal::SIGXFSZ,
"SIGVTALRM" => Signal::SIGVTALRM,
"SIGPROF" => Signal::SIGPROF,
"SIGWINCH" => Signal::SIGWINCH,
"SIGIO" => Signal::SIGIO,
#[cfg(any(target_os = "android", target_os = "emscripten", target_os = "linux"))]
"SIGPWR" => Signal::SIGPWR,
"SIGSYS" => Signal::SIGSYS,
#[cfg(not(any(target_os = "android", target_os = "emscripten", target_os = "linux")))]
"SIGEMT" => Signal::SIGEMT,
#[cfg(not(any(target_os = "android", target_os = "emscripten", target_os = "linux")))]
"SIGINFO" => Signal::SIGINFO,
_ => return Err(Error::invalid_argument()),
})
}
}
impl AsRef<str> for Signal {
fn as_ref(&self) -> &str {
match *self {
Signal::SIGHUP => "SIGHUP",
Signal::SIGINT => "SIGINT",
Signal::SIGQUIT => "SIGQUIT",
Signal::SIGILL => "SIGILL",
Signal::SIGTRAP => "SIGTRAP",
Signal::SIGABRT => "SIGABRT",
Signal::SIGBUS => "SIGBUS",
Signal::SIGFPE => "SIGFPE",
Signal::SIGKILL => "SIGKILL",
Signal::SIGUSR1 => "SIGUSR1",
Signal::SIGSEGV => "SIGSEGV",
Signal::SIGUSR2 => "SIGUSR2",
Signal::SIGPIPE => "SIGPIPE",
Signal::SIGALRM => "SIGALRM",
Signal::SIGTERM => "SIGTERM",
#[cfg(all(any(target_os = "android", target_os = "emscripten", target_os = "linux"),
not(any(target_arch = "mips", target_arch = "mips64", target_arch = "sparc64"))))]
Signal::SIGSTKFLT => "SIGSTKFLT",
Signal::SIGCHLD => "SIGCHLD",
Signal::SIGCONT => "SIGCONT",
Signal::SIGSTOP => "SIGSTOP",
Signal::SIGTSTP => "SIGTSTP",
Signal::SIGTTIN => "SIGTTIN",
Signal::SIGTTOU => "SIGTTOU",
Signal::SIGURG => "SIGURG",
Signal::SIGXCPU => "SIGXCPU",
Signal::SIGXFSZ => "SIGXFSZ",
Signal::SIGVTALRM => "SIGVTALRM",
Signal::SIGPROF => "SIGPROF",
Signal::SIGWINCH => "SIGWINCH",
Signal::SIGIO => "SIGIO",
#[cfg(any(target_os = "android", target_os = "emscripten", target_os = "linux"))]
Signal::SIGPWR => "SIGPWR",
Signal::SIGSYS => "SIGSYS",
#[cfg(not(any(target_os = "android", target_os = "emscripten", target_os = "linux")))]
Signal::SIGEMT => "SIGEMT",
#[cfg(not(any(target_os = "android", target_os = "emscripten", target_os = "linux")))]
Signal::SIGINFO => "SIGINFO",
}
}
}
impl fmt::Display for Signal {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str(self.as_ref())
}
}
pub use self::Signal::*;
#[cfg(all(any(target_os = "linux", target_os = "android", target_os = "emscripten"), not(any(target_arch = "mips", target_arch = "mips64", target_arch = "sparc64"))))]
const SIGNALS: [Signal; 31] = [
SIGHUP,
SIGINT,
SIGQUIT,
SIGILL,
SIGTRAP,
SIGABRT,
SIGBUS,
SIGFPE,
SIGKILL,
SIGUSR1,
SIGSEGV,
SIGUSR2,
SIGPIPE,
SIGALRM,
SIGTERM,
SIGSTKFLT,
SIGCHLD,
SIGCONT,
SIGSTOP,
SIGTSTP,
SIGTTIN,
SIGTTOU,
SIGURG,
SIGXCPU,
SIGXFSZ,
SIGVTALRM,
SIGPROF,
SIGWINCH,
SIGIO,
SIGPWR,
SIGSYS];
#[cfg(all(any(target_os = "linux", target_os = "android", target_os = "emscripten"), any(target_arch = "mips", target_arch = "mips64", target_arch = "sparc64")))]
const SIGNALS: [Signal; 30] = [
SIGHUP,
SIGINT,
SIGQUIT,
SIGILL,
SIGTRAP,
SIGABRT,
SIGBUS,
SIGFPE,
SIGKILL,
SIGUSR1,
SIGSEGV,
SIGUSR2,
SIGPIPE,
SIGALRM,
SIGTERM,
SIGCHLD,
SIGCONT,
SIGSTOP,
SIGTSTP,
SIGTTIN,
SIGTTOU,
SIGURG,
SIGXCPU,
SIGXFSZ,
SIGVTALRM,
SIGPROF,
SIGWINCH,
SIGIO,
SIGPWR,
SIGSYS];
#[cfg(not(any(target_os = "linux", target_os = "android", target_os = "emscripten")))]
const SIGNALS: [Signal; 31] = [
SIGHUP,
SIGINT,
SIGQUIT,
SIGILL,
SIGTRAP,
SIGABRT,
SIGBUS,
SIGFPE,
SIGKILL,
SIGUSR1,
SIGSEGV,
SIGUSR2,
SIGPIPE,
SIGALRM,
SIGTERM,
SIGCHLD,
SIGCONT,
SIGSTOP,
SIGTSTP,
SIGTTIN,
SIGTTOU,
SIGURG,
SIGXCPU,
SIGXFSZ,
SIGVTALRM,
SIGPROF,
SIGWINCH,
SIGIO,
SIGSYS,
SIGEMT,
SIGINFO];
pub const NSIG: libc::c_int = 32;
#[derive(Clone, Copy)]
#[allow(missing_debug_implementations)]
pub struct SignalIterator {
next: usize,
}
impl Iterator for SignalIterator {
type Item = Signal;
fn next(&mut self) -> Option<Signal> {
if self.next < SIGNALS.len() {
let next_signal = SIGNALS[self.next];
self.next += 1;
Some(next_signal)
} else {
None
}
}
}
impl Signal {
pub fn iterator() -> SignalIterator {
SignalIterator{next: 0}
}
    // We do not implement the `From` trait because it is supposed to be
    // infallible. Rust RFC 1542 adds the appropriate `TryFrom` trait; once it
    // is stable, this function can be replaced with an implementation of it.
#[inline]
pub fn from_c_int(signum: libc::c_int) -> Result<Signal> {
if 0 < signum && signum < NSIG {
Ok(unsafe { mem::transmute(signum) })
} else {
Err(Error::invalid_argument())
}
}
}
pub const SIGIOT : Signal = SIGABRT;
pub const SIGPOLL : Signal = SIGIO;
pub const SIGUNUSED : Signal = SIGSYS;
#[cfg(not(target_os = "android"))]
libc_bitflags!{
pub struct SaFlags: libc::c_int {
SA_NOCLDSTOP;
SA_NOCLDWAIT;
SA_NODEFER;
SA_ONSTACK;
SA_RESETHAND;
SA_RESTART;
SA_SIGINFO;
}
}
// On 64-bit android, sa_flags is c_uint while on 32-bit android, it is
// c_ulong.
// FIXME: https://github.com/rust-lang/libc/pull/511
#[cfg(all(target_os = "android", target_pointer_width = "32"))]
libc_bitflags!{
pub struct SaFlags: libc::c_ulong {
SA_NOCLDSTOP as libc::c_ulong;
SA_NOCLDWAIT as libc::c_ulong;
SA_NODEFER as libc::c_ulong;
SA_ONSTACK as libc::c_ulong;
SA_RESETHAND as libc::c_ulong;
SA_RESTART as libc::c_ulong;
SA_SIGINFO as libc::c_ulong;
}
}
#[cfg(all(target_os = "android", target_pointer_width = "64"))]
libc_bitflags!{
pub struct SaFlags: libc::c_uint {
SA_NOCLDSTOP as libc::c_uint;
SA_NOCLDWAIT as libc::c_uint;
SA_NODEFER as libc::c_uint;
SA_ONSTACK as libc::c_uint;
SA_RESETHAND as libc::c_uint;
SA_RESTART as libc::c_uint;
SA_SIGINFO as libc::c_uint;
}
}
libc_enum! {
#[repr(i32)]
pub enum SigmaskHow {
SIG_BLOCK,
SIG_UNBLOCK,
SIG_SETMASK,
}
}
#[derive(Clone, Copy)]
#[allow(missing_debug_implementations)]
pub struct SigSet {
sigset: libc::sigset_t
}
impl SigSet {
    pub fn all() -> SigSet {
        // Use `MaybeUninit` instead of the deprecated `mem::uninitialized`,
        // which is undefined behavior for most types.
        let mut sigset = mem::MaybeUninit::<libc::sigset_t>::uninit();
        let _ = unsafe { libc::sigfillset(sigset.as_mut_ptr()) };
        SigSet { sigset: unsafe { sigset.assume_init() } }
    }
    pub fn empty() -> SigSet {
        let mut sigset = mem::MaybeUninit::<libc::sigset_t>::uninit();
        let _ = unsafe { libc::sigemptyset(sigset.as_mut_ptr()) };
        SigSet { sigset: unsafe { sigset.assume_init() } }
    }
pub fn add(&mut self, signal: Signal) {
unsafe { libc::sigaddset(&mut self.sigset as *mut libc::sigset_t, signal as libc::c_int) };
}
pub fn clear(&mut self) {
unsafe { libc::sigemptyset(&mut self.sigset as *mut libc::sigset_t) };
}
pub fn remove(&mut self, signal: Signal) {
unsafe { libc::sigdelset(&mut self.sigset as *mut libc::sigset_t, signal as libc::c_int) };
}
pub fn contains(&self, signal: Signal) -> bool {
let res = unsafe { libc::sigismember(&self.sigset as *const libc::sigset_t, signal as libc::c_int) };
match res {
1 => true,
0 => false,
_ => unreachable!("unexpected value from sigismember"),
}
}
pub fn extend(&mut self, other: &SigSet) {
for signal in Signal::iterator() {
if other.contains(signal) {
self.add(signal);
}
}
}
/// Gets the currently blocked (masked) set of signals for the calling thread.
pub fn thread_get_mask() -> Result<SigSet> {
let mut oldmask: SigSet = unsafe { mem::uninitialized() };
pthread_sigmask(SigmaskHow::SIG_SETMASK, None, Some(&mut oldmask))?;
Ok(oldmask)
}
/// Sets the set of signals as the signal mask for the calling thread.
pub fn thread_set_mask(&self) -> Result<()> {
pthread_sigmask(SigmaskHow::SIG_SETMASK, Some(self), None)
}
/// Adds the set of signals to the signal mask for the calling thread.
pub fn thread_block(&self) -> Result<()> {
pthread_sigmask(SigmaskHow::SIG_BLOCK, Some(self), None)
}
/// Removes the set of signals from the signal mask for the calling thread.
pub fn thread_unblock(&self) -> Result<()> {
pthread_sigmask(SigmaskHow::SIG_UNBLOCK, Some(self), None)
}
/// Sets the set of signals as the signal mask, and returns the old mask.
pub fn thread_swap_mask(&self, how: SigmaskHow) -> Result<SigSet> {
let mut oldmask: SigSet = unsafe { mem::uninitialized() };
pthread_sigmask(how, Some(self), Some(&mut oldmask))?;
Ok(oldmask)
}
/// Suspends execution of the calling thread until one of the signals in the
/// signal mask becomes pending, and returns the accepted signal.
pub fn wait(&self) -> Result<Signal> {
let mut signum: libc::c_int = unsafe { mem::uninitialized() };
let res = unsafe { libc::sigwait(&self.sigset as *const libc::sigset_t, &mut signum) };
Errno::result(res).map(|_| Signal::from_c_int(signum).unwrap())
}
}
impl AsRef<libc::sigset_t> for SigSet {
fn as_ref(&self) -> &libc::sigset_t {
&self.sigset
}
}
/// A signal handler.
#[allow(unknown_lints)]
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum SigHandler {
/// Default signal handling.
SigDfl,
/// Request that the signal be ignored.
SigIgn,
/// Use the given signal-catching function, which takes in the signal.
Handler(extern fn(libc::c_int)),
/// Use the given signal-catching function, which takes in the signal, information about how
/// the signal was generated, and a pointer to the thread's `ucontext_t`.
SigAction(extern fn(libc::c_int, *mut libc::siginfo_t, *mut libc::c_void))
}
/// Action to take on receipt of a signal. Corresponds to `sigaction`.
#[derive(Clone, Copy)]
#[allow(missing_debug_implementations)]
pub struct SigAction {
sigaction: libc::sigaction
}
impl SigAction {
/// Creates a new action.
///
/// The `SA_SIGINFO` bit in the `flags` argument is ignored (it will be set only if `handler`
/// is the `SigAction` variant). `mask` specifies other signals to block during execution of
/// the signal-catching function.
pub fn new(handler: SigHandler, flags: SaFlags, mask: SigSet) -> SigAction {
let mut s = unsafe { mem::uninitialized::<libc::sigaction>() };
s.sa_sigaction = match handler {
SigHandler::SigDfl => libc::SIG_DFL,
SigHandler::SigIgn => libc::SIG_IGN,
SigHandler::Handler(f) => f as *const extern fn(libc::c_int) as usize,
SigHandler::SigAction(f) => f as *const extern fn(libc::c_int, *mut libc::siginfo_t, *mut libc::c_void) as usize,
};
s.sa_flags = match handler {
SigHandler::SigAction(_) => (flags | SaFlags::SA_SIGINFO).bits(),
_ => (flags - SaFlags::SA_SIGINFO).bits(),
};
s.sa_mask = mask.sigset;
SigAction { sigaction: s }
}
/// Returns the flags set on the action.
pub fn flags(&self) -> SaFlags {
SaFlags::from_bits_truncate(self.sigaction.sa_flags)
}
/// Returns the set of signals that are blocked during execution of the action's
/// signal-catching function.
pub fn mask(&self) -> SigSet {
SigSet { sigset: self.sigaction.sa_mask }
}
/// Returns the action's handler.
pub fn handler(&self) -> SigHandler {
match self.sigaction.sa_sigaction {
libc::SIG_DFL => SigHandler::SigDfl,
libc::SIG_IGN => SigHandler::SigIgn,
f if self.flags().contains(SaFlags::SA_SIGINFO) =>
SigHandler::SigAction( unsafe { mem::transmute(f) } ),
f => SigHandler::Handler( unsafe { mem::transmute(f) } ),
}
}
}
/// Changes the action taken by a process on receipt of a specific signal.
///
/// `signal` can be any signal except `SIGKILL` or `SIGSTOP`. On success, it returns the previous
/// action for the given signal. If `sigaction` fails, no new signal handler is installed.
///
/// # Safety
///
/// Signal handlers may be called at any point during execution, which limits what is safe to do in
/// the body of the signal-catching function. Be certain to only make syscalls that are explicitly
/// marked safe for signal handlers and only share global data using atomics.
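///
/// # Examples
///
/// A minimal sketch installing a no-op handler for `SIGUSR1`:
///
/// ```no_run
/// # extern crate libc;
/// # extern crate nix;
/// # use nix::sys::signal::{self, Signal, SigHandler, SigAction, SaFlags, SigSet};
/// extern fn handle_usr1(_: libc::c_int) {}
///
/// fn main() {
///     let action = SigAction::new(SigHandler::Handler(handle_usr1),
///                                 SaFlags::empty(),
///                                 SigSet::empty());
///     unsafe { signal::sigaction(Signal::SIGUSR1, &action) }.unwrap();
/// }
/// ```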
pub unsafe fn sigaction(signal: Signal, sigaction: &SigAction) -> Result<SigAction> {
let mut oldact = mem::uninitialized::<libc::sigaction>();
let res =
libc::sigaction(signal as libc::c_int, &sigaction.sigaction as *const libc::sigaction, &mut oldact as *mut libc::sigaction);
Errno::result(res).map(|_| SigAction { sigaction: oldact })
}
/// Signal management (see [signal(3p)](http://pubs.opengroup.org/onlinepubs/9699919799/functions/signal.html))
///
/// Installs `handler` for the given `signal`, returning the previous signal
/// handler. `signal` should only be used following another call to `signal` or
/// if the current handler is the default. The return value of `signal` is
/// undefined after setting the handler with [`sigaction`][SigActionFn].
///
/// # Safety
///
/// If the pointer to the previous signal handler is invalid, undefined
/// behavior could be invoked when casting it back to a [`SigAction`][SigActionStruct].
///
/// # Examples
///
/// Ignore `SIGINT`:
///
/// ```no_run
/// # use nix::sys::signal::{self, Signal, SigHandler};
/// unsafe { signal::signal(Signal::SIGINT, SigHandler::SigIgn) }.unwrap();
/// ```
///
/// Use a signal handler to set a flag variable:
///
/// ```no_run
/// # #[macro_use] extern crate lazy_static;
/// # extern crate libc;
/// # extern crate nix;
/// # use std::sync::atomic::{AtomicBool, Ordering};
/// # use nix::sys::signal::{self, Signal, SigHandler};
/// lazy_static! {
/// static ref SIGNALED: AtomicBool = AtomicBool::new(false);
/// }
///
/// extern fn handle_sigint(signal: libc::c_int) {
/// let signal = Signal::from_c_int(signal).unwrap();
/// SIGNALED.store(signal == Signal::SIGINT, Ordering::Relaxed);
/// }
///
/// fn main() {
/// let handler = SigHandler::Handler(handle_sigint);
/// unsafe { signal::signal(Signal::SIGINT, handler) }.unwrap();
/// }
/// ```
///
/// # Errors
///
/// Returns [`Error::UnsupportedOperation`] if `handler` is
/// [`SigAction`][SigActionStruct]. Use [`sigaction`][SigActionFn] instead.
///
/// `signal` also returns any error from `libc::signal`, such as when an attempt
/// is made to catch a signal that cannot be caught or to ignore a signal that
/// cannot be ignored.
///
/// [`Error::UnsupportedOperation`]: ../../enum.Error.html#variant.UnsupportedOperation
/// [SigActionStruct]: struct.SigAction.html
/// [SigActionFn]: fn.sigaction.html
pub unsafe fn signal(signal: Signal, handler: SigHandler) -> Result<SigHandler> {
let signal = signal as libc::c_int;
let res = match handler {
SigHandler::SigDfl => libc::signal(signal, libc::SIG_DFL),
SigHandler::SigIgn => libc::signal(signal, libc::SIG_IGN),
SigHandler::Handler(handler) => libc::signal(signal, handler as libc::sighandler_t),
SigHandler::SigAction(_) => return Err(Error::UnsupportedOperation),
};
Errno::result(res).map(|oldhandler| {
match oldhandler {
libc::SIG_DFL => SigHandler::SigDfl,
libc::SIG_IGN => SigHandler::SigIgn,
f => SigHandler::Handler(mem::transmute(f)),
}
})
}
/// Manages the signal mask (set of blocked signals) for the calling thread.
///
/// If the `set` parameter is `Some(..)`, then the signal mask will be updated with the signal set.
/// The `how` flag decides the type of update. If `set` is `None`, `how` will be ignored,
/// and no modification will take place.
///
/// If the `oldset` parameter is `Some(..)`, then the current signal mask will be written into it.
///
/// If both `set` and `oldset` are `Some(..)`, the current signal mask will be written into
/// `oldset`, and then it will be updated with `set`.
///
/// If both `set` and `oldset` are `None`, this function is a no-op.
///
/// For more information, visit the [`pthread_sigmask`](http://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_sigmask.html),
/// or [`sigprocmask`](http://pubs.opengroup.org/onlinepubs/9699919799/functions/sigprocmask.html) man pages.
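///
/// # Examples
///
/// A minimal sketch that blocks `SIGUSR1` for the calling thread:
///
/// ```no_run
/// # use nix::sys::signal::{pthread_sigmask, SigmaskHow, SigSet, Signal};
/// let mut set = SigSet::empty();
/// set.add(Signal::SIGUSR1);
/// pthread_sigmask(SigmaskHow::SIG_BLOCK, Some(&set), None).unwrap();
/// ```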
pub fn pthread_sigmask(how: SigmaskHow,
set: Option<&SigSet>,
oldset: Option<&mut SigSet>) -> Result<()> {
if set.is_none() && oldset.is_none() {
return Ok(())
}
let res = unsafe {
// if set or oldset is None, pass in null pointers instead
libc::pthread_sigmask(how as libc::c_int,
set.map_or_else(ptr::null::<libc::sigset_t>,
|s| &s.sigset as *const libc::sigset_t),
oldset.map_or_else(ptr::null_mut::<libc::sigset_t>,
|os| &mut os.sigset as *mut libc::sigset_t))
};
Errno::result(res).map(drop)
}
/// Examine and change blocked signals.
///
/// For more information, see the [`sigprocmask` man
/// pages](http://pubs.opengroup.org/onlinepubs/9699919799/functions/sigprocmask.html).
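///
/// # Examples
///
/// A minimal sketch that blocks `SIGINT` around a critical section, then restores
/// the previous mask:
///
/// ```no_run
/// # use nix::sys::signal::{sigprocmask, SigmaskHow, SigSet, Signal};
/// let mut block = SigSet::empty();
/// block.add(Signal::SIGINT);
/// let mut old = SigSet::empty();
/// sigprocmask(SigmaskHow::SIG_BLOCK, Some(&block), Some(&mut old)).unwrap();
/// // ... critical section ...
/// sigprocmask(SigmaskHow::SIG_SETMASK, Some(&old), None).unwrap();
/// ```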
pub fn sigprocmask(how: SigmaskHow, set: Option<&SigSet>, oldset: Option<&mut SigSet>) -> Result<()> {
if set.is_none() && oldset.is_none() {
return Ok(())
}
let res = unsafe {
// if set or oldset is None, pass in null pointers instead
libc::sigprocmask(how as libc::c_int,
set.map_or_else(ptr::null::<libc::sigset_t>,
|s| &s.sigset as *const libc::sigset_t),
oldset.map_or_else(ptr::null_mut::<libc::sigset_t>,
|os| &mut os.sigset as *mut libc::sigset_t))
};
Errno::result(res).map(drop)
}
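/// Sends a signal to a process (see
/// [kill(2)](http://pubs.opengroup.org/onlinepubs/9699919799/functions/kill.html)).
///
/// Passing `None` for `signal` sends the null signal (0), which performs error
/// checking only. A minimal sketch, sending `SIGCONT` to the current process
/// (harmless when the process is already running):
///
/// ```no_run
/// # use nix::sys::signal::{kill, Signal};
/// # use nix::unistd::getpid;
/// kill(getpid(), Signal::SIGCONT).unwrap();
/// ```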
pub fn kill<T: Into<Option<Signal>>>(pid: ::unistd::Pid, signal: T) -> Result<()> {
let res = unsafe { libc::kill(pid.into(),
match signal.into() {
Some(s) => s as libc::c_int,
None => 0,
}) };
Errno::result(res).map(drop)
}
pub fn raise(signal: Signal) -> Result<()> {
let res = unsafe { libc::raise(signal as libc::c_int) };
Errno::result(res).map(drop)
}
#[cfg(target_os = "freebsd")]
pub type type_of_thread_id = libc::lwpid_t;
#[cfg(target_os = "linux")]
pub type type_of_thread_id = libc::pid_t;
/// Used to request asynchronous notification of certain events, for example,
/// with POSIX AIO, POSIX message queues, and POSIX timers.
// sigval is actually a union of an int and a void*. But it's never really used
// as a pointer, because neither libc nor the kernel ever dereferences it. nix
// therefore presents it as an intptr_t, which is how kevent uses it.
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum SigevNotify {
/// No notification will be delivered
SigevNone,
/// The signal given by `signal` will be delivered to the process. The
/// value in `si_value` will be present in the `si_value` field of the
/// `siginfo_t` structure of the queued signal.
SigevSignal { signal: Signal, si_value: libc::intptr_t },
// Note: SIGEV_THREAD is not implemented because libc::sigevent does not
// expose a way to set the union members needed by SIGEV_THREAD.
/// A new `kevent` is posted to the kqueue `kq`. The `kevent`'s `udata`
/// field will contain the value in `udata`.
#[cfg(any(target_os = "dragonfly", target_os = "freebsd"))]
SigevKevent { kq: RawFd, udata: libc::intptr_t },
/// The signal `signal` is queued to the thread whose LWP ID is given in
/// `thread_id`. The value stored in `si_value` will be present in the
/// `si_value` of the `siginfo_t` structure of the queued signal.
#[cfg(any(target_os = "freebsd", target_os = "linux"))]
SigevThreadId { signal: Signal, thread_id: type_of_thread_id,
si_value: libc::intptr_t },
}
#[cfg(not(target_os = "openbsd"))]
mod sigevent {
use libc;
use std::mem;
use std::ptr;
use std::fmt::{self, Debug};
use super::SigevNotify;
#[cfg(any(target_os = "freebsd", target_os = "linux"))]
use super::type_of_thread_id;
/// Used to request asynchronous notification of the completion of certain
/// events, such as POSIX AIO and timers.
#[repr(C)]
#[derive(Clone, Copy)]
pub struct SigEvent {
sigevent: libc::sigevent
}
impl SigEvent {
/// **Note:** this constructor does not allow the user to set the
/// `sigev_notify_kevent_flags` field. That's considered ok because on FreeBSD
/// at least those flags don't do anything useful. That field is part of a
/// union that shares space with the more genuinely useful fields.
///
/// **Note:** This constructor also doesn't allow the caller to set the
/// `sigev_notify_function` or `sigev_notify_attributes` fields, which are
/// required for `SIGEV_THREAD`. That's considered ok because on no operating
/// system is `SIGEV_THREAD` the most efficient way to deliver AIO
/// notification. FreeBSD and DragonFly BSD programs should prefer `SIGEV_KEVENT`.
/// Linux, Solaris, and portable programs should prefer `SIGEV_THREAD_ID` or
/// `SIGEV_SIGNAL`. That field is part of a union that shares space with the
/// more genuinely useful `sigev_notify_thread_id`.
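///
/// A minimal sketch requesting signal-based notification (assumes `SigEvent` is
/// re-exported at `nix::sys::signal`):
///
/// ```no_run
/// # extern crate libc;
/// # extern crate nix;
/// # use nix::sys::signal::{SigEvent, SigevNotify, Signal};
/// fn main() {
///     let sev = SigEvent::new(SigevNotify::SigevSignal {
///         signal: Signal::SIGUSR1,
///         si_value: 0,
///     });
///     let _raw: libc::sigevent = sev.sigevent();
/// }
/// ```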
pub fn new(sigev_notify: SigevNotify) -> SigEvent {
let mut sev = unsafe { mem::zeroed::<libc::sigevent>()};
sev.sigev_notify = match sigev_notify {
SigevNotify::SigevNone => libc::SIGEV_NONE,
SigevNotify::SigevSignal{..} => libc::SIGEV_SIGNAL,
#[cfg(any(target_os = "dragonfly", target_os = "freebsd"))]
SigevNotify::SigevKevent{..} => libc::SIGEV_KEVENT,
#[cfg(target_os = "freebsd")]
SigevNotify::SigevThreadId{..} => libc::SIGEV_THREAD_ID,
#[cfg(all(target_os = "linux", target_env = "gnu", not(target_arch = "mips")))]
SigevNotify::SigevThreadId{..} => libc::SIGEV_THREAD_ID,
#[cfg(any(all(target_os = "linux", target_env = "musl"), target_arch = "mips"))]
SigevNotify::SigevThreadId{..} => 4 // No SIGEV_THREAD_ID defined
};
sev.sigev_signo = match sigev_notify {
SigevNotify::SigevSignal{ signal, .. } => signal as libc::c_int,
#[cfg(any(target_os = "dragonfly", target_os = "freebsd"))]
SigevNotify::SigevKevent{ kq, ..} => kq,
#[cfg(any(target_os = "linux", target_os = "freebsd"))]
SigevNotify::SigevThreadId{ signal, .. } => signal as libc::c_int,
_ => 0
};
sev.sigev_value.sival_ptr = match sigev_notify {
SigevNotify::SigevNone => ptr::null_mut::<libc::c_void>(),
SigevNotify::SigevSignal{ si_value, .. } => si_value as *mut libc::c_void,
#[cfg(any(target_os = "dragonfly", target_os = "freebsd"))]
SigevNotify::SigevKevent{ udata, .. } => udata as *mut libc::c_void,
#[cfg(any(target_os = "freebsd", target_os = "linux"))]
SigevNotify::SigevThreadId{ si_value, .. } => si_value as *mut libc::c_void,
};
SigEvent::set_tid(&mut sev, &sigev_notify);
SigEvent{sigevent: sev}
}
#[cfg(any(target_os = "freebsd", target_os = "linux"))]
fn set_tid(sev: &mut libc::sigevent, sigev_notify: &SigevNotify)
|
#[cfg(not(any(target_os = "freebsd", target_os = "linux")))]
fn set_tid(_sev: &mut libc::sigevent, _sigev_notify: &SigevNotify) {
}
pub fn sigevent(&self) -> libc::sigevent {
self.sigevent
}
}
impl Debug for SigEvent {
#[cfg(any(target_os = "freebsd", target_os = "linux"))]
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.debug_struct("SigEvent")
.field("sigev_notify", &self.sigevent.sigev_notify)
.field("sigev_signo", &self.sigevent.sigev_signo)
.field("sigev_value", &self.sigevent.sigev_value.sival_ptr)
.field("sigev_notify_thread_id",
&self.sigevent.sigev_notify_thread_id)
.finish()
}
#[cfg(not(any(target_os = "freebsd", target_os = "linux")))]
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.debug_struct("SigEvent")
.field("sigev_notify", &self.sigevent.sigev_notify)
.field("sigev_signo", &self.sigevent.sigev_signo)
.field("sigev_value", &self.sigevent.sigev_value.sival_ptr)
.finish()
}
}
impl<'a> From<&'a libc::sigevent> for SigEvent {
fn from(sigevent: &libc::sigevent) -> Self {
SigEvent{ sigevent: *sigevent }
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_contains() {
let mut mask = SigSet::empty();
mask.add(SIGUSR1);
assert!(mask.contains(SIGUSR1));
assert!(!mask.contains(SIGUSR2));
let all = SigSet::all();
assert!(all.contains(SIGUSR1));
assert!(all.contains(SIGUSR2));
}
#[test]
fn test_clear() {
let mut set = SigSet::all();
set.clear();
for signal in Signal::iterator() {
assert!(!set.contains(signal));
}
}
#[test]
fn test_from_str_round_trips() {
for signal in Signal::iterator() {
assert_eq!(signal.as_ref().parse::<Signal>().unwrap(), signal);
assert_eq!(signal.to_string().parse::<Signal>().unwrap(), signal);
}
}
#[test]
fn test_from_str_invalid_value() {
let errval = Err(Error::Sys(Errno::EINVAL));
assert_eq!("NOSIGNAL".parse::<Signal>(), errval);
assert_eq!("kill".parse::<Signal>(), errval);
assert_eq!("9".parse::<Signal>(), errval);
}
#[test]
fn test_extend() {
let mut one_signal = SigSet::empty();
one_signal.add(SIGUSR1);
let mut two_signals = SigSet::empty();
two_signals.add(SIGUSR2);
two_signals.extend(&one_signal);
assert!(two_signals.contains(SIGUSR1));
assert!(two_signals.contains(SIGUSR2));
}
// This test doesn't actually test get_mask functionality, see the set_mask test for that.
#[test]
fn test_thread_signal_get_mask() {
assert!(SigSet::thread_get_mask().is_ok());
}
#[test]
fn test_thread_signal_set_mask() {
let prev_mask = SigSet::thread_get_mask().expect("Failed to get existing signal mask!");
let mut test_mask = prev_mask;
test_mask.add(SIGUSR1);
assert!(test_mask.thread_set_mask().is_ok());
let new_mask = SigSet::thread_get_mask().expect("Failed to get new mask!");
assert!(new_mask.contains(SIGUSR1));
assert!(!new_mask.contains(SIGUSR2));
prev_mask.thread_set_mask().expect("Failed to revert signal mask!");
}
#[test]
fn test_thread_signal_block() {
let mut mask = SigSet::empty();
mask.add(SIGUSR1);
assert!(mask.thread_block().is_ok());
assert!(SigSet::thread_get_mask().unwrap().contains(SIGUSR1));
}
#[test]
fn test_thread_signal_unblock() {
let mut mask = SigSet::empty();
mask.add(SIGUSR1);
assert!(mask.thread_unblock().is_ok());
assert!(!SigSet::thread_get_mask().unwrap().contains(SIGUSR1));
}
#[test]
fn test_thread_signal_swap() {
let mut mask = SigSet::empty();
mask.add(SIGUSR1);
mask.thread_block().unwrap();
assert!(SigSet::thread_get_mask().unwrap().contains(SIGUSR1));
let mut mask2 = SigSet::empty();
mask2.add(SIGUSR2);
let oldmask = mask2.thread_swap_mask(SigmaskHow::SIG_SETMASK).unwrap();
assert!(oldmask.contains(SIGUSR1));
assert!(!oldmask.contains(SIGUSR2));
assert!(SigSet::thread_get_mask().unwrap().contains(SIGUSR2));
}
#[test]
fn test_sigaction() {
use libc;
extern fn test_sigaction_handler(_: libc::c_int) {}
extern fn test_sigaction_action(_: libc::c_int,
_: *mut libc::siginfo_t, _: *mut libc::c_void) {}
let handler_sig = SigHandler::Handler(test_sigaction_handler);
let flags = SaFlags::SA_ONSTACK | SaFlags::SA_RESTART | SaFlags::SA_SIGINFO;
let mut mask = SigSet::empty();
mask.add(SIGUSR1);
let action_sig = SigAction::new(handler_sig, flags, mask);
assert_eq!(action_sig.flags(), SaFlags::SA_ONSTACK | SaFlags::SA_RESTART);
assert_eq!(action_sig.handler(), handler_sig);
mask = action_sig.mask();
assert!(mask.contains(SIGUSR1));
assert!(!mask.contains(SIGUSR2));
let handler_act = SigHandler::SigAction(test_sigaction_action);
let action_act = SigAction::new(handler_act, flags, mask);
assert_eq!(action_act.handler(), handler_act);
let action_dfl = SigAction::new(SigHandler::SigDfl, flags, mask);
assert_eq!(action_dfl.handler(), SigHandler::SigDfl);
let action_ign = SigAction::new(SigHandler::SigIgn, flags, mask);
assert_eq!(action_ign.handler(), SigHandler::SigIgn);
}
// TODO(#251): Re-enable after figuring out flakiness.
#[cfg(not(any(target_os = "macos", target_os = "ios")))]
#[test]
fn test_sigwait() {
let mut mask = SigSet::empty();
mask.add(SIGUSR1);
mask.add(SIGUSR2);
mask.thread_block().unwrap();
raise(SIGUSR1).unwrap();
assert_eq!(mask.wait().unwrap(), SIGUSR1);
}
}
|
{
sev.sigev_notify_thread_id = match *sigev_notify {
SigevNotify::SigevThreadId { thread_id, .. } => thread_id,
_ => 0 as type_of_thread_id
};
}
|
gbms_bench_test.go
|
package gbms
import (
"testing"
"github.com/cubicdaiya/bms"
)
var (
str = `zCpidUwdtQz659i3-T7xNakVZjN3VDj9k7tJLjkhiJPV8wXudzgUPQ2SdPBxmYAK-fCzWLJkBLfSwERen-CVEL3-dnTCmmkM_WbkCSUz_F2rRQyNhRwmXdrBS-jaDVgaWE
8sctA5hQcc-LQ7jF5NH2gjNBNsG8Z97nC7TWLyTfugrKRKUbi8kFjz2Db4zVmFZPi4USkkGkkY89AtX7yZdL3AmCsgtHfN5ujLJM
5SaA2QJNmNUeT-2ZTd_f9W7dtQz659i3-T7xNakVZjN3VDj9k7tJLCQBjGdtQz659i3-T7xNakVZjN3VDj9k7tJLXEX96LFFx6HLkTDaARuWJT4Ls6YFw8YAMN-UESyWWE
XHHFAY6Zh9dpd5Ph-L9-VyaaMMM_7ayNgZF4MZSU52y2ZwLJ4P48-gSHcNPKkiKCs4bsV7Gp8Z6erihCMdtQz659i3-T7xNakVZjN3VDj9k7tJLNb93_DZd3a4WyPLxECG
Dx7FN85rE4gzVMPYe_yKFzQzdtQz659i3-T7xNakVZjN3VDj9k7tJLyiBc_pS88xmVfX4iLteSUMXFR7zfp--_cTJyYacVPYQ3Zm-jAesKu5Rp-XLWLBjWdNB2C_zJCLfSdtQz659i3-T7xNakVZjN3VDj9k7tJL8xmVfX4iLteSUMXFR7zfp--_cTJyYacVPYQ3Zm-jAesKu5Rp-XLWLBjWdNB2C_zJ8xmVfX4iLteSUMXFR7zfp--_cTJyYacVPYQ3Zm-jAesKu5Rp-XLWLBjWdNB2C_zJ8xmVfX4iLteSUMXFR7zfp--_cTJyYacVPYQ3Zm-jAesKu5Rp-XLWLBjWdNB2C_zJ8xmVfX4iLteSUMXFR7zfp--_cTJyYacVPYQ3Zm-jAesKudtQz659i3-T7xNakVZjN3VDj9k7tJL5Rp-XLWLBjWdNB2C_zJ8xmVfX4iLteSUMXFR7zfp--_cTJyYacVPYQ3Zm-jAesKu5Rp-XLWLBjWdNB2C_zJ8xmVfX4iLteSUMXFR7zfp--_cTJyYacVPYQ3Zm-jAesKu5Rp-XLWLBjWdNB2C_zJ`
ptn = "dtQz659i3-T7xNakVZjN3VDj9k7tJL"
expected = 7
)
func BenchmarkBmsAlgOfGbms(b *testing.B)
|
func BenchmarkBasicAlgOfGbms(b *testing.B) {
bStr := []byte(str)
bPtn := []byte(ptn)
b.ResetTimer()
for i := 0; i < b.N; i++ {
got := searchByBasicAlgo(bStr, bPtn)
if expected != got {
b.Errorf("gbms.searchByBasicAlgo is wrong. expected: %v, got: %v", expected, got)
}
}
}
func BenchmarkBms(b *testing.B) {
for i := 0; i < b.N; i++ {
got := bms.Search(str, ptn)
if expected != got {
b.Errorf("bms.Search is wrong. expected: %v, got: %v", expected, got)
}
}
}
|
{
bStr := []byte(str)
bPtn := []byte(ptn)
b.ResetTimer()
for i := 0; i < b.N; i++ {
got := searchByBmsAlgo(bStr, bPtn)
if expected != got {
b.Errorf("gbms.searchByBmsAlgo is wrong. expected: %v, got: %v", expected, got)
}
}
}
|
root.go
|
package cmd
import (
"fmt"
"github.com/spf13/cobra"
"sonarci/connection/http"
"sonarci/sonar"
sonarFactory "sonarci/sonar/factory"
"time"
)
const (
flagServer = "server"
flagToken = "token"
flagTimout = "timeout"
timeoutDefault = 30000
)
func
|
() *cobra.Command {
rootCmd := &cobra.Command{
Use: "sonarci",
Short: "A simple tool for SonarQube integration",
Long: "SonarCI is a CLI library to help you integrate and use SonarQube inspections.",
CompletionOptions: cobra.CompletionOptions{DisableDefaultCmd: true},
}
rootCmd.PersistentFlags().StringP(flagServer, "s", "", "SonarQube server address")
rootCmd.PersistentFlags().StringP(flagToken, "o", "", "Authentication Token")
rootCmd.PersistentFlags().IntP(flagTimeout, "t", 0, fmt.Sprintf("Timeout in milliseconds. Default value is %d ms", timeoutDefault))
_ = rootCmd.MarkPersistentFlagRequired(flagServer)
_ = rootCmd.MarkPersistentFlagRequired(flagToken)
rootCmd.AddCommand(NewServerVersionCmd())
rootCmd.AddCommand(NewSearchCmd())
rootCmd.AddCommand(NewValidateCmd())
rootCmd.AddCommand(NewDecorateCmd())
return rootCmd
}
func createSonarApi(server string, token string, timeout time.Duration) sonar.Api {
conn := http.NewConnection(server, token, timeout)
return sonarFactory.CreateSonarApi(conn)
}
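// A minimal usage sketch (assumes a main package; the error handling shown is
// illustrative):
//
//	func main() {
//		if err := NewRootCmd().Execute(); err != nil {
//			os.Exit(1)
//		}
//	}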
|
NewRootCmd
|
route.rs
|
use crate::node::{Bit, ByteString, Node, ID_LENGTH};
use std::cell::RefCell;
use std::iter::Iterator;
use std::rc::Rc;
use std::rc::Weak;
const K_BUCKET_SIZE: usize = 4; // The typical K_BUCKET_SIZE is 20; for testing purposes, use 4
// Holds the Vertex node in a RefCell for shared mutability
type LeafNode = Option<Rc<RefCell<Vertex>>>;
// This represents the K-bucket described in the original paper
// K-bucket holds K number of nodes which stores <IP addr, UDP port, Node ID>
#[derive(Debug)]
pub struct KBucket {
node_bucket: Vec<Node>,
depth: usize,
}
impl KBucket {
pub fn new() -> Self {
KBucket {
node_bucket: Vec::with_capacity(K_BUCKET_SIZE),
depth: 0,
}
}
pub fn sort(&mut self) {}
// pub fn get_latest(&self) -> &Node<K, V> {
// // TODO
// }
// pub fn get_oldest(&self) -> &Node<K, V> {
// // TODO
// }
fn split(&self) -> (Option<KBucket>, Option<KBucket>) {
let mut left = KBucket::new();
let mut right = KBucket::new();
left.depth = self.depth + 1;
right.depth = self.depth + 1;
for node in &self.node_bucket {
match node.node_id.index(self.depth + 1) {
1 => left.node_bucket.push(*node),
0 => right.node_bucket.push(*node),
_ => unreachable!(),
}
}
(Some(left), Some(right))
}
}
// Represents a single vertex in the trie of the Route Table.
// This vertex could have a k_bucket in which case it is a leaf.
// If the vertex does not have a k_bucket, it is an inner vertex.
#[derive(Debug)]
pub struct Vertex {
bit: Bit,
k_bucket: Option<KBucket>,
parent: Option<Weak<RefCell<Vertex>>>,
left: LeafNode,
right: LeafNode,
}
impl Vertex {
fn new(bit: Bit) -> Vertex {
Vertex {
bit,
k_bucket: Some(KBucket::new()),
parent: None,
left: None,
right: None,
}
}
// Creates two vertices, splits the current k_bucket, and instantiates two new
// k_buckets with the corresponding nodes
fn split(
vertex: &Rc<RefCell<Vertex>>,
) -> (Option<Rc<RefCell<Vertex>>>, Option<Rc<RefCell<Vertex>>>) {
// Allocate two new vertices for left and right
let mut left = Vertex::new(Bit::One);
let mut right = Vertex::new(Bit::Zero);
// Split the buckets into tuple
let tuple = vertex.borrow().k_bucket.as_ref().unwrap().split();
// Deallocate the current bucket
vertex.borrow_mut().k_bucket = None;
// Link the new k_buckets to left and right vertices
left.k_bucket = tuple.0;
right.k_bucket = tuple.1;
(
Some(Rc::new(RefCell::new(left))),
Some(Rc::new(RefCell::new(right))),
)
}
// Recursively adds a node to the current vertex by finding the closest matching k_bucket
fn add_node<I: Iterator<Item = u8>>(
vertex: &Rc<RefCell<Vertex>>,
node: Node,
node_iter: &mut I,
node_id: &ByteString,
prefix_contained: bool,
)
|
}
// Trie structure representing the Route table composed of k_buckets
#[derive(Debug)]
pub struct RouteTable {
pub length: u64,
node_id: ByteString,
root: LeafNode,
}
// Implementation of the routing table composed of k_buckets
impl RouteTable {
pub fn empty_new(node_id: ByteString) -> Self {
RouteTable {
length: 0,
node_id,
root: Some(Rc::new(RefCell::new(Vertex::new(Bit::Root)))),
}
}
// Add vertex to the trie that contains k_bucket
pub fn add_vertex() {}
// Add a node to the k_bucket starting from the root of the trie
pub fn add_node(&mut self, node: Node) {
match self.root.as_mut() {
Some(x) => {
let mut iter = node.node_id.into_iter();
Vertex::add_node(x, node, &mut iter, &self.node_id, true);
// TODO: Check invariant and edge cases
self.length += 1;
}
None => {
// Root does not exist, Error handling
panic!("Root does not exist");
}
}
}
// Finds the closest alpha (a system-wide parameter) nodes to the given node id and returns them
fn find_closest(&self, node_id: [u8; ID_LENGTH]) -> Vec<Node> {
let alpha_nodes: Vec<Node> = Vec::new();
match self.root {
Some(ref x) => match &x.borrow_mut().k_bucket {
Some(bucket) => {}
None => {}
},
None => {}
}
return alpha_nodes;
}
}
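// A minimal usage sketch (assumes `Node` and `ByteString` values constructed
// elsewhere via crate::node; the names here are illustrative):
//
//     let mut table = RouteTable::empty_new(local_id);
//     table.add_node(peer_node);
//     assert_eq!(table.length, 1);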
|
{
let has_k_bucket: bool;
let mut split: bool = false;
{
// Immutably borrow through the RefCell
// Check if there is a k_bucket
// This stores the result in has_k_bucket, and drops the borrow as it exits this scope
has_k_bucket = vertex.borrow().k_bucket.is_some();
// End borrow
}
match has_k_bucket {
// Base case: Vertex has a k_bucket
true => {
{
// Borrow the vertex mutably
let mut vert = vertex.borrow_mut();
let bucket = vert.k_bucket.as_mut().unwrap();
// Check that k_bucket is not full, add node to the k_bucket, and return
if bucket.node_bucket.len() < K_BUCKET_SIZE {
bucket.node_bucket.push(node);
return;
}
// If it didn't return, the k_bucket is full.
// Remember full node_id length edge cases.
if prefix_contained {
// Default to splitting; the match below clears this flag when the
// vertex's bit no longer matches the node id's next bit.
split = true;
let node_iter_next: u8 = node_iter.next().unwrap();
// Check that current vertex is a prefix of the node id to be added
// If it isn't, perform logic to replace the LRU cache
match node_iter_next {
1 => {
if !matches!(vert.bit, Bit::One) {
split = false;
// TODO: Handle logic for pinging and replacing current k-bucket list
}
}
0 => {
if !matches!(vert.bit, Bit::Zero) {
split = false;
// TODO: Handle logic for pinging and replacing current k-bucket list
}
}
_ => {}
}
}
// End borrow
}
// If it is contained in the prefix, proceed to splitting process
if split {
// Split the k_bucket into two
let (left_vert, right_vert) = Vertex::split(vertex);
{
// Mutably borrow the Left and Right children, and add their parent as a Weak pointer
left_vert.as_ref().unwrap().borrow_mut().parent =
Some(Rc::downgrade(&Rc::clone(vertex)));
right_vert.as_ref().unwrap().borrow_mut().parent =
Some(Rc::downgrade(&Rc::clone(vertex)));
// End borrow
}
{
// Mutably borrow the parent, and set the Left and Right child fields
vertex.borrow_mut().left = left_vert;
vertex.borrow_mut().right = right_vert;
// End borrow
}
// Recursively trickle down once more to add the node
Vertex::add_node(vertex, node, node_iter, &node_id, false);
}
}
// Recursive step: Vertex has no k_bucket
// Check next bit, borrow vertex, and recursively trickle down
false => match node_iter.next().unwrap() {
1 => match &vertex.borrow().left {
Some(vert) => {
Vertex::add_node(vert, node, node_iter, &node_id, prefix_contained);
}
None => {}
},
0 => match &vertex.borrow().right {
Some(vert) => {
Vertex::add_node(vert, node, node_iter, &node_id, prefix_contained);
}
None => {}
},
_ => unreachable!(),
},
}
}
|
date_attribute.py
|
import ciso8601
import dateutil.parser
from cartographer.field_types import SchemaAttribute
from cartographer.utils.datetime import as_utc, make_naive
class DateAttribute(SchemaAttribute):
@classmethod
def format_value_for_json(cls, value):
return as_utc(value).isoformat()
def from_json(self, serialized_value):
if self.is_nullable and serialized_value is None:
return None
try:
# ciso8601 is significantly faster than dateutil.parser for parsing iso8601 strings, so we try it first
parsed_value = ciso8601.parse_datetime(serialized_value)
assert parsed_value is not None # Caveat: asserts won't run if python is run with -O.
|
except Exception:
# Fall back to the slower but more lenient dateutil parser.
parsed_value = dateutil.parser.parse(serialized_value)
return make_naive(parsed_value)
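# A minimal usage sketch (assumes the SchemaAttribute base class permits
# no-argument construction; the value shown is illustrative):
#
#   attr = DateAttribute()
#   attr.from_json("2019-01-01T12:00:00+00:00")  # -> naive datetime via make_naive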
| |
pitz_daily_runner.py
|
# -*- coding: utf-8 -*-
"""Pitz Daily
This case uses the pitzDaily example from the OpenFOAM tutorials
and varies two parameters: Reynolds number and height of the inlet.
It returns the pressure difference between inlet and outlet.
"""
|
from active_learning_cfd.cfd_case import CFDCase
import os
class PitzDaily(CFDCase):
mesher = "blockMesh"
solver = "simpleFoam"
template = "pitzDaily"
parameter_names = ("reynolds", "entryHeight")
output_list = (("deltaP", r"subtract\(p\) = (.+)"),)
def __call__(self, parameters):
assert len(parameters) == len(self.parameter_names)
parameter_dict = dict(zip(self.parameter_names, parameters))
parameter_dict["reynolds"] = np.power(10, parameter_dict["reynolds"])
self.solve(parameter_dict)
return self.results["deltaP"]
if __name__ == "__main__":
case = PitzDaily()
reynolds = 50800.0
entryHeight = 25.4
print("deltaP = {}".format(case([np.log10(reynolds), entryHeight])))
|
import numpy as np
|
ext_shader_demote_to_helper_invocation.rs
|
#[doc = "<s>Vulkan Manual Page</s> · Constant"]
#[doc(alias = "VK_EXT_SHADER_DEMOTE_TO_HELPER_INVOCATION_SPEC_VERSION")]
pub const EXT_SHADER_DEMOTE_TO_HELPER_INVOCATION_SPEC_VERSION: u32 = 1;
#[doc = "<s>Vulkan Manual Page</s> · Constant"]
#[doc(alias = "VK_EXT_SHADER_DEMOTE_TO_HELPER_INVOCATION_EXTENSION_NAME")]
pub const EXT_SHADER_DEMOTE_TO_HELPER_INVOCATION_EXTENSION_NAME: *const std::os::raw::c_char = crate::cstr!("VK_EXT_shader_demote_to_helper_invocation");
#[doc = "Provided by [`crate::extensions::ext_shader_demote_to_helper_invocation`]"]
impl crate::vk1_0::StructureType {
pub const PHYSICAL_DEVICE_SHADER_DEMOTE_TO_HELPER_INVOCATION_FEATURES_EXT: Self = Self(1000276000);
}
impl<'a> crate::ExtendableFrom<'a, PhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT> for crate::vk1_0::DeviceCreateInfoBuilder<'a> {}
impl<'a> crate::ExtendableFrom<'a, PhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXTBuilder<'_>> for crate::vk1_0::DeviceCreateInfoBuilder<'a> {}
impl<'a> crate::ExtendableFrom<'a, PhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT> for crate::vk1_1::PhysicalDeviceFeatures2Builder<'a> {}
impl<'a> crate::ExtendableFrom<'a, PhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXTBuilder<'_>> for crate::vk1_1::PhysicalDeviceFeatures2Builder<'a> {}
#[doc = "[Vulkan Manual Page](https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT.html) · Structure"]
#[doc(alias = "VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT")]
#[derive(Copy, Clone)]
#[repr(C)]
pub struct PhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT {
pub s_type: crate::vk1_0::StructureType,
pub p_next: *mut std::ffi::c_void,
pub shader_demote_to_helper_invocation: crate::vk1_0::Bool32,
}
impl PhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT {
pub const STRUCTURE_TYPE: crate::vk1_0::StructureType = crate::vk1_0::StructureType::PHYSICAL_DEVICE_SHADER_DEMOTE_TO_HELPER_INVOCATION_FEATURES_EXT;
}
impl Default for PhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT {
fn default() -> Self {
Self { s_type: Self::STRUCTURE_TYPE, p_next: std::ptr::null_mut(), shader_demote_to_helper_invocation: Default::default() }
}
}
impl std::fmt::Debug for PhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
f.debug_struct("PhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT").field("s_type", &self.s_type).field("p_next", &self.p_next).field("shader_demote_to_helper_invocation", &(self.shader_demote_to_helper_invocation != 0)).finish()
}
}
impl PhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT {
#[inline]
pub fn into_builder<'a>(self) -> PhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXTBuilder<'a> {
PhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXTBuilder(self, std::marker::PhantomData)
}
}
#[derive(Copy, Clone)]
#[doc = "[Vulkan Manual Page](https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT.html) · Builder of [`PhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT`]"]
#[repr(transparent)]
pub struct PhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXTBuilder<'a>(PhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT, std::marker::PhantomData<&'a ()>);
impl<'a> PhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXTBuilder<'a> {
#[inline]
pub fn new() -> PhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXTBuilder<'a> {
PhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXTBuilder(Default::default(), std::marker::PhantomData)
}
#[inline]
#[must_use]
pub fn shader_demote_to_helper_invocation(mut self, shader_demote_to_helper_invocation: bool) -> Self {
self.0.shader_demote_to_helper_invocation = shader_demote_to_helper_invocation as _;
self
}
#[inline]
#[doc = r" Discards all lifetime information."]
#[doc = r" Use the `Deref` and `DerefMut` implementations if possible."]
pub fn build_dangling(self) -> PhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT {
|
impl<'a> std::default::Default for PhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXTBuilder<'a> {
fn default() -> PhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXTBuilder<'a> {
Self::new()
}
}
impl<'a> std::fmt::Debug for PhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXTBuilder<'a> {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
std::fmt::Debug::fmt(&self.0, f)
}
}
impl<'a> std::ops::Deref for PhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXTBuilder<'a> {
type Target = PhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl<'a> std::ops::DerefMut for PhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXTBuilder<'a> {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
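// A minimal usage sketch (assumes the erupt-style builder API generated above):
//
//     let features = PhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXTBuilder::new()
//         .shader_demote_to_helper_invocation(true);
//     // `features` derefs to the raw struct, and the `ExtendableFrom` impls above
//     // allow it to extend `DeviceCreateInfoBuilder` / `PhysicalDeviceFeatures2Builder`.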
|
self.0
}
}
|
summarizer.py
|
from sumy.parsers.plaintext import PlaintextParser  # We're choosing a plaintext parser here; other parsers are available for HTML etc.
from sumy.nlp.tokenizers import Tokenizer
from sumy.summarizers.lex_rank import LexRankSummarizer  # We're choosing LexRank; other algorithms are also built in
def get_summary(text):
# file = "plain_text.txt" #name of the plain-text file
# parser = PlaintextParser.from_file(file, Tokenizer("english"))
parser = PlaintextParser.from_string(text, Tokenizer("english"))
|
summarizer = LexRankSummarizer()
summary = summarizer(parser.document, 5)  # Summarize the document with 5 sentences
# for sentence in summary:
# print(sentence)
return summary
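# A minimal usage sketch:
#
#   text = "First sentence. Second sentence. Third sentence."
#   for sentence in get_summary(text):
#       print(sentence)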
| |
lab.go
|
// *** WARNING: this file was generated by the Kulado Terraform Bridge (tfgen) Tool. ***
// *** Do not edit by hand unless you're certain you know what you are doing! ***
package devtest
import (
"github.com/pkg/errors"
"github.com/kulado/kulado/sdk/go/kulado"
)
// Manages a Dev Test Lab.
//
// > This content is derived from https://github.com/terraform-providers/terraform-provider-azurerm/blob/master/website/docs/r/dev_test_lab.html.markdown.
type Lab struct {
s *kulado.ResourceState
}
|
func NewLab(ctx *kulado.Context,
name string, args *LabArgs, opts ...kulado.ResourceOpt) (*Lab, error) {
if args == nil || args.ResourceGroupName == nil {
return nil, errors.New("missing required argument 'ResourceGroupName'")
}
inputs := make(map[string]interface{})
if args == nil {
inputs["location"] = nil
inputs["name"] = nil
inputs["resourceGroupName"] = nil
inputs["storageType"] = nil
inputs["tags"] = nil
} else {
inputs["location"] = args.Location
inputs["name"] = args.Name
inputs["resourceGroupName"] = args.ResourceGroupName
inputs["storageType"] = args.StorageType
inputs["tags"] = args.Tags
}
inputs["artifactsStorageAccountId"] = nil
inputs["defaultPremiumStorageAccountId"] = nil
inputs["defaultStorageAccountId"] = nil
inputs["keyVaultId"] = nil
inputs["premiumDataDiskStorageAccountId"] = nil
inputs["uniqueIdentifier"] = nil
s, err := ctx.RegisterResource("azure:devtest/lab:Lab", name, true, inputs, opts...)
if err != nil {
return nil, err
}
return &Lab{s: s}, nil
}
// GetLab gets an existing Lab resource's state with the given name, ID, and optional
// state properties that are used to uniquely qualify the lookup (nil if not required).
func GetLab(ctx *kulado.Context,
name string, id kulado.ID, state *LabState, opts ...kulado.ResourceOpt) (*Lab, error) {
inputs := make(map[string]interface{})
if state != nil {
inputs["artifactsStorageAccountId"] = state.ArtifactsStorageAccountId
inputs["defaultPremiumStorageAccountId"] = state.DefaultPremiumStorageAccountId
inputs["defaultStorageAccountId"] = state.DefaultStorageAccountId
inputs["keyVaultId"] = state.KeyVaultId
inputs["location"] = state.Location
inputs["name"] = state.Name
inputs["premiumDataDiskStorageAccountId"] = state.PremiumDataDiskStorageAccountId
inputs["resourceGroupName"] = state.ResourceGroupName
inputs["storageType"] = state.StorageType
inputs["tags"] = state.Tags
inputs["uniqueIdentifier"] = state.UniqueIdentifier
}
s, err := ctx.ReadResource("azure:devtest/lab:Lab", name, id, inputs, opts...)
if err != nil {
return nil, err
}
return &Lab{s: s}, nil
}
// URN is this resource's unique name assigned by Kulado.
func (r *Lab) URN() *kulado.URNOutput {
return r.s.URN()
}
// ID is this resource's unique identifier assigned by its provider.
func (r *Lab) ID() *kulado.IDOutput {
return r.s.ID()
}
// The ID of the Storage Account used for Artifact Storage.
func (r *Lab) ArtifactsStorageAccountId() *kulado.StringOutput {
return (*kulado.StringOutput)(r.s.State["artifactsStorageAccountId"])
}
// The ID of the Default Premium Storage Account for this Dev Test Lab.
func (r *Lab) DefaultPremiumStorageAccountId() *kulado.StringOutput {
return (*kulado.StringOutput)(r.s.State["defaultPremiumStorageAccountId"])
}
// The ID of the Default Storage Account for this Dev Test Lab.
func (r *Lab) DefaultStorageAccountId() *kulado.StringOutput {
return (*kulado.StringOutput)(r.s.State["defaultStorageAccountId"])
}
// The ID of the Key Vault used for this Dev Test Lab.
func (r *Lab) KeyVaultId() *kulado.StringOutput {
return (*kulado.StringOutput)(r.s.State["keyVaultId"])
}
// Specifies the supported Azure location where the Dev Test Lab should exist. Changing this forces a new resource to be created.
func (r *Lab) Location() *kulado.StringOutput {
return (*kulado.StringOutput)(r.s.State["location"])
}
// Specifies the name of the Dev Test Lab. Changing this forces a new resource to be created.
func (r *Lab) Name() *kulado.StringOutput {
return (*kulado.StringOutput)(r.s.State["name"])
}
// The ID of the Storage Account used for Storage of Premium Data Disk.
func (r *Lab) PremiumDataDiskStorageAccountId() *kulado.StringOutput {
return (*kulado.StringOutput)(r.s.State["premiumDataDiskStorageAccountId"])
}
// The name of the resource group under which the Dev Test Lab resource has to be created. Changing this forces a new resource to be created.
func (r *Lab) ResourceGroupName() *kulado.StringOutput {
return (*kulado.StringOutput)(r.s.State["resourceGroupName"])
}
// The type of storage used by the Dev Test Lab. Possible values are `Standard` and `Premium`. Defaults to `Premium`. Changing this forces a new resource to be created.
func (r *Lab) StorageType() *kulado.StringOutput {
return (*kulado.StringOutput)(r.s.State["storageType"])
}
// A mapping of tags to assign to the resource.
func (r *Lab) Tags() *kulado.MapOutput {
return (*kulado.MapOutput)(r.s.State["tags"])
}
// The unique immutable identifier of the Dev Test Lab.
func (r *Lab) UniqueIdentifier() *kulado.StringOutput {
return (*kulado.StringOutput)(r.s.State["uniqueIdentifier"])
}
// Input properties used for looking up and filtering Lab resources.
type LabState struct {
// The ID of the Storage Account used for Artifact Storage.
ArtifactsStorageAccountId interface{}
// The ID of the Default Premium Storage Account for this Dev Test Lab.
DefaultPremiumStorageAccountId interface{}
// The ID of the Default Storage Account for this Dev Test Lab.
DefaultStorageAccountId interface{}
// The ID of the Key Vault used for this Dev Test Lab.
KeyVaultId interface{}
// Specifies the supported Azure location where the Dev Test Lab should exist. Changing this forces a new resource to be created.
Location interface{}
// Specifies the name of the Dev Test Lab. Changing this forces a new resource to be created.
Name interface{}
// The ID of the Storage Account used for Storage of Premium Data Disk.
PremiumDataDiskStorageAccountId interface{}
// The name of the resource group under which the Dev Test Lab resource has to be created. Changing this forces a new resource to be created.
ResourceGroupName interface{}
// The type of storage used by the Dev Test Lab. Possible values are `Standard` and `Premium`. Defaults to `Premium`. Changing this forces a new resource to be created.
StorageType interface{}
// A mapping of tags to assign to the resource.
Tags interface{}
// The unique immutable identifier of the Dev Test Lab.
UniqueIdentifier interface{}
}
// The set of arguments for constructing a Lab resource.
type LabArgs struct {
// Specifies the supported Azure location where the Dev Test Lab should exist. Changing this forces a new resource to be created.
Location interface{}
// Specifies the name of the Dev Test Lab. Changing this forces a new resource to be created.
Name interface{}
// The name of the resource group under which the Dev Test Lab resource has to be created. Changing this forces a new resource to be created.
ResourceGroupName interface{}
// The type of storage used by the Dev Test Lab. Possible values are `Standard` and `Premium`. Defaults to `Premium`. Changing this forces a new resource to be created.
StorageType interface{}
// A mapping of tags to assign to the resource.
Tags interface{}
}
|
// NewLab registers a new resource with the given unique name, arguments, and options.
|
paths.ts
|
class Path<T> {
children: {
[key: string]: Path<T>;
} = {};
$: {
[key: string]: Path<T>;
} = {};
$x?: Path<T>;
$xx?: Path<T>;
value?: {
[ref: string]: T;
};
}
export interface Lookup<T> {
keys: { [key: string]: string };
value: { [ref: string]: T };
path: string;
fullPath: string;
}
class Lookuper<T> {
private readonly parent: Path<T>;
private child?: Path<T>;
private readonly parts: string[];
private result: Lookup<T>[] = [];
isEol: boolean = false;
constructor(parent: Path<T>, parts: string[]) {
this.parent = parent;
this.parts = parts;
}
private _addResult(
keys: string[][],
value: { [ref: string]: T },
pathUntil: number
) {
this.result.push({
keys: keys.reduce((res: { [key: string]: string }, [name, value]) => {
res[name] = value;
return res;
}, {}),
value,
path: this.parts.slice(0, pathUntil).join('.'),
fullPath: this.parts.join('.'),
});
|
this._lookup(this.parent);
this.isEol =
!this.child ||
(Object.keys(this.child.children).length === 0 &&
(!this.child.value || Object.keys(this.child.value).length === 0) &&
Object.keys(this.child.$).length === 0 &&
!this.child.$x &&
!this.child.$xx);
return this.result;
}
private _lookup(
parent: Path<T>,
index = 0,
keys: string[][] = [],
pathUntil = 0
) {
if (!parent || pathUntil >= this.parts.length + 1) {
return;
}
if (index === this.parts.length) {
this.child = parent;
}
if (parent.$xx && parent.$xx.value) {
this._addResult(keys, parent.$xx.value, pathUntil);
}
for (const [name, p] of Object.entries(parent.$)) {
this._lookup(
p,
index + 1,
keys.slice().concat([[name, this.parts[index]]]),
index + 1
);
}
if (parent.$x) {
this._lookup(parent.$x, index + 1, keys, pathUntil);
if (parent.$x.value) {
const restOfPathIsWildcard = this.parts
.slice(index)
.every(p => p === '*' || p === '**');
if (restOfPathIsWildcard) {
this._addResult(keys, parent.$x.value, pathUntil);
}
}
}
if (index === this.parts.length && parent.value) {
this._addResult(keys, parent.value, pathUntil);
} else if (parent.children) {
this._lookup(
parent.children[this.parts[index]],
index + 1,
keys,
index + 1
);
}
}
}
export class Paths<T> {
private map: Path<T> = new Path<T>();
private refs: { [key: string]: string } = {};
add = (path: string, ref: string, input: T) => {
this.refs[ref] = path;
const parts = path.split('.');
let parent = this.map;
for (let part of parts) {
if (part === '*') {
parent = parent.$x = parent.$x || new Path();
} else if (part === '**') {
parent = parent.$xx = parent.$xx || new Path();
} else if (part.startsWith('$')) {
parent = parent.$[part] = parent.$[part] || new Path();
} else {
parent = parent.children[part] = parent.children[part] || new Path();
}
}
parent.value = parent.value || {};
parent.value[ref] = input;
};
lookupByString(path: string): Lookup<T>[] {
return this.lookup(path.split('.')).lookups;
}
lookup(path: string[]): { isEol: boolean; lookups: Lookup<T>[] } {
const lookup = new Lookuper<T>(this.map, path);
const lookups = lookup.lookup();
return {
isEol: lookup.isEol,
lookups,
};
}
remove(ref: string) {
const path = this.refs[ref];
if (!path) return;
const parts = path.split('.');
let parent: Path<T> | undefined = this.map;
for (let part of parts) {
if (part.startsWith('$')) {
parent = parent?.$[part];
} else if (part === '*') {
parent = parent?.$x;
} else if (part === '**') {
parent = parent?.$xx;
break;
} else {
parent = parent?.children[part];
}
}
if (parent && parent.value) {
delete parent.value[ref];
if (Object.keys(parent.value).length === 0) {
delete parent.value;
}
}
}
}
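// A minimal usage sketch:
//
//   const paths = new Paths<number>();
//   paths.add('users.$id.name', 'ref1', 1);
//   const { lookups } = paths.lookup(['users', '42', 'name']);
//   // lookups[0].keys -> { $id: '42' }, lookups[0].value -> { ref1: 1 }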
|
}
lookup(): Lookup<T>[] {
|
device.rs
|
use auxil::ShaderStage;
use hal::{
adapter::MemoryProperties, buffer, device, format, image, memory, pass, pool, pso,
pso::VertexInputRate, query, queue::QueueFamilyId, window,
};
use winapi::{
shared::{dxgi, dxgiformat, dxgitype, minwindef::TRUE, windef::HWND, winerror},
um::{d3d11, d3d11_1, d3d11sdklayers, d3dcommon},
};
use wio::com::ComPtr;
use std::{
borrow::Borrow,
fmt, mem,
ops::Range,
ptr,
sync::{Arc, Weak},
};
use parking_lot::{Condvar, Mutex, RwLock};
use crate::{
conv,
debug::{set_debug_name, set_debug_name_with_suffix, verify_debug_ascii},
internal, shader, Backend, Buffer, BufferView, CommandBuffer, CommandPool, ComputePipeline,
DescriptorContent, DescriptorIndex, DescriptorPool, DescriptorSet, DescriptorSetInfo,
DescriptorSetLayout, Fence, Framebuffer, GraphicsPipeline, Image, ImageView, InternalBuffer,
InternalImage, Memory, MultiStageData, PipelineLayout, QueryPool, RawFence,
RegisterAccumulator, RegisterData, RenderPass, ResourceIndex, Sampler, Semaphore, ShaderModule,
SubpassDesc, ViewInfo,
};
// TODO: expose coherent type 0x2 when it's properly supported
const BUFFER_TYPE_MASK: u32 = 0x1 | 0x4;
struct InputLayout {
raw: ComPtr<d3d11::ID3D11InputLayout>,
required_bindings: u32,
max_vertex_bindings: u32,
topology: d3d11::D3D11_PRIMITIVE_TOPOLOGY,
vertex_strides: Vec<u32>,
}
#[derive(Clone)]
pub struct DepthStencilState {
pub raw: ComPtr<d3d11::ID3D11DepthStencilState>,
pub stencil_ref: pso::State<pso::StencilValue>,
pub read_only: bool,
}
pub struct Device {
raw: ComPtr<d3d11::ID3D11Device>,
raw1: Option<ComPtr<d3d11_1::ID3D11Device1>>,
pub(crate) context: ComPtr<d3d11::ID3D11DeviceContext>,
features: hal::Features,
memory_properties: MemoryProperties,
internal: Arc<internal::Internal>,
}
impl fmt::Debug for Device {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.write_str("Device")
}
}
impl Drop for Device {
fn drop(&mut self) {
if let Ok(debug) = self.raw.cast::<d3d11sdklayers::ID3D11Debug>() {
unsafe {
debug.ReportLiveDeviceObjects(d3d11sdklayers::D3D11_RLDO_DETAIL);
}
}
}
}
unsafe impl Send for Device {}
unsafe impl Sync for Device {}
impl Device {
pub fn new(
device: ComPtr<d3d11::ID3D11Device>,
device1: Option<ComPtr<d3d11_1::ID3D11Device1>>,
context: ComPtr<d3d11::ID3D11DeviceContext>,
features: hal::Features,
memory_properties: MemoryProperties,
) -> Self {
Device {
internal: Arc::new(internal::Internal::new(&device)),
raw: device,
raw1: device1,
context,
features,
memory_properties,
}
}
pub fn as_raw(&self) -> *mut d3d11::ID3D11Device {
self.raw.as_raw()
}
fn create_rasterizer_state(
&self,
rasterizer_desc: &pso::Rasterizer,
multisampling_desc: &Option<pso::Multisampling>,
) -> Result<ComPtr<d3d11::ID3D11RasterizerState>, pso::CreationError> {
let mut rasterizer = ptr::null_mut();
let desc = conv::map_rasterizer_desc(rasterizer_desc, multisampling_desc);
let hr = unsafe {
self.raw
.CreateRasterizerState(&desc, &mut rasterizer as *mut *mut _ as *mut *mut _)
};
if winerror::SUCCEEDED(hr) {
Ok(unsafe { ComPtr::from_raw(rasterizer) })
} else {
Err(pso::CreationError::Other)
}
}
fn create_blend_state(
&self,
blend_desc: &pso::BlendDesc,
multisampling: &Option<pso::Multisampling>,
) -> Result<ComPtr<d3d11::ID3D11BlendState>, pso::CreationError> {
let mut blend = ptr::null_mut();
let desc = conv::map_blend_desc(blend_desc, multisampling);
let hr = unsafe {
self.raw
.CreateBlendState(&desc, &mut blend as *mut *mut _ as *mut *mut _)
};
if winerror::SUCCEEDED(hr) {
Ok(unsafe { ComPtr::from_raw(blend) })
} else {
Err(pso::CreationError::Other)
}
}
fn create_depth_stencil_state(
&self,
depth_desc: &pso::DepthStencilDesc,
) -> Result<DepthStencilState, pso::CreationError> {
let mut depth = ptr::null_mut();
let (desc, stencil_ref, read_only) = conv::map_depth_stencil_desc(depth_desc);
let hr = unsafe {
self.raw
.CreateDepthStencilState(&desc, &mut depth as *mut *mut _ as *mut *mut _)
};
if winerror::SUCCEEDED(hr) {
Ok(DepthStencilState {
raw: unsafe { ComPtr::from_raw(depth) },
stencil_ref,
read_only,
})
} else {
Err(pso::CreationError::Other)
}
}
fn create_input_layout(
&self,
vs: ComPtr<d3dcommon::ID3DBlob>,
vertex_buffers: &[pso::VertexBufferDesc],
attributes: &[pso::AttributeDesc],
input_assembler: &pso::InputAssemblerDesc,
vertex_semantic_remapping: auxil::FastHashMap<u32, Option<(u32, u32)>>,
) -> Result<InputLayout, pso::CreationError> {
let mut layout = ptr::null_mut();
let mut vertex_strides = Vec::new();
let mut required_bindings = 0u32;
let mut max_vertex_bindings = 0u32;
for buffer in vertex_buffers {
required_bindings |= 1 << buffer.binding as u32;
max_vertex_bindings = max_vertex_bindings.max(1u32 + buffer.binding as u32);
while vertex_strides.len() <= buffer.binding as usize {
vertex_strides.push(0);
}
vertex_strides[buffer.binding as usize] = buffer.stride;
}
// See [`shader::introspect_spirv_vertex_semantic_remapping`] for details of why this is needed.
let semantics: Vec<_> = attributes
.iter()
.map(
|attrib| match vertex_semantic_remapping.get(&attrib.location) {
Some(Some((major, minor))) => {
let name = std::borrow::Cow::Owned(format!("TEXCOORD{}_\0", major));
let location = *minor;
(name, location)
}
_ => {
let name = std::borrow::Cow::Borrowed("TEXCOORD\0");
let location = attrib.location;
(name, location)
}
},
)
.collect();
let input_elements = attributes
.iter()
.zip(semantics.iter())
.filter_map(|(attrib, (semantic_name, semantic_index))| {
let buffer_desc = match vertex_buffers
.iter()
.find(|buffer_desc| buffer_desc.binding == attrib.binding)
{
Some(buffer_desc) => buffer_desc,
None => {
// TODO:
// error!("Couldn't find associated vertex buffer description {:?}", attrib.binding);
return Some(Err(pso::CreationError::Other));
}
};
let (slot_class, step_rate) = match buffer_desc.rate {
VertexInputRate::Vertex => (d3d11::D3D11_INPUT_PER_VERTEX_DATA, 0),
VertexInputRate::Instance(divisor) => {
(d3d11::D3D11_INPUT_PER_INSTANCE_DATA, divisor)
}
};
let format = attrib.element.format;
Some(Ok(d3d11::D3D11_INPUT_ELEMENT_DESC {
SemanticName: semantic_name.as_ptr() as *const _, // Semantic name used by SPIRV-Cross
SemanticIndex: *semantic_index,
Format: match conv::map_format(format) {
Some(fm) => fm,
None => {
// TODO:
// error!("Unable to find DXGI format for {:?}", format);
return Some(Err(pso::CreationError::Other));
}
},
InputSlot: attrib.binding as _,
AlignedByteOffset: attrib.element.offset,
InputSlotClass: slot_class,
InstanceDataStepRate: step_rate as _,
}))
})
.collect::<Result<Vec<_>, _>>()?;
let hr = unsafe {
self.raw.CreateInputLayout(
input_elements.as_ptr(),
input_elements.len() as _,
vs.GetBufferPointer(),
vs.GetBufferSize(),
&mut layout as *mut *mut _ as *mut *mut _,
)
};
if winerror::SUCCEEDED(hr) {
let topology = conv::map_topology(input_assembler);
Ok(InputLayout {
raw: unsafe { ComPtr::from_raw(layout) },
required_bindings,
max_vertex_bindings,
topology,
vertex_strides,
})
} else {
error!("CreateInputLayout error 0x{:X}", hr);
Err(pso::CreationError::Other)
}
}
fn create_vertex_shader(
&self,
blob: ComPtr<d3dcommon::ID3DBlob>,
) -> Result<ComPtr<d3d11::ID3D11VertexShader>, pso::CreationError> {
let mut vs = ptr::null_mut();
let hr = unsafe {
self.raw.CreateVertexShader(
blob.GetBufferPointer(),
blob.GetBufferSize(),
ptr::null_mut(),
&mut vs as *mut *mut _ as *mut *mut _,
)
};
if winerror::SUCCEEDED(hr) {
Ok(unsafe { ComPtr::from_raw(vs) })
} else {
Err(pso::CreationError::Other)
}
}
fn create_pixel_shader(
&self,
blob: ComPtr<d3dcommon::ID3DBlob>,
) -> Result<ComPtr<d3d11::ID3D11PixelShader>, pso::CreationError> {
let mut ps = ptr::null_mut();
let hr = unsafe {
self.raw.CreatePixelShader(
blob.GetBufferPointer(),
blob.GetBufferSize(),
ptr::null_mut(),
&mut ps as *mut *mut _ as *mut *mut _,
)
};
if winerror::SUCCEEDED(hr) {
Ok(unsafe { ComPtr::from_raw(ps) })
} else {
Err(pso::CreationError::Other)
}
}
fn create_geometry_shader(
&self,
blob: ComPtr<d3dcommon::ID3DBlob>,
) -> Result<ComPtr<d3d11::ID3D11GeometryShader>, pso::CreationError> {
let mut gs = ptr::null_mut();
let hr = unsafe {
self.raw.CreateGeometryShader(
blob.GetBufferPointer(),
blob.GetBufferSize(),
ptr::null_mut(),
&mut gs as *mut *mut _ as *mut *mut _,
)
};
if winerror::SUCCEEDED(hr) {
Ok(unsafe { ComPtr::from_raw(gs) })
} else {
Err(pso::CreationError::Other)
}
}
fn create_hull_shader(
&self,
blob: ComPtr<d3dcommon::ID3DBlob>,
) -> Result<ComPtr<d3d11::ID3D11HullShader>, pso::CreationError> {
let mut hs = ptr::null_mut();
let hr = unsafe {
self.raw.CreateHullShader(
blob.GetBufferPointer(),
blob.GetBufferSize(),
ptr::null_mut(),
&mut hs as *mut *mut _ as *mut *mut _,
)
};
if winerror::SUCCEEDED(hr) {
Ok(unsafe { ComPtr::from_raw(hs) })
} else {
Err(pso::CreationError::Other)
}
}
fn create_domain_shader(
&self,
blob: ComPtr<d3dcommon::ID3DBlob>,
) -> Result<ComPtr<d3d11::ID3D11DomainShader>, pso::CreationError> {
let mut ds = ptr::null_mut();
let hr = unsafe {
self.raw.CreateDomainShader(
blob.GetBufferPointer(),
blob.GetBufferSize(),
ptr::null_mut(),
&mut ds as *mut *mut _ as *mut *mut _,
)
};
if winerror::SUCCEEDED(hr) {
Ok(unsafe { ComPtr::from_raw(ds) })
} else {
Err(pso::CreationError::Other)
}
}
fn create_compute_shader(
&self,
blob: ComPtr<d3dcommon::ID3DBlob>,
) -> Result<ComPtr<d3d11::ID3D11ComputeShader>, pso::CreationError> {
let mut cs = ptr::null_mut();
let hr = unsafe {
self.raw.CreateComputeShader(
blob.GetBufferPointer(),
blob.GetBufferSize(),
ptr::null_mut(),
&mut cs as *mut *mut _ as *mut *mut _,
)
};
if winerror::SUCCEEDED(hr) {
Ok(unsafe { ComPtr::from_raw(cs) })
} else {
Err(pso::CreationError::Other)
}
}
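    // Resolves a pipeline entry point to a compiled bytecode blob. Only SPIR-V
    // modules are handled (compiled via `shader::compile_spirv_entrypoint`);
    // precompiled DXBC modules are rejected for now.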
// TODO: fix return type..
fn extract_entry_point(
stage: ShaderStage,
source: &pso::EntryPoint<Backend>,
layout: &PipelineLayout,
features: &hal::Features,
) -> Result<Option<ComPtr<d3dcommon::ID3DBlob>>, pso::CreationError> {
// TODO: entrypoint stuff
match *source.module {
ShaderModule::Dxbc(ref _shader) => {
error!("DXBC modules are not supported yet");
Err(pso::CreationError::Other)
}
ShaderModule::Spirv(ref raw_data) => Ok(shader::compile_spirv_entrypoint(
raw_data, stage, source, layout, features,
)?),
}
}
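    // Builds a D3D11 SRV for the image described by `info`, translating the
    // gfx-hal view kind into the matching `D3D11_SRV_DIMENSION` and filling in
    // the mip and array ranges. The D32_FLOAT_S8X24_UINT depth format is
    // reinterpreted as R32_FLOAT_X8X24_TYPELESS so its depth aspect can be
    // sampled.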
fn view_image_as_shader_resource(
&self,
info: &ViewInfo,
) -> Result<ComPtr<d3d11::ID3D11ShaderResourceView>, image::ViewCreationError> {
let mut desc: d3d11::D3D11_SHADER_RESOURCE_VIEW_DESC = unsafe { mem::zeroed() };
desc.Format = info.format;
if desc.Format == dxgiformat::DXGI_FORMAT_D32_FLOAT_S8X24_UINT {
desc.Format = dxgiformat::DXGI_FORMAT_R32_FLOAT_X8X24_TYPELESS;
}
#[allow(non_snake_case)]
let MostDetailedMip = info.levels.start as _;
#[allow(non_snake_case)]
let MipLevels = (info.levels.end - info.levels.start) as _;
#[allow(non_snake_case)]
let FirstArraySlice = info.layers.start as _;
#[allow(non_snake_case)]
let ArraySize = (info.layers.end - info.layers.start) as _;
match info.view_kind {
image::ViewKind::D1 => {
desc.ViewDimension = d3dcommon::D3D11_SRV_DIMENSION_TEXTURE1D;
*unsafe { desc.u.Texture1D_mut() } = d3d11::D3D11_TEX1D_SRV {
MostDetailedMip,
MipLevels,
}
}
image::ViewKind::D1Array => {
desc.ViewDimension = d3dcommon::D3D11_SRV_DIMENSION_TEXTURE1DARRAY;
*unsafe { desc.u.Texture1DArray_mut() } = d3d11::D3D11_TEX1D_ARRAY_SRV {
MostDetailedMip,
MipLevels,
FirstArraySlice,
ArraySize,
}
}
image::ViewKind::D2 if info.kind.num_samples() > 1 => {
desc.ViewDimension = d3dcommon::D3D11_SRV_DIMENSION_TEXTURE2DMS;
*unsafe { desc.u.Texture2DMS_mut() } = d3d11::D3D11_TEX2DMS_SRV {
UnusedField_NothingToDefine: 0,
}
}
image::ViewKind::D2 => {
desc.ViewDimension = d3dcommon::D3D11_SRV_DIMENSION_TEXTURE2D;
*unsafe { desc.u.Texture2D_mut() } = d3d11::D3D11_TEX2D_SRV {
MostDetailedMip,
MipLevels,
}
}
image::ViewKind::D2Array if info.kind.num_samples() > 1 => {
desc.ViewDimension = d3dcommon::D3D11_SRV_DIMENSION_TEXTURE2DMSARRAY;
*unsafe { desc.u.Texture2DMSArray_mut() } = d3d11::D3D11_TEX2DMS_ARRAY_SRV {
FirstArraySlice,
ArraySize,
}
}
image::ViewKind::D2Array => {
desc.ViewDimension = d3dcommon::D3D11_SRV_DIMENSION_TEXTURE2DARRAY;
*unsafe { desc.u.Texture2DArray_mut() } = d3d11::D3D11_TEX2D_ARRAY_SRV {
MostDetailedMip,
MipLevels,
FirstArraySlice,
ArraySize,
}
}
image::ViewKind::D3 => {
desc.ViewDimension = d3dcommon::D3D11_SRV_DIMENSION_TEXTURE3D;
*unsafe { desc.u.Texture3D_mut() } = d3d11::D3D11_TEX3D_SRV {
MostDetailedMip,
MipLevels,
}
}
image::ViewKind::Cube => {
desc.ViewDimension = d3dcommon::D3D11_SRV_DIMENSION_TEXTURECUBE;
*unsafe { desc.u.TextureCube_mut() } = d3d11::D3D11_TEXCUBE_SRV {
MostDetailedMip,
MipLevels,
}
}
image::ViewKind::CubeArray => {
desc.ViewDimension = d3dcommon::D3D11_SRV_DIMENSION_TEXTURECUBEARRAY;
*unsafe { desc.u.TextureCubeArray_mut() } = d3d11::D3D11_TEXCUBE_ARRAY_SRV {
MostDetailedMip,
MipLevels,
First2DArrayFace: FirstArraySlice,
NumCubes: ArraySize / 6,
}
}
}
let mut srv = ptr::null_mut();
let hr = unsafe {
self.raw.CreateShaderResourceView(
info.resource,
&desc,
&mut srv as *mut *mut _ as *mut *mut _,
)
};
if winerror::SUCCEEDED(hr) {
Ok(unsafe { ComPtr::from_raw(srv) })
} else {
Err(image::ViewCreationError::Unsupported)
}
}
fn view_image_as_unordered_access(
&self,
info: &ViewInfo,
) -> Result<ComPtr<d3d11::ID3D11UnorderedAccessView>, image::ViewCreationError> {
let mut desc: d3d11::D3D11_UNORDERED_ACCESS_VIEW_DESC = unsafe { mem::zeroed() };
desc.Format = info.format;
#[allow(non_snake_case)]
let MipSlice = info.levels.start as _;
#[allow(non_snake_case)]
let FirstArraySlice = info.layers.start as _;
#[allow(non_snake_case)]
let ArraySize = (info.layers.end - info.layers.start) as _;
match info.view_kind {
image::ViewKind::D1 => {
desc.ViewDimension = d3d11::D3D11_UAV_DIMENSION_TEXTURE1D;
*unsafe { desc.u.Texture1D_mut() } = d3d11::D3D11_TEX1D_UAV {
MipSlice: info.levels.start as _,
}
}
image::ViewKind::D1Array => {
desc.ViewDimension = d3d11::D3D11_UAV_DIMENSION_TEXTURE1DARRAY;
*unsafe { desc.u.Texture1DArray_mut() } = d3d11::D3D11_TEX1D_ARRAY_UAV {
MipSlice,
FirstArraySlice,
ArraySize,
}
}
image::ViewKind::D2 => {
desc.ViewDimension = d3d11::D3D11_UAV_DIMENSION_TEXTURE2D;
*unsafe { desc.u.Texture2D_mut() } = d3d11::D3D11_TEX2D_UAV {
MipSlice: info.levels.start as _,
}
}
image::ViewKind::D2Array => {
desc.ViewDimension = d3d11::D3D11_UAV_DIMENSION_TEXTURE2DARRAY;
*unsafe { desc.u.Texture2DArray_mut() } = d3d11::D3D11_TEX2D_ARRAY_UAV {
MipSlice,
FirstArraySlice,
ArraySize,
}
}
image::ViewKind::D3 => {
desc.ViewDimension = d3d11::D3D11_UAV_DIMENSION_TEXTURE3D;
*unsafe { desc.u.Texture3D_mut() } = d3d11::D3D11_TEX3D_UAV {
MipSlice,
FirstWSlice: FirstArraySlice,
WSize: ArraySize,
}
}
_ => unimplemented!(),
}
let mut uav = ptr::null_mut();
let hr = unsafe {
self.raw.CreateUnorderedAccessView(
info.resource,
&desc,
&mut uav as *mut *mut _ as *mut *mut _,
)
};
if winerror::SUCCEEDED(hr) {
Ok(unsafe { ComPtr::from_raw(uav) })
} else {
error!("CreateUnorderedAccessView failed: 0x{:x}", hr);
Err(image::ViewCreationError::Unsupported)
}
}
pub(crate) fn view_image_as_render_target(
&self,
info: &ViewInfo,
) -> Result<ComPtr<d3d11::ID3D11RenderTargetView>, image::ViewCreationError> {
let mut desc: d3d11::D3D11_RENDER_TARGET_VIEW_DESC = unsafe { mem::zeroed() };
desc.Format = info.format;
#[allow(non_snake_case)]
let MipSlice = info.levels.start as _;
#[allow(non_snake_case)]
let FirstArraySlice = info.layers.start as _;
#[allow(non_snake_case)]
let ArraySize = (info.layers.end - info.layers.start) as _;
match info.view_kind {
image::ViewKind::D1 => {
desc.ViewDimension = d3d11::D3D11_RTV_DIMENSION_TEXTURE1D;
*unsafe { desc.u.Texture1D_mut() } = d3d11::D3D11_TEX1D_RTV { MipSlice }
}
image::ViewKind::D1Array => {
desc.ViewDimension = d3d11::D3D11_RTV_DIMENSION_TEXTURE1DARRAY;
*unsafe { desc.u.Texture1DArray_mut() } = d3d11::D3D11_TEX1D_ARRAY_RTV {
MipSlice,
FirstArraySlice,
ArraySize,
}
}
image::ViewKind::D2 => {
if info.kind.num_samples() > 1 {
desc.ViewDimension = d3d11::D3D11_RTV_DIMENSION_TEXTURE2DMS;
*unsafe { desc.u.Texture2DMS_mut() } = d3d11::D3D11_TEX2DMS_RTV {
UnusedField_NothingToDefine: 0,
}
} else {
desc.ViewDimension = d3d11::D3D11_RTV_DIMENSION_TEXTURE2D;
*unsafe { desc.u.Texture2D_mut() } = d3d11::D3D11_TEX2D_RTV { MipSlice }
}
}
image::ViewKind::D2Array => {
if info.kind.num_samples() > 1 {
desc.ViewDimension = d3d11::D3D11_RTV_DIMENSION_TEXTURE2DMSARRAY;
*unsafe { desc.u.Texture2DMSArray_mut() } = d3d11::D3D11_TEX2DMS_ARRAY_RTV {
FirstArraySlice,
ArraySize,
}
} else {
desc.ViewDimension = d3d11::D3D11_RTV_DIMENSION_TEXTURE2DARRAY;
*unsafe { desc.u.Texture2DArray_mut() } = d3d11::D3D11_TEX2D_ARRAY_RTV {
MipSlice,
FirstArraySlice,
ArraySize,
}
}
}
image::ViewKind::D3 => {
desc.ViewDimension = d3d11::D3D11_RTV_DIMENSION_TEXTURE3D;
*unsafe { desc.u.Texture3D_mut() } = d3d11::D3D11_TEX3D_RTV {
MipSlice,
FirstWSlice: FirstArraySlice,
WSize: ArraySize,
}
}
_ => unimplemented!(),
}
let mut rtv = ptr::null_mut();
let hr = unsafe {
self.raw.CreateRenderTargetView(
info.resource,
&desc,
&mut rtv as *mut *mut _ as *mut *mut _,
)
};
if winerror::SUCCEEDED(hr) {
Ok(unsafe { ComPtr::from_raw(rtv) })
} else {
error!("CreateRenderTargetView failed: 0x{:x}", hr);
Err(image::ViewCreationError::Unsupported)
}
}
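    // Builds a D3D11 DSV for a single mip level. `read_only_stencil` selects
    // the read-only flags: `Some(true)` marks both depth and stencil
    // read-only, `Some(false)` only depth, and `None` leaves the view fully
    // writable.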
fn view_image_as_depth_stencil(
&self,
info: &ViewInfo,
read_only_stencil: Option<bool>,
) -> Result<ComPtr<d3d11::ID3D11DepthStencilView>, image::ViewCreationError> {
#![allow(non_snake_case)]
let MipSlice = info.levels.start as _;
let FirstArraySlice = info.layers.start as _;
let ArraySize = (info.layers.end - info.layers.start) as _;
assert_eq!(info.levels.start + 1, info.levels.end);
assert!(info.layers.end <= info.kind.num_layers());
let mut desc: d3d11::D3D11_DEPTH_STENCIL_VIEW_DESC = unsafe { mem::zeroed() };
desc.Format = info.format;
if let Some(stencil) = read_only_stencil {
desc.Flags = match stencil {
true => d3d11::D3D11_DSV_READ_ONLY_DEPTH | d3d11::D3D11_DSV_READ_ONLY_STENCIL,
false => d3d11::D3D11_DSV_READ_ONLY_DEPTH,
}
}
match info.view_kind {
image::ViewKind::D2 => {
if info.kind.num_samples() > 1 {
desc.ViewDimension = d3d11::D3D11_DSV_DIMENSION_TEXTURE2DMS;
*unsafe { desc.u.Texture2DMS_mut() } = d3d11::D3D11_TEX2DMS_DSV {
UnusedField_NothingToDefine: 0,
}
} else {
desc.ViewDimension = d3d11::D3D11_DSV_DIMENSION_TEXTURE2D;
*unsafe { desc.u.Texture2D_mut() } = d3d11::D3D11_TEX2D_DSV { MipSlice }
}
}
image::ViewKind::D2Array => {
if info.kind.num_samples() > 1 {
desc.ViewDimension = d3d11::D3D11_DSV_DIMENSION_TEXTURE2DMSARRAY;
*unsafe { desc.u.Texture2DMSArray_mut() } = d3d11::D3D11_TEX2DMS_ARRAY_DSV {
FirstArraySlice,
ArraySize,
}
} else {
desc.ViewDimension = d3d11::D3D11_DSV_DIMENSION_TEXTURE2DARRAY;
*unsafe { desc.u.Texture2DArray_mut() } = d3d11::D3D11_TEX2D_ARRAY_DSV {
MipSlice,
FirstArraySlice,
ArraySize,
}
}
}
_ => unimplemented!(),
}
let mut dsv = ptr::null_mut();
let hr = unsafe {
self.raw.CreateDepthStencilView(
info.resource,
&desc,
&mut dsv as *mut *mut _ as *mut *mut _,
)
};
if winerror::SUCCEEDED(hr) {
Ok(unsafe { ComPtr::from_raw(dsv) })
} else {
error!("CreateDepthStencilView failed: 0x{:x}", hr);
Err(image::ViewCreationError::Unsupported)
}
}
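    // Creates the DXGI swapchain backing a surface, always using the non-sRGB
    // variant of the requested format, a hardcoded 60 Hz refresh rate, and the
    // DISCARD swap effect; the TODOs below track the IDXGIFactory2 (>= 11.1)
    // path that is still missing.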
pub(crate) fn create_swapchain_impl(
&self,
config: &window::SwapchainConfig,
window_handle: HWND,
factory: ComPtr<dxgi::IDXGIFactory>,
) -> Result<(ComPtr<dxgi::IDXGISwapChain>, dxgiformat::DXGI_FORMAT), window::SwapchainError>
{
// TODO: use IDXGIFactory2 for >=11.1
        // TODO: propagate CreateSwapChain failure as an Err instead of asserting below
debug!("{:#?}", config);
let non_srgb_format = conv::map_format_nosrgb(config.format).unwrap();
let mut desc = dxgi::DXGI_SWAP_CHAIN_DESC {
BufferDesc: dxgitype::DXGI_MODE_DESC {
Width: config.extent.width,
Height: config.extent.height,
// TODO: should this grab max value of all monitor hz? vsync
// will clamp to current monitor anyways?
RefreshRate: dxgitype::DXGI_RATIONAL {
Numerator: 1,
Denominator: 60,
},
Format: non_srgb_format,
ScanlineOrdering: dxgitype::DXGI_MODE_SCANLINE_ORDER_UNSPECIFIED,
Scaling: dxgitype::DXGI_MODE_SCALING_UNSPECIFIED,
},
SampleDesc: dxgitype::DXGI_SAMPLE_DESC {
Count: 1,
Quality: 0,
},
BufferUsage: dxgitype::DXGI_USAGE_RENDER_TARGET_OUTPUT,
BufferCount: config.image_count,
OutputWindow: window_handle,
// TODO:
Windowed: TRUE,
// TODO:
SwapEffect: dxgi::DXGI_SWAP_EFFECT_DISCARD,
Flags: 0,
};
let dxgi_swapchain = {
let mut swapchain: *mut dxgi::IDXGISwapChain = ptr::null_mut();
let hr = unsafe {
factory.CreateSwapChain(
self.raw.as_raw() as *mut _,
&mut desc as *mut _,
&mut swapchain as *mut *mut _ as *mut *mut _,
)
};
assert_eq!(hr, winerror::S_OK);
unsafe { ComPtr::from_raw(swapchain) }
};
Ok((dxgi_swapchain, non_srgb_format))
}
}
impl device::Device<Backend> for Device {
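    // D3D11 exposes no user-visible memory objects, so `Memory` is emulated:
    // CPU-visible heaps are backed by a leaked `Vec<u8>` (reclaimed in
    // `free_memory`), while device-local heaps carry no host pointer and the
    // actual GPU resources are created later, at bind time.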
unsafe fn allocate_memory(
&self,
mem_type: hal::MemoryTypeId,
size: u64,
) -> Result<Memory, device::AllocationError> {
let properties = self.memory_properties.memory_types[mem_type.0].properties;
let host_ptr = if properties.contains(hal::memory::Properties::CPU_VISIBLE) {
let mut data = vec![0u8; size as usize];
let ptr = data.as_mut_ptr();
mem::forget(data);
ptr
} else {
ptr::null_mut()
};
Ok(Memory {
properties,
size,
host_ptr,
local_buffers: Arc::new(RwLock::new(thunderdome::Arena::new())),
})
}
unsafe fn create_command_pool(
&self,
_family: QueueFamilyId,
_create_flags: pool::CommandPoolCreateFlags,
) -> Result<CommandPool, device::OutOfMemory> {
// TODO:
Ok(CommandPool {
device: self.raw.clone(),
device1: self.raw1.clone(),
internal: Arc::clone(&self.internal),
})
}
unsafe fn destroy_command_pool(&self, _pool: CommandPool) {
// automatic
}
unsafe fn create_render_pass<'a, IA, IS, ID>(
&self,
attachments: IA,
subpasses: IS,
_dependencies: ID,
) -> Result<RenderPass, device::OutOfMemory>
where
IA: IntoIterator,
IA::Item: Borrow<pass::Attachment>,
IS: IntoIterator,
IS::Item: Borrow<pass::SubpassDesc<'a>>,
ID: IntoIterator,
ID::Item: Borrow<pass::SubpassDependency>,
{
Ok(RenderPass {
attachments: attachments
.into_iter()
.map(|attachment| attachment.borrow().clone())
.collect(),
subpasses: subpasses
.into_iter()
.map(|desc| {
let desc = desc.borrow();
SubpassDesc {
color_attachments: desc
.colors
.iter()
.map(|color| color.borrow().clone())
.collect(),
depth_stencil_attachment: desc.depth_stencil.map(|d| *d),
input_attachments: desc
.inputs
.iter()
.map(|input| input.borrow().clone())
.collect(),
resolve_attachments: desc
.resolves
.iter()
.map(|resolve| resolve.borrow().clone())
.collect(),
}
})
.collect(),
})
}
unsafe fn create_pipeline_layout<IS, IR>(
&self,
set_layouts: IS,
_push_constant_ranges: IR,
) -> Result<PipelineLayout, device::OutOfMemory>
where
IS: IntoIterator,
IS::Item: Borrow<DescriptorSetLayout>,
IR: IntoIterator,
IR::Item: Borrow<(pso::ShaderStageFlags, Range<u32>)>,
{
let mut res_offsets = MultiStageData::<RegisterData<RegisterAccumulator>>::default();
let mut sets = Vec::new();
for set_layout in set_layouts {
let layout = set_layout.borrow();
sets.push(DescriptorSetInfo {
bindings: Arc::clone(&layout.bindings),
registers: res_offsets.advance(&layout.pool_mapping),
});
}
res_offsets.map_other(|data| {
// These use <= because this tells us the _next_ register, so maximum usage will be equal to the limit.
//
// Leave one slot for push constants
assert!(
data.c.res_index as u32
<= d3d11::D3D11_COMMONSHADER_CONSTANT_BUFFER_API_SLOT_COUNT - 1,
"{} bound constant buffers exceeds limit of {}",
data.c.res_index as u32,
d3d11::D3D11_COMMONSHADER_CONSTANT_BUFFER_API_SLOT_COUNT - 1,
);
assert!(
data.s.res_index as u32 <= d3d11::D3D11_COMMONSHADER_SAMPLER_REGISTER_COUNT,
"{} bound samplers exceeds limit of {}",
data.s.res_index as u32,
d3d11::D3D11_COMMONSHADER_SAMPLER_REGISTER_COUNT,
);
assert!(
data.t.res_index as u32 <= d3d11::D3D11_COMMONSHADER_INPUT_RESOURCE_REGISTER_COUNT,
"{} bound sampled textures and read-only buffers exceeds limit of {}",
data.t.res_index as u32,
d3d11::D3D11_COMMONSHADER_INPUT_RESOURCE_REGISTER_COUNT,
);
assert!(
data.u.res_index as u32 <= d3d11::D3D11_PS_CS_UAV_REGISTER_COUNT,
"{} bound storage textures and read-write buffers exceeds limit of {}",
data.u.res_index as u32,
d3d11::D3D11_PS_CS_UAV_REGISTER_COUNT,
);
});
Ok(PipelineLayout { sets })
}
unsafe fn create_pipeline_cache(
&self,
_data: Option<&[u8]>,
) -> Result<(), device::OutOfMemory> {
Ok(())
}
unsafe fn get_pipeline_cache_data(&self, _cache: &()) -> Result<Vec<u8>, device::OutOfMemory> {
//empty
Ok(Vec::new())
}
unsafe fn destroy_pipeline_cache(&self, _: ()) {
//empty
}
unsafe fn merge_pipeline_caches<I>(&self, _: &(), _: I) -> Result<(), device::OutOfMemory>
where
I: IntoIterator,
I::Item: Borrow<()>,
{
//empty
Ok(())
}
unsafe fn create_graphics_pipeline<'a>(
&self,
desc: &pso::GraphicsPipelineDesc<'a, Backend>,
_cache: Option<&()>,
) -> Result<GraphicsPipeline, pso::CreationError> {
let features = &self.features;
let build_shader =
|stage: ShaderStage, source: Option<&pso::EntryPoint<'a, Backend>>| match source {
Some(src) => Self::extract_entry_point(stage, src, desc.layout, features),
None => Ok(None),
};
let (layout, vs, gs, hs, ds) = match desc.primitive_assembler {
pso::PrimitiveAssemblerDesc::Vertex {
buffers,
attributes,
ref input_assembler,
ref vertex,
ref tessellation,
ref geometry,
} => {
let vertex_semantic_remapping = match vertex.module {
ShaderModule::Spirv(spirv) => {
shader::introspect_spirv_vertex_semantic_remapping(spirv)?
}
_ => unimplemented!(),
};
let vs = build_shader(ShaderStage::Vertex, Some(&vertex))?.unwrap();
let gs = build_shader(ShaderStage::Geometry, geometry.as_ref())?;
let layout = self.create_input_layout(
vs.clone(),
buffers,
attributes,
input_assembler,
vertex_semantic_remapping,
)?;
let vs = self.create_vertex_shader(vs)?;
let gs = if let Some(blob) = gs {
Some(self.create_geometry_shader(blob)?)
} else {
None
};
let (hs, ds) = if let Some(ts) = tessellation {
let hs = build_shader(ShaderStage::Hull, Some(&ts.0))?.unwrap();
let ds = build_shader(ShaderStage::Domain, Some(&ts.1))?.unwrap();
(
Some(self.create_hull_shader(hs)?),
Some(self.create_domain_shader(ds)?),
)
} else {
(None, None)
};
(layout, vs, gs, hs, ds)
}
pso::PrimitiveAssemblerDesc::Mesh { .. } => {
return Err(pso::CreationError::UnsupportedPipeline)
}
};
let ps = build_shader(ShaderStage::Fragment, desc.fragment.as_ref())?;
let ps = if let Some(blob) = ps {
Some(self.create_pixel_shader(blob)?)
} else {
None
};
let rasterizer_state =
self.create_rasterizer_state(&desc.rasterizer, &desc.multisampling)?;
let blend_state = self.create_blend_state(&desc.blender, &desc.multisampling)?;
let depth_stencil_state = Some(self.create_depth_stencil_state(&desc.depth_stencil)?);
match desc.label {
Some(label) if verify_debug_ascii(label) => {
let mut name = label.to_string();
set_debug_name_with_suffix(&blend_state, &mut name, " -- Blend State");
set_debug_name_with_suffix(&rasterizer_state, &mut name, " -- Rasterizer State");
set_debug_name_with_suffix(&layout.raw, &mut name, " -- Input Layout");
if let Some(ref dss) = depth_stencil_state {
set_debug_name_with_suffix(&dss.raw, &mut name, " -- Depth Stencil State");
}
}
_ => {}
}
Ok(GraphicsPipeline {
vs,
gs,
ds,
hs,
ps,
topology: layout.topology,
input_layout: layout.raw,
rasterizer_state,
blend_state,
depth_stencil_state,
baked_states: desc.baked_states.clone(),
required_bindings: layout.required_bindings,
max_vertex_bindings: layout.max_vertex_bindings,
strides: layout.vertex_strides,
})
}
unsafe fn create_compute_pipeline<'a>(
&self,
desc: &pso::ComputePipelineDesc<'a, Backend>,
_cache: Option<&()>,
) -> Result<ComputePipeline, pso::CreationError> {
let features = &self.features;
let build_shader =
|stage: ShaderStage, source: Option<&pso::EntryPoint<'a, Backend>>| match source {
Some(src) => Self::extract_entry_point(stage, src, desc.layout, features),
None => Ok(None),
};
let cs = build_shader(ShaderStage::Compute, Some(&desc.shader))?.unwrap();
let cs = self.create_compute_shader(cs)?;
Ok(ComputePipeline { cs })
}
unsafe fn create_framebuffer<I>(
&self,
_renderpass: &RenderPass,
_attachments: I,
extent: image::Extent,
) -> Result<Framebuffer, device::OutOfMemory> {
Ok(Framebuffer {
layers: extent.depth as _,
})
}
unsafe fn create_shader_module(
&self,
raw_data: &[u32],
) -> Result<ShaderModule, device::ShaderError> {
Ok(ShaderModule::Spirv(raw_data.into()))
}
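    // Buffer creation is deferred: this only computes the D3D11 bind flags and
    // the aligned size, while the `ID3D11Buffer` itself is created in
    // `bind_buffer_memory`. A buffer used both as a constant buffer and as
    // anything else needs two disjoint resources, because
    // `D3D11_BIND_CONSTANT_BUFFER` cannot be combined with other bind flags.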
unsafe fn create_buffer(
&self,
size: u64,
usage: buffer::Usage,
) -> Result<Buffer, buffer::CreationError> {
use buffer::Usage;
let mut bind = 0;
if usage.contains(Usage::UNIFORM) {
bind |= d3d11::D3D11_BIND_CONSTANT_BUFFER;
}
if usage.contains(Usage::VERTEX) {
bind |= d3d11::D3D11_BIND_VERTEX_BUFFER;
}
if usage.contains(Usage::INDEX) {
bind |= d3d11::D3D11_BIND_INDEX_BUFFER;
}
// TODO: >=11.1
if usage.intersects(
Usage::UNIFORM_TEXEL | Usage::STORAGE_TEXEL | Usage::TRANSFER_SRC | Usage::STORAGE,
) {
bind |= d3d11::D3D11_BIND_SHADER_RESOURCE;
}
if usage.intersects(Usage::TRANSFER_DST | Usage::STORAGE) {
bind |= d3d11::D3D11_BIND_UNORDERED_ACCESS;
}
        // If `D3D11_BIND_CONSTANT_BUFFER` is combined with any other bind flag, we need to
        // handle it by creating two buffers: one with `D3D11_BIND_CONSTANT_BUFFER` and one
        // with the remaining flags.
let needs_disjoint_cb = bind & d3d11::D3D11_BIND_CONSTANT_BUFFER != 0
&& bind != d3d11::D3D11_BIND_CONSTANT_BUFFER;
if needs_disjoint_cb {
bind ^= d3d11::D3D11_BIND_CONSTANT_BUFFER;
}
fn up_align(x: u64, alignment: u64) -> u64 {
(x + alignment - 1) & !(alignment - 1)
}
        // Constant buffer sizes need to be divisible by 16; everything else is 4-byte aligned.
let size = if usage.contains(Usage::UNIFORM) {
up_align(size, 16)
} else {
up_align(size, 4)
};
Ok(Buffer {
internal: InternalBuffer {
raw: ptr::null_mut(),
disjoint_cb: if needs_disjoint_cb {
Some(ptr::null_mut())
} else {
None
},
srv: None,
uav: None,
usage,
debug_name: None,
},
bound_range: 0..0,
local_memory_arena: Weak::new(),
memory_index: None,
is_coherent: false,
memory_ptr: ptr::null_mut(),
bind,
requirements: memory::Requirements {
size,
alignment: 4,
type_mask: BUFFER_TYPE_MASK,
},
})
}
unsafe fn get_buffer_requirements(&self, buffer: &Buffer) -> memory::Requirements {
buffer.requirements
}
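    // Creates the actual GPU resources for a buffer: the main `ID3D11Buffer`
    // (plus the disjoint constant buffer, if one is required) and raw
    // R32_TYPELESS SRV/UAV views for shader and transfer access. Host-visible
    // memory contents, if any, are uploaded as initial data.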
unsafe fn bind_buffer_memory(
&self,
memory: &Memory,
offset: u64,
buffer: &mut Buffer,
) -> Result<(), device::BindError> {
debug!(
"usage={:?}, props={:b}",
buffer.internal.usage, memory.properties
);
#[allow(non_snake_case)]
let mut MiscFlags = if buffer.bind
& (d3d11::D3D11_BIND_SHADER_RESOURCE | d3d11::D3D11_BIND_UNORDERED_ACCESS)
!= 0
{
d3d11::D3D11_RESOURCE_MISC_BUFFER_ALLOW_RAW_VIEWS
} else {
0
};
if buffer.internal.usage.contains(buffer::Usage::INDIRECT) {
MiscFlags |= d3d11::D3D11_RESOURCE_MISC_DRAWINDIRECT_ARGS;
}
let initial_data = if memory.host_ptr.is_null() {
None
} else {
Some(d3d11::D3D11_SUBRESOURCE_DATA {
pSysMem: memory.host_ptr.offset(offset as isize) as *const _,
SysMemPitch: 0,
SysMemSlicePitch: 0,
})
};
//TODO: check `memory.properties.contains(memory::Properties::DEVICE_LOCAL)` ?
let raw = {
// device local memory
let desc = d3d11::D3D11_BUFFER_DESC {
ByteWidth: buffer.requirements.size as _,
Usage: d3d11::D3D11_USAGE_DEFAULT,
BindFlags: buffer.bind,
CPUAccessFlags: 0,
MiscFlags,
StructureByteStride: if buffer.internal.usage.contains(buffer::Usage::TRANSFER_SRC)
{
4
} else {
0
},
};
let mut raw: *mut d3d11::ID3D11Buffer = ptr::null_mut();
let hr = self.raw.CreateBuffer(
&desc,
initial_data.as_ref().map_or(ptr::null_mut(), |id| id),
&mut raw as *mut *mut _ as *mut *mut _,
);
if !winerror::SUCCEEDED(hr) {
return Err(device::BindError::WrongMemory);
}
if let Some(ref mut name) = buffer.internal.debug_name {
set_debug_name(&*raw, name);
}
ComPtr::from_raw(raw)
};
let disjoint_cb = if buffer.internal.disjoint_cb.is_some() {
let desc = d3d11::D3D11_BUFFER_DESC {
ByteWidth: buffer.requirements.size as _,
Usage: d3d11::D3D11_USAGE_DEFAULT,
BindFlags: d3d11::D3D11_BIND_CONSTANT_BUFFER,
CPUAccessFlags: 0,
MiscFlags: 0,
StructureByteStride: 0,
};
let mut disjoint_raw: *mut d3d11::ID3D11Buffer = ptr::null_mut();
let hr = self.raw.CreateBuffer(
&desc,
initial_data.as_ref().map_or(ptr::null_mut(), |id| id),
&mut disjoint_raw as *mut *mut _ as *mut *mut _,
);
if !winerror::SUCCEEDED(hr) {
return Err(device::BindError::WrongMemory);
}
if let Some(ref mut name) = buffer.internal.debug_name {
set_debug_name_with_suffix(&*disjoint_raw, name, " -- Constant Buffer");
}
Some(disjoint_raw)
} else {
None
};
let srv = if buffer.bind & d3d11::D3D11_BIND_SHADER_RESOURCE != 0 {
let mut desc = mem::zeroed::<d3d11::D3D11_SHADER_RESOURCE_VIEW_DESC>();
desc.Format = dxgiformat::DXGI_FORMAT_R32_TYPELESS;
desc.ViewDimension = d3dcommon::D3D11_SRV_DIMENSION_BUFFEREX;
*desc.u.BufferEx_mut() = d3d11::D3D11_BUFFEREX_SRV {
FirstElement: 0,
NumElements: buffer.requirements.size as u32 / 4,
Flags: d3d11::D3D11_BUFFEREX_SRV_FLAG_RAW,
};
let mut srv: *mut d3d11::ID3D11ShaderResourceView = ptr::null_mut();
let hr = self.raw.CreateShaderResourceView(
raw.as_raw() as *mut _,
&desc,
&mut srv as *mut *mut _ as *mut *mut _,
);
if !winerror::SUCCEEDED(hr) {
error!("CreateShaderResourceView failed: 0x{:x}", hr);
return Err(device::BindError::WrongMemory);
}
            if let Some(ref mut name) = buffer.internal.debug_name {
                set_debug_name_with_suffix(&*srv, name, " -- SRV");
            }
Some(srv)
} else {
None
};
let uav = if buffer.bind & d3d11::D3D11_BIND_UNORDERED_ACCESS != 0 {
let mut desc = mem::zeroed::<d3d11::D3D11_UNORDERED_ACCESS_VIEW_DESC>();
desc.Format = dxgiformat::DXGI_FORMAT_R32_TYPELESS;
desc.ViewDimension = d3d11::D3D11_UAV_DIMENSION_BUFFER;
*desc.u.Buffer_mut() = d3d11::D3D11_BUFFER_UAV {
FirstElement: 0,
NumElements: buffer.requirements.size as u32 / 4,
Flags: d3d11::D3D11_BUFFER_UAV_FLAG_RAW,
};
let mut uav: *mut d3d11::ID3D11UnorderedAccessView = ptr::null_mut();
let hr = self.raw.CreateUnorderedAccessView(
raw.as_raw() as *mut _,
&desc,
&mut uav as *mut *mut _ as *mut *mut _,
);
if !winerror::SUCCEEDED(hr) {
error!("CreateUnorderedAccessView failed: 0x{:x}", hr);
return Err(device::BindError::WrongMemory);
}
if let Some(ref mut name) = buffer.internal.debug_name {
set_debug_name_with_suffix(&*uav, name, " -- UAV");
}
Some(uav)
} else {
None
};
let internal = InternalBuffer {
raw: raw.into_raw(),
disjoint_cb,
srv,
uav,
usage: buffer.internal.usage,
debug_name: buffer.internal.debug_name.take(),
};
let range = offset..offset + buffer.requirements.size;
let memory_index = memory.bind_buffer(range.clone(), internal.clone());
buffer.internal = internal;
buffer.is_coherent = memory
.properties
.contains(hal::memory::Properties::COHERENT);
buffer.memory_ptr = memory.host_ptr;
buffer.bound_range = range;
buffer.local_memory_arena = Arc::downgrade(&memory.local_buffers);
buffer.memory_index = Some(memory_index);
Ok(())
}
unsafe fn create_buffer_view(
&self,
_buffer: &Buffer,
_format: Option<format::Format>,
_range: buffer::SubRange,
) -> Result<BufferView, buffer::ViewCreationError> {
unimplemented!()
}
unsafe fn create_image(
&self,
kind: image::Kind,
mip_levels: image::Level,
format: format::Format,
_tiling: image::Tiling,
usage: image::Usage,
view_caps: image::ViewCapabilities,
) -> Result<Image, image::CreationError> {
let surface_desc = format.base_format().0.desc();
let bytes_per_texel = surface_desc.bits / 8;
let ext = kind.extent();
let size = (ext.width * ext.height * ext.depth) as u64 * bytes_per_texel as u64;
let bind = conv::map_image_usage(usage, surface_desc);
debug!("{:b}", bind);
Ok(Image {
internal: InternalImage {
raw: ptr::null_mut(),
copy_srv: None,
srv: None,
unordered_access_views: Vec::new(),
depth_stencil_views: Vec::new(),
render_target_views: Vec::new(),
debug_name: None,
},
decomposed_format: conv::DecomposedDxgiFormat::UNKNOWN,
kind,
mip_levels,
format,
usage,
view_caps,
bind,
requirements: memory::Requirements {
                size,
alignment: 4,
type_mask: 0x1, // device-local only
},
})
}
unsafe fn get_image_requirements(&self, image: &Image) -> memory::Requirements {
image.requirements
}
unsafe fn get_image_subresource_footprint(
&self,
_image: &Image,
_sub: image::Subresource,
) -> image::SubresourceFootprint {
unimplemented!()
}
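    // Creates the backing texture for an image along with every view the
    // backend needs up front: per-mip UAVs for transfer destinations, SRVs for
    // sampling and copies, and per-mip, per-layer RTVs and DSVs for
    // attachments. The texture uses the typeless variant of its format so the
    // views can reinterpret it.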
unsafe fn bind_image_memory(
&self,
memory: &Memory,
_offset: u64,
image: &mut Image,
) -> Result<(), device::BindError> {
use image::Usage;
use memory::Properties;
let base_format = image.format.base_format();
let format_desc = base_format.0.desc();
let compressed = format_desc.is_compressed();
let depth = image.format.is_depth();
let stencil = image.format.is_stencil();
let (bind, usage, cpu) = if memory.properties == Properties::DEVICE_LOCAL {
(image.bind, d3d11::D3D11_USAGE_DEFAULT, 0)
} else if memory.properties
== (Properties::DEVICE_LOCAL | Properties::CPU_VISIBLE | Properties::CPU_CACHED)
{
(
image.bind,
d3d11::D3D11_USAGE_DYNAMIC,
d3d11::D3D11_CPU_ACCESS_WRITE,
)
} else if memory.properties == (Properties::CPU_VISIBLE | Properties::CPU_CACHED) {
(
0,
d3d11::D3D11_USAGE_STAGING,
d3d11::D3D11_CPU_ACCESS_READ | d3d11::D3D11_CPU_ACCESS_WRITE,
)
} else {
unimplemented!()
};
let dxgi_format = conv::map_format(image.format).unwrap();
let decomposed = conv::DecomposedDxgiFormat::from_dxgi_format(dxgi_format);
assert!(
memory.host_ptr.is_null(),
"Images can only be allocated from device-local memory"
);
let initial_data_ptr = ptr::null_mut();
let mut resource = ptr::null_mut();
let view_kind = match image.kind {
image::Kind::D1(width, layers) => {
let desc = d3d11::D3D11_TEXTURE1D_DESC {
Width: width,
MipLevels: image.mip_levels as _,
ArraySize: layers as _,
Format: decomposed.typeless,
Usage: usage,
BindFlags: bind,
CPUAccessFlags: cpu,
MiscFlags: 0,
};
let hr = self.raw.CreateTexture1D(
&desc,
initial_data_ptr,
&mut resource as *mut *mut _ as *mut *mut _,
);
if !winerror::SUCCEEDED(hr) {
error!("CreateTexture1D failed: 0x{:x}", hr);
return Err(device::BindError::WrongMemory);
}
image::ViewKind::D1Array
}
image::Kind::D2(width, height, layers, samples) => {
let desc = d3d11::D3D11_TEXTURE2D_DESC {
Width: width,
Height: height,
MipLevels: image.mip_levels as _,
ArraySize: layers as _,
Format: decomposed.typeless,
SampleDesc: dxgitype::DXGI_SAMPLE_DESC {
Count: samples as _,
Quality: 0,
},
Usage: usage,
BindFlags: bind,
CPUAccessFlags: cpu,
MiscFlags: if image.view_caps.contains(image::ViewCapabilities::KIND_CUBE) {
d3d11::D3D11_RESOURCE_MISC_TEXTURECUBE
} else {
0
},
};
let hr = self.raw.CreateTexture2D(
&desc,
initial_data_ptr,
&mut resource as *mut *mut _ as *mut *mut _,
);
if !winerror::SUCCEEDED(hr) {
error!("CreateTexture2D failed: 0x{:x}", hr);
return Err(device::BindError::WrongMemory);
}
image::ViewKind::D2Array
}
image::Kind::D3(width, height, depth) => {
let desc = d3d11::D3D11_TEXTURE3D_DESC {
Width: width,
Height: height,
Depth: depth,
MipLevels: image.mip_levels as _,
Format: decomposed.typeless,
Usage: usage,
BindFlags: bind,
CPUAccessFlags: cpu,
MiscFlags: 0,
};
let hr = self.raw.CreateTexture3D(
&desc,
initial_data_ptr,
&mut resource as *mut *mut _ as *mut *mut _,
);
if !winerror::SUCCEEDED(hr) {
error!("CreateTexture3D failed: 0x{:x}", hr);
return Err(device::BindError::WrongMemory);
}
image::ViewKind::D3
}
};
let mut unordered_access_views = Vec::new();
if image.usage.contains(Usage::TRANSFER_DST) && !compressed && !depth {
for mip in 0..image.mip_levels {
let view = ViewInfo {
                    resource,
kind: image.kind,
caps: image::ViewCapabilities::empty(),
view_kind,
// TODO: we should be using `uav_format` rather than `copy_uav_format`, and share
// the UAVs when the formats are identical
format: decomposed.copy_uav.unwrap(),
levels: mip..(mip + 1),
layers: 0..image.kind.num_layers(),
};
let uav = self
.view_image_as_unordered_access(&view)
.map_err(|_| device::BindError::WrongMemory)?;
if let Some(ref name) = image.internal.debug_name {
set_debug_name(&uav, &format!("{} -- UAV Mip {}", name, mip));
}
unordered_access_views.push(uav);
}
}
let (copy_srv, srv) = if image.usage.contains(image::Usage::TRANSFER_SRC) {
let mut view = ViewInfo {
                resource,
kind: image.kind,
caps: image::ViewCapabilities::empty(),
view_kind,
format: decomposed.copy_srv.unwrap(),
levels: 0..image.mip_levels,
layers: 0..image.kind.num_layers(),
};
let copy_srv = if !compressed {
Some(
self.view_image_as_shader_resource(&view)
.map_err(|_| device::BindError::WrongMemory)?,
)
} else {
None
};
view.format = decomposed.srv.unwrap();
let srv = if !depth && !stencil {
Some(
self.view_image_as_shader_resource(&view)
.map_err(|_| device::BindError::WrongMemory)?,
)
} else {
None
};
(copy_srv, srv)
} else {
(None, None)
};
let mut render_target_views = Vec::new();
if (image.usage.contains(image::Usage::COLOR_ATTACHMENT)
|| image.usage.contains(image::Usage::TRANSFER_DST))
&& !compressed
&& !depth
{
for layer in 0..image.kind.num_layers() {
for mip in 0..image.mip_levels {
let view = ViewInfo {
resource,
kind: image.kind,
caps: image::ViewCapabilities::empty(),
view_kind,
format: decomposed.rtv.unwrap(),
levels: mip..(mip + 1),
layers: layer..(layer + 1),
};
let rtv = self
.view_image_as_render_target(&view)
.map_err(|_| device::BindError::WrongMemory)?;
if let Some(ref name) = image.internal.debug_name {
set_debug_name(
&rtv,
&format!("{} -- RTV Mip {} Layer {}", name, mip, layer),
);
}
render_target_views.push(rtv);
}
}
};
let mut depth_stencil_views = Vec::new();
if depth {
for layer in 0..image.kind.num_layers() {
for mip in 0..image.mip_levels {
let view = ViewInfo {
resource,
kind: image.kind,
caps: image::ViewCapabilities::empty(),
view_kind,
format: decomposed.dsv.unwrap(),
levels: mip..(mip + 1),
layers: layer..(layer + 1),
};
let dsv = self
.view_image_as_depth_stencil(&view, None)
.map_err(|_| device::BindError::WrongMemory)?;
if let Some(ref name) = image.internal.debug_name {
set_debug_name(
&dsv,
&format!("{} -- DSV Mip {} Layer {}", name, mip, layer),
);
}
depth_stencil_views.push(dsv);
}
}
}
if let Some(ref mut name) = image.internal.debug_name {
set_debug_name(&*resource, name);
if let Some(ref copy_srv) = copy_srv {
set_debug_name_with_suffix(copy_srv, name, " -- Copy SRV");
}
if let Some(ref srv) = srv {
set_debug_name_with_suffix(srv, name, " -- SRV");
}
}
let internal = InternalImage {
raw: resource,
copy_srv,
srv,
unordered_access_views,
depth_stencil_views,
render_target_views,
debug_name: image.internal.debug_name.take(),
};
image.decomposed_format = decomposed;
image.internal = internal;
Ok(())
}
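    // Builds the user-requested set of views for an image. D3D11 cannot view
    // a single slice of an array texture as a non-array view, so D1/D2 view
    // kinds on array images are promoted to their array variants; depth-stencil
    // attachments also get a read-only DSV alongside the writable one.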
unsafe fn create_image_view(
&self,
image: &Image,
view_kind: image::ViewKind,
format: format::Format,
_swizzle: format::Swizzle,
range: image::SubresourceRange,
) -> Result<ImageView, image::ViewCreationError> {
let is_array = image.kind.num_layers() > 1;
let num_levels = range.resolve_level_count(image.mip_levels);
let num_layers = range.resolve_layer_count(image.kind.num_layers());
let info = ViewInfo {
resource: image.internal.raw,
kind: image.kind,
caps: image.view_caps,
// D3D11 doesn't allow looking at a single slice of an array as a non-array
view_kind: if is_array && view_kind == image::ViewKind::D2 {
image::ViewKind::D2Array
} else if is_array && view_kind == image::ViewKind::D1 {
image::ViewKind::D1Array
} else {
view_kind
},
format: conv::map_format(format).ok_or(image::ViewCreationError::BadFormat(format))?,
levels: range.level_start..range.level_start + num_levels,
layers: range.layer_start..range.layer_start + num_layers,
};
let srv_info = ViewInfo {
format: conv::viewable_format(info.format),
..info.clone()
};
let mut debug_name = image.internal.debug_name.clone();
Ok(ImageView {
subresource: d3d11::D3D11CalcSubresource(
0,
range.layer_start as _,
range.level_start as _,
),
format,
srv_handle: if image.usage.intersects(image::Usage::SAMPLED) {
let srv = self.view_image_as_shader_resource(&srv_info)?;
if let Some(ref mut name) = debug_name {
set_debug_name_with_suffix(&srv, name, " -- SRV");
}
Some(srv.into_raw())
} else {
None
},
rtv_handle: if image.usage.contains(image::Usage::COLOR_ATTACHMENT) {
let rtv = self.view_image_as_render_target(&info)?;
if let Some(ref mut name) = debug_name {
set_debug_name_with_suffix(&rtv, name, " -- RTV");
}
Some(rtv.into_raw())
} else {
None
},
uav_handle: if image.usage.contains(image::Usage::STORAGE) {
let uav = self.view_image_as_unordered_access(&info)?;
if let Some(ref mut name) = debug_name {
set_debug_name_with_suffix(&uav, name, " -- UAV");
}
Some(uav.into_raw())
} else {
None
},
dsv_handle: if image.usage.contains(image::Usage::DEPTH_STENCIL_ATTACHMENT) {
let dsv = self.view_image_as_depth_stencil(&info, None)?;
if let Some(ref mut name) = debug_name {
set_debug_name_with_suffix(&dsv, name, " -- DSV");
}
Some(dsv.into_raw())
} else {
None
},
rodsv_handle: if image.usage.contains(image::Usage::DEPTH_STENCIL_ATTACHMENT) {
let rodsv =
self.view_image_as_depth_stencil(&info, Some(image.format.is_stencil()))?;
if let Some(ref mut name) = debug_name {
set_debug_name_with_suffix(&rodsv, name, " -- DSV");
}
Some(rodsv.into_raw())
} else {
None
},
owned: true,
})
}
unsafe fn create_sampler(
&self,
info: &image::SamplerDesc,
) -> Result<Sampler, device::AllocationError> {
assert!(info.normalized);
let op = match info.comparison {
Some(_) => d3d11::D3D11_FILTER_REDUCTION_TYPE_COMPARISON,
None => d3d11::D3D11_FILTER_REDUCTION_TYPE_STANDARD,
};
let desc = d3d11::D3D11_SAMPLER_DESC {
Filter: conv::map_filter(
info.min_filter,
info.mag_filter,
info.mip_filter,
op,
info.anisotropy_clamp,
),
AddressU: conv::map_wrapping(info.wrap_mode.0),
AddressV: conv::map_wrapping(info.wrap_mode.1),
AddressW: conv::map_wrapping(info.wrap_mode.2),
MipLODBias: info.lod_bias.0,
MaxAnisotropy: info.anisotropy_clamp.map_or(0, |aniso| aniso as u32),
ComparisonFunc: info.comparison.map_or(0, |comp| conv::map_comparison(comp)),
BorderColor: info.border.into(),
MinLOD: info.lod_range.start.0,
MaxLOD: info.lod_range.end.0,
};
let mut sampler = ptr::null_mut();
let hr = self
.raw
.CreateSamplerState(&desc, &mut sampler as *mut *mut _ as *mut *mut _);
        assert!(winerror::SUCCEEDED(hr));
Ok(Sampler {
sampler_handle: ComPtr::from_raw(sampler),
})
}
unsafe fn create_descriptor_pool<I>(
&self,
_max_sets: usize,
ranges: I,
_flags: pso::DescriptorPoolCreateFlags,
) -> Result<DescriptorPool, device::OutOfMemory>
where
I: IntoIterator,
I::Item: Borrow<pso::DescriptorRangeDesc>,
{
let mut total = RegisterData::default();
for range in ranges {
let r = range.borrow();
let content = DescriptorContent::from(r.ty);
total.add_content_many(content, r.count as DescriptorIndex);
}
let max_stages = 6;
let count = total.sum() * max_stages;
Ok(DescriptorPool::with_capacity(count))
}
unsafe fn create_descriptor_set_layout<I, J>(
&self,
layout_bindings: I,
_immutable_samplers: J,
) -> Result<DescriptorSetLayout, device::OutOfMemory>
where
I: IntoIterator,
I::Item: Borrow<pso::DescriptorSetLayoutBinding>,
J: IntoIterator,
J::Item: Borrow<Sampler>,
{
let mut total = MultiStageData::<RegisterData<_>>::default();
let mut bindings = layout_bindings
.into_iter()
.map(|b| b.borrow().clone())
.collect::<Vec<_>>();
for binding in bindings.iter() {
let content = DescriptorContent::from(binding.ty);
// If this binding is used by the graphics pipeline and is a UAV, it belongs to the "Output Merger"
// stage, so we only put them in the fragment stage to save redundant descriptor allocations.
let stage_flags = if content.contains(DescriptorContent::UAV)
&& binding
.stage_flags
.intersects(pso::ShaderStageFlags::ALL - pso::ShaderStageFlags::COMPUTE)
{
let mut stage_flags = pso::ShaderStageFlags::FRAGMENT;
stage_flags.set(
pso::ShaderStageFlags::COMPUTE,
binding.stage_flags.contains(pso::ShaderStageFlags::COMPUTE),
);
stage_flags
} else {
binding.stage_flags
};
total.add_content_many(content, stage_flags, binding.count as _);
}
bindings.sort_by_key(|a| a.binding);
let accum = total.map_register(|count| RegisterAccumulator {
res_index: *count as ResourceIndex,
});
Ok(DescriptorSetLayout {
bindings: Arc::new(bindings),
pool_mapping: accum.to_mapping(),
})
}
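    // Writes descriptors into the set's flat per-stage register tables. The
    // layout's pool mapping gives the base offsets; skipped bindings and the
    // array offset are added on top, and each descriptor is assigned into the
    // CBV/SRV/UAV/sampler registers of every stage its binding covers.
    // Descriptors that overflow a binding's array spill into the next binding.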
unsafe fn write_descriptor_set<'a, I>(&self, op: pso::DescriptorSetWrite<'a, Backend, I>)
where
I: IntoIterator,
I::Item: Borrow<pso::Descriptor<'a, Backend>>,
{
// Get baseline mapping
let mut mapping = op
.set
.layout
.pool_mapping
.map_register(|mapping| mapping.offset);
// Iterate over layout bindings until the first binding is found.
let binding_start = op
.set
.layout
.bindings
.iter()
.position(|binding| binding.binding == op.binding)
.unwrap();
// If we've skipped layout bindings, we need to add them to get the correct binding offset
for binding in &op.set.layout.bindings[..binding_start] {
let content = DescriptorContent::from(binding.ty);
mapping.add_content_many(content, binding.stage_flags, binding.count as _);
}
// We start at the given binding index and array index
let mut binding_index = binding_start;
let mut array_index = op.array_offset;
// If we're skipping array indices in the current binding, we need to add them to get the correct binding offset
if array_index > 0 {
let binding: &pso::DescriptorSetLayoutBinding = &op.set.layout.bindings[binding_index];
let content = DescriptorContent::from(binding.ty);
mapping.add_content_many(content, binding.stage_flags, array_index as _);
}
// Iterate over the descriptors, figuring out the corresponding binding, and adding
// it to the set of bindings.
//
// When we hit the end of an array of descriptors and there are still descriptors left
// over, we will spill into writing the next binding.
for descriptor in op.descriptors {
let binding: &pso::DescriptorSetLayoutBinding = &op.set.layout.bindings[binding_index];
let handles = match *descriptor.borrow() {
pso::Descriptor::Buffer(buffer, ref _sub) => RegisterData {
c: match buffer.internal.disjoint_cb {
Some(dj_buf) => dj_buf as *mut _,
None => buffer.internal.raw as *mut _,
},
t: buffer.internal.srv.map_or(ptr::null_mut(), |p| p as *mut _),
u: buffer.internal.uav.map_or(ptr::null_mut(), |p| p as *mut _),
s: ptr::null_mut(),
},
pso::Descriptor::Image(image, _layout) => RegisterData {
c: ptr::null_mut(),
t: image.srv_handle.map_or(ptr::null_mut(), |h| h as *mut _),
u: image.uav_handle.map_or(ptr::null_mut(), |h| h as *mut _),
s: ptr::null_mut(),
},
pso::Descriptor::Sampler(sampler) => RegisterData {
c: ptr::null_mut(),
t: ptr::null_mut(),
u: ptr::null_mut(),
s: sampler.sampler_handle.as_raw() as *mut _,
},
pso::Descriptor::CombinedImageSampler(image, _layout, sampler) => RegisterData {
c: ptr::null_mut(),
t: image.srv_handle.map_or(ptr::null_mut(), |h| h as *mut _),
u: image.uav_handle.map_or(ptr::null_mut(), |h| h as *mut _),
s: sampler.sampler_handle.as_raw() as *mut _,
},
pso::Descriptor::TexelBuffer(_buffer_view) => unimplemented!(),
};
let content = DescriptorContent::from(binding.ty);
if content.contains(DescriptorContent::CBV) {
let offsets = mapping.map_other(|map| map.c);
op.set
.assign_stages(&offsets, binding.stage_flags, handles.c);
};
if content.contains(DescriptorContent::SRV) {
let offsets = mapping.map_other(|map| map.t);
op.set
.assign_stages(&offsets, binding.stage_flags, handles.t);
};
if content.contains(DescriptorContent::UAV) {
// If this binding is used by the graphics pipeline and is a UAV, it belongs to the "Output Merger"
// stage, so we only put them in the fragment stage to save redundant descriptor allocations.
let stage_flags = if binding
.stage_flags
.intersects(pso::ShaderStageFlags::ALL - pso::ShaderStageFlags::COMPUTE)
{
let mut stage_flags = pso::ShaderStageFlags::FRAGMENT;
stage_flags.set(
pso::ShaderStageFlags::COMPUTE,
binding.stage_flags.contains(pso::ShaderStageFlags::COMPUTE),
);
stage_flags
} else {
binding.stage_flags
};
let offsets = mapping.map_other(|map| map.u);
op.set.assign_stages(&offsets, stage_flags, handles.u);
};
if content.contains(DescriptorContent::SAMPLER) {
let offsets = mapping.map_other(|map| map.s);
op.set
.assign_stages(&offsets, binding.stage_flags, handles.s);
};
mapping.add_content_many(content, binding.stage_flags, 1);
array_index += 1;
if array_index >= binding.count {
// We've run out of array to write to, we should overflow to the next binding.
array_index = 0;
binding_index += 1;
}
}
}
unsafe fn copy_descriptor_set<'a>(&self, _op: pso::DescriptorSetCopy<'a, Backend>) {
unimplemented!()
/*
for offset in 0 .. copy.count {
let (dst_ty, dst_handle_offset, dst_second_handle_offset) = copy
.dst_set
.get_handle_offset(copy.dst_binding + offset as u32);
let (src_ty, src_handle_offset, src_second_handle_offset) = copy
.src_set
.get_handle_offset(copy.src_binding + offset as u32);
assert_eq!(dst_ty, src_ty);
let dst_handle = copy.dst_set.handles.offset(dst_handle_offset as isize);
let src_handle = copy.dst_set.handles.offset(src_handle_offset as isize);
match dst_ty {
pso::DescriptorType::Image {
ty: pso::ImageDescriptorType::Sampled { with_sampler: true }
} => {
let dst_second_handle = copy
.dst_set
.handles
.offset(dst_second_handle_offset as isize);
let src_second_handle = copy
.dst_set
.handles
.offset(src_second_handle_offset as isize);
*dst_handle = *src_handle;
*dst_second_handle = *src_second_handle;
}
_ => *dst_handle = *src_handle,
}
}*/
}
unsafe fn map_memory(
&self,
memory: &mut Memory,
segment: memory::Segment,
) -> Result<*mut u8, device::MapError> {
Ok(memory.host_ptr.offset(segment.offset as isize))
}
unsafe fn unmap_memory(&self, _memory: &mut Memory) {
// persistent mapping FTW
}
unsafe fn flush_mapped_memory_ranges<'a, I>(&self, ranges: I) -> Result<(), device::OutOfMemory>
where
I: IntoIterator,
I::Item: Borrow<(&'a Memory, memory::Segment)>,
{
let _scope = debug_scope!(&self.context, "FlushMappedRanges");
// go through every range we wrote to
for range in ranges.into_iter() {
let &(memory, ref segment) = range.borrow();
let range = memory.resolve(segment);
let _scope = debug_scope!(&self.context, "Range({:?})", range);
memory.flush(&self.context, range);
}
Ok(())
}
unsafe fn invalidate_mapped_memory_ranges<'a, I>(
&self,
ranges: I,
) -> Result<(), device::OutOfMemory>
where
I: IntoIterator,
I::Item: Borrow<(&'a Memory, memory::Segment)>,
{
let _scope = debug_scope!(&self.context, "InvalidateMappedRanges");
// go through every range we want to read from
for range in ranges.into_iter() {
let &(memory, ref segment) = range.borrow();
let range = memory.resolve(segment);
let _scope = debug_scope!(&self.context, "Range({:?})", range);
memory.invalidate(
&self.context,
range,
self.internal.working_buffer.clone(),
self.internal.working_buffer_size,
);
}
Ok(())
}
fn create_semaphore(&self) -> Result<Semaphore, device::OutOfMemory> {
// TODO:
Ok(Semaphore)
}
fn create_fence(&self, signalled: bool) -> Result<Fence, device::OutOfMemory> {
Ok(Arc::new(RawFence {
mutex: Mutex::new(signalled),
condvar: Condvar::new(),
}))
}
unsafe fn reset_fence(&self, fence: &mut Fence) -> Result<(), device::OutOfMemory> {
*fence.mutex.lock() = false;
Ok(())
}
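    // Fences are purely CPU-side (a mutex-guarded bool plus a condvar), so
    // waiting is a condvar wait: a timeout of 0 polls the current state,
    // u64::MAX blocks indefinitely, and anything else becomes a bounded wait.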
unsafe fn wait_for_fence(
&self,
fence: &Fence,
timeout_ns: u64,
) -> Result<bool, device::WaitError> {
use std::time::{Duration, Instant};
debug!("wait_for_fence {:?} for {} ns", fence, timeout_ns);
let mut guard = fence.mutex.lock();
match timeout_ns {
0 => Ok(*guard),
0xFFFFFFFFFFFFFFFF => {
while !*guard {
fence.condvar.wait(&mut guard);
}
Ok(true)
}
_ => {
                let total = Duration::from_nanos(timeout_ns);
let now = Instant::now();
while !*guard {
let duration = match total.checked_sub(now.elapsed()) {
Some(dur) => dur,
None => return Ok(false),
};
let result = fence.condvar.wait_for(&mut guard, duration);
if result.timed_out() {
return Ok(false);
}
}
Ok(true)
}
}
}
unsafe fn get_fence_status(&self, fence: &Fence) -> Result<bool, device::DeviceLost> {
Ok(*fence.mutex.lock())
}
fn create_event(&self) -> Result<(), device::OutOfMemory> {
unimplemented!()
}
unsafe fn get_event_status(&self, _event: &()) -> Result<bool, device::WaitError> {
unimplemented!()
}
unsafe fn set_event(&self, _event: &mut ()) -> Result<(), device::OutOfMemory> {
unimplemented!()
}
unsafe fn reset_event(&self, _event: &mut ()) -> Result<(), device::OutOfMemory> {
unimplemented!()
}
unsafe fn free_memory(&self, mut memory: Memory) {
if !memory.host_ptr.is_null() {
let _vec =
Vec::from_raw_parts(memory.host_ptr, memory.size as usize, memory.size as usize);
// let it drop
memory.host_ptr = ptr::null_mut();
}
for (_, (_range, mut internal)) in memory.local_buffers.write().drain() {
internal.release_resources()
}
}
unsafe fn create_query_pool(
&self,
_query_ty: query::Type,
_count: query::Id,
) -> Result<QueryPool, query::CreationError> {
unimplemented!()
}
unsafe fn destroy_query_pool(&self, _pool: QueryPool) {
unimplemented!()
}
unsafe fn get_query_pool_results(
&self,
_pool: &QueryPool,
_queries: Range<query::Id>,
_data: &mut [u8],
_stride: buffer::Stride,
_flags: query::ResultFlags,
) -> Result<bool, device::WaitError> {
unimplemented!()
}
unsafe fn destroy_shader_module(&self, _shader_lib: ShaderModule) {}
unsafe fn destroy_render_pass(&self, _rp: RenderPass) {
//unimplemented!()
}
unsafe fn destroy_pipeline_layout(&self, _layout: PipelineLayout) {
//unimplemented!()
}
unsafe fn destroy_graphics_pipeline(&self, _pipeline: GraphicsPipeline) {}
unsafe fn destroy_compute_pipeline(&self, _pipeline: ComputePipeline) {}
unsafe fn destroy_framebuffer(&self, _fb: Framebuffer) {}
unsafe fn destroy_buffer(&self, buffer: Buffer) {
let mut internal = buffer.internal;
if internal.raw.is_null() {
return;
}
let arena_arc = match buffer.local_memory_arena.upgrade() {
Some(arena) => arena,
            // The memory was destroyed before the buffer; its resources were already released.
None => return,
};
let mut arena = arena_arc.write();
let memory_index = buffer.memory_index.expect("Buffer's memory index unset");
// Drop the internal stored by the arena on the floor, it owns nothing.
let _ = arena.remove(memory_index);
// Release all memory owned by this buffer
internal.release_resources();
}
unsafe fn destroy_buffer_view(&self, _view: BufferView) {
//unimplemented!()
}
unsafe fn destroy_image(&self, mut image: Image) {
image.internal.release_resources();
}
unsafe fn destroy_image_view(&self, _view: ImageView) {
//unimplemented!()
}
unsafe fn destroy_sampler(&self, _sampler: Sampler) {}
unsafe fn destroy_descriptor_pool(&self, _pool: DescriptorPool) {
//unimplemented!()
}
unsafe fn destroy_descriptor_set_layout(&self, _layout: DescriptorSetLayout) {
//unimplemented!()
}
unsafe fn destroy_fence(&self, _fence: Fence) {
// unimplemented!()
}
unsafe fn destroy_semaphore(&self, _semaphore: Semaphore) {
//unimplemented!()
}
unsafe fn destroy_event(&self, _event: ()) {
//unimplemented!()
}
fn wait_idle(&self) -> Result<(), device::OutOfMemory> {
Ok(())
// unimplemented!()
}
unsafe fn set_image_name(&self, image: &mut Image, name: &str) {
if !verify_debug_ascii(name) {
return;
}
image.internal.debug_name = Some(name.to_string());
}
unsafe fn set_buffer_name(&self, buffer: &mut Buffer, name: &str) {
if !verify_debug_ascii(name) {
return;
}
buffer.internal.debug_name = Some(name.to_string());
}
unsafe fn set_command_buffer_name(&self, command_buffer: &mut CommandBuffer, name: &str) {
if !verify_debug_ascii(name) {
return;
}
command_buffer.debug_name = Some(name.to_string());
}
unsafe fn set_semaphore_name(&self, _semaphore: &mut Semaphore, _name: &str) {
// TODO
}
unsafe fn set_fence_name(&self, _fence: &mut Fence, _name: &str) {
// TODO
}
unsafe fn set_framebuffer_name(&self, _framebuffer: &mut Framebuffer, _name: &str) {
// TODO
}
unsafe fn set_render_pass_name(&self, _render_pass: &mut RenderPass, _name: &str) {
// TODO
}
unsafe fn set_descriptor_set_name(&self, _descriptor_set: &mut DescriptorSet, _name: &str) {
// TODO
}
unsafe fn set_descriptor_set_layout_name(
&self,
_descriptor_set_layout: &mut DescriptorSetLayout,
_name: &str,
) {
// TODO
}
unsafe fn set_pipeline_layout_name(&self, _pipeline_layout: &mut PipelineLayout, _name: &str) {
// TODO
}
}
prepare_syllable_counts.py
#!/usr/bin/env python3
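"""Emit per-word syllable counts from a pronunciation dictionary.

Reads a dictionary file given as the first argument, skipping blank lines and
";;;" comments; the iso-8859-1 encoding and comment style match the CMU
Pronouncing Dictionary conventions, which this script appears to target. Each
phoneme ending in a digit is a stress-marked vowel and counts as one syllable.
Output is one "word count" pair per line, sorted by word.
"""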
import sys
import codecs
def main():
syllable_counts = {}
filepath = sys.argv[1]
lines = codecs.open(filepath, encoding="iso-8859-1").read().split("\n")
for line in lines:
if line.startswith(";;;") or len(line) == 0 or line.isspace():
continue
word, phonemes = line.split(maxsplit=1)
word = word.lower()
syllable_count = 0
for phoneme in phonemes.split():
if phoneme[-1].isdigit():
syllable_count += 1
syllable_counts[word] = syllable_count
for word in sorted(syllable_counts.keys()):
syllable_count = syllable_counts[word]
print(word + " " + str(syllable_count))
if __name__ == "__main__":
    main()
chain.go
package full
import (
"context"
"github.com/filecoin-project/go-lotus/api"
"github.com/filecoin-project/go-lotus/chain/store"
"github.com/filecoin-project/go-lotus/chain/types"
"golang.org/x/xerrors"
"github.com/ipfs/go-cid"
"go.uber.org/fx"
)
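// ChainAPI implements the chain-related methods of the full node API,
// backed by the underlying ChainStore.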
type ChainAPI struct {
fx.In
WalletAPI
Chain *store.ChainStore
}
func (a *ChainAPI) ChainNotify(ctx context.Context) (<-chan []*store.HeadChange, error) {
return a.Chain.SubHeadChanges(ctx), nil
}
func (a *ChainAPI) ChainHead(context.Context) (*types.TipSet, error) {
return a.Chain.GetHeaviestTipSet(), nil
}
func (a *ChainAPI) ChainGetRandomness(ctx context.Context, pts *types.TipSet, tickets []*types.Ticket, lb int) ([]byte, error) {
return a.Chain.GetRandomness(ctx, pts.Cids(), tickets, int64(lb))
}
func (a *ChainAPI) ChainGetBlock(ctx context.Context, msg cid.Cid) (*types.BlockHeader, error) {
return a.Chain.GetBlock(msg)
}
func (a *ChainAPI) ChainGetTipSet(ctx context.Context, cids []cid.Cid) (*types.TipSet, error) {
return a.Chain.LoadTipSet(cids)
}
func (a *ChainAPI) ChainGetBlockMessages(ctx context.Context, msg cid.Cid) (*api.BlockMessages, error) {
b, err := a.Chain.GetBlock(msg)
	if err != nil {
		return nil, err
	}
bmsgs, smsgs, err := a.Chain.MessagesForBlock(b)
if err != nil {
return nil, err
}
cids := make([]cid.Cid, len(bmsgs)+len(smsgs))
for i, m := range bmsgs {
cids[i] = m.Cid()
}
for i, m := range smsgs {
cids[i+len(bmsgs)] = m.Cid()
}
return &api.BlockMessages{
BlsMessages: bmsgs,
SecpkMessages: smsgs,
Cids: cids,
}, nil
}
func (a *ChainAPI) ChainGetParentMessages(ctx context.Context, bcid cid.Cid) ([]api.Message, error) {
b, err := a.Chain.GetBlock(bcid)
if err != nil {
return nil, err
}
// genesis block has no parent messages...
if b.Height == 0 {
return nil, nil
}
// TODO: need to get the number of messages better than this
pts, err := a.Chain.LoadTipSet(b.Parents)
if err != nil {
return nil, err
}
cm, err := a.Chain.MessagesForTipset(pts)
if err != nil {
return nil, err
}
var out []api.Message
for _, m := range cm {
out = append(out, api.Message{
Cid: m.Cid(),
Message: m.VMMessage(),
})
}
return out, nil
}
func (a *ChainAPI) ChainGetParentReceipts(ctx context.Context, bcid cid.Cid) ([]*types.MessageReceipt, error) {
b, err := a.Chain.GetBlock(bcid)
if err != nil {
return nil, err
}
if b.Height == 0 {
return nil, nil
}
// TODO: need to get the number of messages better than this
pts, err := a.Chain.LoadTipSet(b.Parents)
if err != nil {
return nil, err
}
cm, err := a.Chain.MessagesForTipset(pts)
if err != nil {
return nil, err
}
var out []*types.MessageReceipt
for i := 0; i < len(cm); i++ {
r, err := a.Chain.GetParentReceipt(b, i)
if err != nil {
return nil, err
}
out = append(out, r)
}
return out, nil
}
func (a *ChainAPI) ChainGetTipSetByHeight(ctx context.Context, h uint64, ts *types.TipSet) (*types.TipSet, error) {
return a.Chain.GetTipsetByHeight(ctx, h, ts)
}
func (a *ChainAPI) ChainReadObj(ctx context.Context, obj cid.Cid) ([]byte, error) {
blk, err := a.Chain.Blockstore().Get(obj)
if err != nil {
return nil, xerrors.Errorf("blockstore get: %w", err)
}
return blk.RawData(), nil
}
func (a *ChainAPI) ChainSetHead(ctx context.Context, ts *types.TipSet) error {
return a.Chain.SetHead(ts)
}
func (a *ChainAPI) ChainGetGenesis(ctx context.Context) (*types.TipSet, error) {
genb, err := a.Chain.GetGenesis()
if err != nil {
return nil, err
}
return types.NewTipSet([]*types.BlockHeader{genb})
}
func (a *ChainAPI) ChainTipSetWeight(ctx context.Context, ts *types.TipSet) (types.BigInt, error) {
return a.Chain.Weight(ctx, ts)
}
list_databases_builder.rs
use crate::clients::{Client, CosmosUriBuilder, ResourceType};
use crate::prelude::*;
use crate::responses::ListDatabasesResponse;
use crate::ClientRequired;
use azure_sdk_core::errors::{check_status_extract_headers_and_body, AzureError};
use azure_sdk_core::prelude::*;
use futures::stream::{unfold, Stream};
use hyper::StatusCode;
use std::convert::TryInto;
#[derive(Debug)]
pub struct ListDatabasesBuilder<'a, CUB>
where
CUB: CosmosUriBuilder,
{
client: &'a Client<CUB>,
user_agent: Option<&'a str>,
activity_id: Option<&'a str>,
consistency_level: Option<ConsistencyLevel<'a>>,
continuation: Option<&'a str>,
max_item_count: i32,
}
impl<'a, CUB> ListDatabasesBuilder<'a, CUB>
where
CUB: CosmosUriBuilder,
{
pub(crate) fn new(client: &'a Client<CUB>) -> ListDatabasesBuilder<'a, CUB> {
ListDatabasesBuilder {
client,
user_agent: None,
activity_id: None,
consistency_level: None,
continuation: None,
max_item_count: -1,
}
}
}
impl<'a, CUB> Clone for ListDatabasesBuilder<'a, CUB>
where
CUB: CosmosUriBuilder,
{
fn clone(&self) -> Self {
ListDatabasesBuilder {
client: self.client,
user_agent: self.user_agent,
activity_id: self.activity_id,
consistency_level: self.consistency_level,
continuation: self.continuation,
max_item_count: self.max_item_count,
}
}
}
impl<'a, CUB> ClientRequired<'a, CUB> for ListDatabasesBuilder<'a, CUB>
where
CUB: CosmosUriBuilder,
{
fn client(&self) -> &'a Client<CUB> {
self.client
}
}
impl<'a, CUB> UserAgentOption<'a> for ListDatabasesBuilder<'a, CUB>
where
CUB: CosmosUriBuilder,
{
fn user_agent(&self) -> Option<&'a str> {
self.user_agent
}
}
impl<'a, CUB> ActivityIdOption<'a> for ListDatabasesBuilder<'a, CUB>
where
CUB: CosmosUriBuilder,
{
fn activity_id(&self) -> Option<&'a str> {
self.activity_id
}
}
impl<'a, CUB> ConsistencyLevelOption<'a> for ListDatabasesBuilder<'a, CUB>
where
CUB: CosmosUriBuilder,
{
fn consistency_level(&self) -> Option<ConsistencyLevel<'a>> {
self.consistency_level
}
}
impl<'a, CUB> ContinuationOption<'a> for ListDatabasesBuilder<'a, CUB>
where
CUB: CosmosUriBuilder,
{
fn continuation(&self) -> Option<&'a str> {
self.continuation
}
}
impl<'a, CUB> MaxItemCountOption for ListDatabasesBuilder<'a, CUB>
where
CUB: CosmosUriBuilder,
{
fn max_item_count(&self) -> i32 {
self.max_item_count
}
}
impl<'a, CUB> UserAgentSupport<'a> for ListDatabasesBuilder<'a, CUB>
where
CUB: CosmosUriBuilder,
{
type O = ListDatabasesBuilder<'a, CUB>;
fn with_user_agent(self, user_agent: &'a str) -> Self::O {
ListDatabasesBuilder {
client: self.client,
user_agent: Some(user_agent),
activity_id: self.activity_id,
consistency_level: self.consistency_level,
continuation: self.continuation,
max_item_count: self.max_item_count,
}
}
}
impl<'a, CUB> ActivityIdSupport<'a> for ListDatabasesBuilder<'a, CUB>
where
CUB: CosmosUriBuilder,
{
type O = ListDatabasesBuilder<'a, CUB>;
fn
|
(self, activity_id: &'a str) -> Self::O {
ListDatabasesBuilder {
client: self.client,
user_agent: self.user_agent,
activity_id: Some(activity_id),
consistency_level: self.consistency_level,
continuation: self.continuation,
max_item_count: self.max_item_count,
}
}
}
impl<'a, CUB> ConsistencyLevelSupport<'a> for ListDatabasesBuilder<'a, CUB>
where
CUB: CosmosUriBuilder,
{
type O = ListDatabasesBuilder<'a, CUB>;
fn with_consistency_level(self, consistency_level: ConsistencyLevel<'a>) -> Self::O {
ListDatabasesBuilder {
client: self.client,
user_agent: self.user_agent,
activity_id: self.activity_id,
consistency_level: Some(consistency_level),
continuation: self.continuation,
max_item_count: self.max_item_count,
}
}
}
impl<'a, CUB> ContinuationSupport<'a> for ListDatabasesBuilder<'a, CUB>
where
CUB: CosmosUriBuilder,
{
type O = ListDatabasesBuilder<'a, CUB>;
fn with_continuation(self, continuation: &'a str) -> Self::O {
ListDatabasesBuilder {
client: self.client,
user_agent: self.user_agent,
activity_id: self.activity_id,
consistency_level: self.consistency_level,
continuation: Some(continuation),
max_item_count: self.max_item_count,
}
}
}
impl<'a, CUB> MaxItemCountSupport for ListDatabasesBuilder<'a, CUB>
where
CUB: CosmosUriBuilder,
{
type O = ListDatabasesBuilder<'a, CUB>;
fn with_max_item_count(self, max_item_count: i32) -> Self::O {
ListDatabasesBuilder {
client: self.client,
user_agent: self.user_agent,
activity_id: self.activity_id,
consistency_level: self.consistency_level,
continuation: self.continuation,
max_item_count,
}
}
}
// methods callable only when every mandatory field has been filled
impl<'a, CUB> ListDatabasesBuilder<'a, CUB>
where
CUB: CosmosUriBuilder,
{
pub async fn execute(&self) -> Result<ListDatabasesResponse, AzureError> {
trace!("ListDatabasesBuilder::execute called");
let request =
self.client
.prepare_request("dbs", hyper::Method::GET, ResourceType::Databases);
// Fold each optional header (user agent, activity id, consistency level,
// continuation token, max item count) into the request when present.
let request = UserAgentOption::add_header(self, request);
let request = ActivityIdOption::add_header(self, request);
let request = ConsistencyLevelOption::add_header(self, request);
let request = ContinuationOption::add_header(self, request);
let request = MaxItemCountOption::add_header(self, request);
let request = request.body(hyper::Body::empty())?;
let future_response = self.client.hyper_client().request(request);
let (headers, body) =
check_status_extract_headers_and_body(future_response, StatusCode::OK).await?;
Ok((&headers, &body as &[u8]).try_into()?)
}
pub fn stream(&self) -> impl Stream<Item = Result<ListDatabasesResponse, AzureError>> + '_ {
// Pagination state machine: `Init` issues the first request,
// `Continuation` carries the token for each follow-up page, and
// `None` terminates the stream.
#[derive(Debug, Clone, PartialEq)]
enum States {
Init,
Continuation(String),
}
unfold(
Some(States::Init),
move |continuation_token: Option<States>| {
async move {
debug!("continuation_token == {:?}", &continuation_token);
let response = match continuation_token {
Some(States::Init) => self.execute().await,
Some(States::Continuation(continuation_token)) => {
self.clone()
.with_continuation(&continuation_token)
.execute()
.await
}
None => return None,
};
// the ? operator does not work in async move (yet?)
// so we have to resort to this boilerplate
let response = match response {
Ok(response) => response,
Err(err) => return Some((Err(err), None)),
};
let continuation_token = match &response.continuation_token {
Some(ct) => Some(States::Continuation(ct.to_owned())),
None => None,
};
Some((Ok(response), continuation_token))
}
},
)
}
}
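For orientation, a minimal usage sketch of the builder above, under explicit assumptions: that the Cosmos `Client` exposes a `list_databases()` method returning this builder, that `ListDatabasesResponse` carries a `databases` vector, and that `futures::StreamExt` is in scope to drain the paginated stream; none of those names are confirmed by the code above.
// Hedged sketch, not part of the crate: exercises the one-shot `execute()`
// and the continuation-driven `stream()` defined above.
use futures::StreamExt;
async fn list_all(client: &Client<impl CosmosUriBuilder>) -> Result<(), AzureError> {
// Single page, capped at ten items (`list_databases()` is assumed).
let page = client.list_databases().with_max_item_count(10).execute().await?;
println!("first page: {} databases", page.databases.len()); // `databases` field assumed
// Full pagination: bind the builder first so the stream can borrow it,
// then poll until the service stops returning a continuation token.
let builder = client.list_databases();
let mut pages = Box::pin(builder.stream());
while let Some(page) = pages.next().await {
let page = page?;
println!("page: {} databases", page.databases.len());
}
Ok(())
}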
|
with_activity_id
|
intenset.rs
|
#[doc = "Register `INTENSET` reader"]
pub struct R(crate::R<INTENSET_SPEC>);
impl core::ops::Deref for R {
type Target = crate::R<INTENSET_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl From<crate::R<INTENSET_SPEC>> for R {
#[inline(always)]
fn from(reader: crate::R<INTENSET_SPEC>) -> Self {
R(reader)
}
}
#[doc = "Register `INTENSET` writer"]
pub struct W(crate::W<INTENSET_SPEC>);
impl core::ops::Deref for W {
type Target = crate::W<INTENSET_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl core::ops::DerefMut for W {
#[inline(always)]
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl From<crate::W<INTENSET_SPEC>> for W {
#[inline(always)]
fn from(writer: crate::W<INTENSET_SPEC>) -> Self {
W(writer)
}
}
#[doc = "Field `MB` reader - Master On Bus Interrupt Enable"]
pub struct MB_R(crate::FieldReader<bool, bool>);
impl MB_R {
pub(crate) fn new(bits: bool) -> Self {
MB_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for MB_R {
type Target = crate::FieldReader<bool, bool>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `MB` writer - Master On Bus Interrupt Enable"]
pub struct MB_W<'a> {
w: &'a mut W,
}
impl<'a> MB_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !0x01) | (value as u8 & 0x01);
self.w
}
}
#[doc = "Field `SB` reader - Slave On Bus Interrupt Enable"]
pub struct SB_R(crate::FieldReader<bool, bool>);
impl SB_R {
pub(crate) fn new(bits: bool) -> Self {
SB_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for SB_R {
type Target = crate::FieldReader<bool, bool>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `SB` writer - Slave On Bus Interrupt Enable"]
pub struct SB_W<'a> {
w: &'a mut W,
}
impl<'a> SB_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 1)) | ((value as u8 & 0x01) << 1);
self.w
}
}
#[doc = "Field `ERROR` reader - Combined Error Interrupt Enable"]
pub struct ERROR_R(crate::FieldReader<bool, bool>);
impl ERROR_R {
pub(crate) fn new(bits: bool) -> Self {
ERROR_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for ERROR_R {
type Target = crate::FieldReader<bool, bool>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `ERROR` writer - Combined Error Interrupt Enable"]
pub struct ERROR_W<'a> {
w: &'a mut W,
}
impl<'a> ERROR_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W
|
}
impl R {
#[doc = "Bit 0 - Master On Bus Interrupt Enable"]
#[inline(always)]
pub fn mb(&self) -> MB_R {
MB_R::new((self.bits & 0x01) != 0)
}
#[doc = "Bit 1 - Slave On Bus Interrupt Enable"]
#[inline(always)]
pub fn sb(&self) -> SB_R {
SB_R::new(((self.bits >> 1) & 0x01) != 0)
}
#[doc = "Bit 7 - Combined Error Interrupt Enable"]
#[inline(always)]
pub fn error(&self) -> ERROR_R {
ERROR_R::new(((self.bits >> 7) & 0x01) != 0)
}
}
impl W {
#[doc = "Bit 0 - Master On Bus Interrupt Enable"]
#[inline(always)]
pub fn mb(&mut self) -> MB_W {
MB_W { w: self }
}
#[doc = "Bit 1 - Slave On Bus Interrupt Enable"]
#[inline(always)]
pub fn sb(&mut self) -> SB_W {
SB_W { w: self }
}
#[doc = "Bit 7 - Combined Error Interrupt Enable"]
#[inline(always)]
pub fn error(&mut self) -> ERROR_W {
ERROR_W { w: self }
}
#[doc = "Writes raw bits to the register."]
#[inline(always)]
pub unsafe fn bits(&mut self, bits: u8) -> &mut Self {
self.0.bits(bits);
self
}
}
#[doc = "I2CM Interrupt Enable Set\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [intenset](index.html) module"]
pub struct INTENSET_SPEC;
impl crate::RegisterSpec for INTENSET_SPEC {
type Ux = u8;
}
#[doc = "`read()` method returns [intenset::R](R) reader structure"]
impl crate::Readable for INTENSET_SPEC {
type Reader = R;
}
#[doc = "`write(|w| ..)` method takes [intenset::W](W) writer structure"]
impl crate::Writable for INTENSET_SPEC {
type Writer = W;
}
#[doc = "`reset()` method sets INTENSET to value 0"]
impl crate::Resettable for INTENSET_SPEC {
#[inline(always)]
fn reset_value() -> Self::Ux {
0
}
}
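For orientation, a hedged sketch of the generated read-modify-write API that the doc comment above links to; `intenset` below stands in for the register instance owned by the peripheral's register block, whose exact path is device-specific, and `crate::Reg` is assumed to be the usual svd2rust re-export of the generic register type.
// Hedged usage sketch, not part of the generated file.
fn enable_bus_interrupts(intenset: &crate::Reg<INTENSET_SPEC>) {
// INTENSET is a write-one-to-set register, so writing only the bits to
// set is idiomatic; `modify` would also work.
intenset.write(|w| w.mb().set_bit().error().set_bit());
// Reads return an `R` whose field accessors mirror the bit layout.
let _sb_enabled: bool = intenset.read().sb().bit_is_set();
}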
|
{
self.w.bits = (self.w.bits & !(0x01 << 7)) | ((value as u8 & 0x01) << 7);
self.w
}
|
actions_graphql.go
|
package ticker
import (
"github.com/stellar-modules/go/services/ticker/internal/gql"
"github.com/stellar-modules/go/services/ticker/internal/tickerdb"
hlog "github.com/stellar-modules/go/sdk/support/log"
)
func StartGraphQLServer(s *tickerdb.TickerSession, l *hlog.Entry, port string)
|
{
graphql := gql.New(s, l)
graphql.Serve(port)
}
|
|
express_route_circuit_peering.py
|
# coding=utf-8
# *** WARNING: this file was generated by the Kulado Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import kulado
import kulado.runtime
from .. import utilities, tables
class ExpressRouteCircuitPeering(kulado.CustomResource):
|
azure_asn: kulado.Output[float]
"""
The ASN used by Azure.
"""
express_route_circuit_name: kulado.Output[str]
"""
The name of the ExpressRoute Circuit in which to create the Peering.
"""
microsoft_peering_config: kulado.Output[dict]
"""
A `microsoft_peering_config` block as defined below. Required when `peering_type` is set to `MicrosoftPeering`.
"""
peer_asn: kulado.Output[float]
"""
Either a 16-bit or a 32-bit ASN, which can be either public or private.
"""
peering_type: kulado.Output[str]
"""
The type of the ExpressRoute Circuit Peering. Acceptable values include `AzurePrivatePeering`, `AzurePublicPeering` and `MicrosoftPeering`. Changing this forces a new resource to be created.
"""
primary_azure_port: kulado.Output[str]
"""
The Primary Port used by Azure for this Peering.
"""
primary_peer_address_prefix: kulado.Output[str]
"""
A `/30` subnet for the primary link.
"""
resource_group_name: kulado.Output[str]
"""
The name of the resource group in which to
create the Express Route Circuit Peering. Changing this forces a new resource to be created.
"""
secondary_azure_port: kulado.Output[str]
"""
The Secondary Port used by Azure for this Peering.
"""
secondary_peer_address_prefix: kulado.Output[str]
"""
A `/30` subnet for the secondary link.
"""
shared_key: kulado.Output[str]
"""
The shared key. Can be a maximum of 25 characters.
"""
vlan_id: kulado.Output[float]
"""
A valid VLAN ID to establish this peering on.
"""
def __init__(__self__, resource_name, opts=None, express_route_circuit_name=None, microsoft_peering_config=None, peer_asn=None, peering_type=None, primary_peer_address_prefix=None, resource_group_name=None, secondary_peer_address_prefix=None, shared_key=None, vlan_id=None, __name__=None, __opts__=None):
"""
Manages an ExpressRoute Circuit Peering.
:param str resource_name: The name of the resource.
:param kulado.ResourceOptions opts: Options for the resource.
:param kulado.Input[str] express_route_circuit_name: The name of the ExpressRoute Circuit in which to create the Peering.
:param kulado.Input[dict] microsoft_peering_config: A `microsoft_peering_config` block as defined below. Required when `peering_type` is set to `MicrosoftPeering`.
:param kulado.Input[float] peer_asn: Either a 16-bit or a 32-bit ASN, which can be either public or private.
:param kulado.Input[str] peering_type: The type of the ExpressRoute Circuit Peering. Acceptable values include `AzurePrivatePeering`, `AzurePublicPeering` and `MicrosoftPeering`. Changing this forces a new resource to be created.
:param kulado.Input[str] primary_peer_address_prefix: A `/30` subnet for the primary link.
:param kulado.Input[str] resource_group_name: The name of the resource group in which to
create the Express Route Circuit Peering. Changing this forces a new resource to be created.
:param kulado.Input[str] secondary_peer_address_prefix: A `/30` subnet for the secondary link.
:param kulado.Input[str] shared_key: The shared key. Can be a maximum of 25 characters.
:param kulado.Input[float] vlan_id: A valid VLAN ID to establish this peering on.
> This content is derived from https://github.com/terraform-providers/terraform-provider-azurerm/blob/master/website/docs/r/express_route_circuit_peering.html.markdown.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if not resource_name:
raise TypeError('Missing resource name argument (for URN creation)')
if not isinstance(resource_name, str):
raise TypeError('Expected resource name to be a string')
if opts and not isinstance(opts, kulado.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
__props__ = dict()
if express_route_circuit_name is None:
raise TypeError("Missing required property 'express_route_circuit_name'")
__props__['express_route_circuit_name'] = express_route_circuit_name
__props__['microsoft_peering_config'] = microsoft_peering_config
__props__['peer_asn'] = peer_asn
if peering_type is None:
raise TypeError("Missing required property 'peering_type'")
__props__['peering_type'] = peering_type
if primary_peer_address_prefix is None:
raise TypeError("Missing required property 'primary_peer_address_prefix'")
__props__['primary_peer_address_prefix'] = primary_peer_address_prefix
if resource_group_name is None:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
if secondary_peer_address_prefix is None:
raise TypeError("Missing required property 'secondary_peer_address_prefix'")
__props__['secondary_peer_address_prefix'] = secondary_peer_address_prefix
__props__['shared_key'] = shared_key
if vlan_id is None:
raise TypeError("Missing required property 'vlan_id'")
__props__['vlan_id'] = vlan_id
__props__['azure_asn'] = None
__props__['primary_azure_port'] = None
__props__['secondary_azure_port'] = None
super(ExpressRouteCircuitPeering, __self__).__init__(
'azure:network/expressRouteCircuitPeering:ExpressRouteCircuitPeering',
resource_name,
__props__,
opts)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
|
|
hphosts.go
|
package lists
var hphosts = List{
Key: "hosts-file.net",
Name: "hpHosts",
URL: "http://hosts-file.net/",
Description: `hpHosts is a community-managed and maintained hosts file that provides an additional layer of protection against access to ad, tracking and malicious websites.`,
Iterator: func() Iterator {
fn := func(category, description string) func(row []string) *Entry {
return func(row []string) *Entry {
if len(row) < 2 {
return nil
}
if row[1] == "localhost" {
return nil
}
return &Entry{
Domain: row[1],
Category: category,
Description: description,
}
}
}
return Combine(
SSV("http://hosts-file.net/emd.txt", fn("malware", "engaged in malware distribution")),
SSV("http://hosts-file.net/exp.txt", fn("malware", "engaged in the housing, development or distribution of exploits")),
SSV("http://hosts-file.net/psh.txt", fn("phishing", "engaged in phishing")),
)
}(),
}
func
|
() {
Lists = append(Lists, hphosts)
}
|
init
|
radial-chart-default-donut-example.ts
|
/**
* @license
* Copyright 2021 Dynatrace LLC
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { Component } from '@angular/core';
@Component({
selector: 'dt-example-radial-chart-donut-default',
|
templateUrl: './radial-chart-default-donut-example.html',
})
export class DtExampleRadialChartDefaultDonut {
_chartSeries = [
{
name: 'Chrome',
value: 43,
},
{
name: 'Safari',
value: 22,
},
{
name: 'Firefox',
value: 15,
},
{
name: 'Microsoft Edge',
value: 9,
},
];
}
|