file_name (string, 3-137 chars) | prefix (string, 0-918k chars) | suffix (string, 0-962k chars) | middle (string, 0-812k chars)
---|---|---|---
mod.ts | import { readCount } from "./read/readCount.ts"; |
||
value-converter.ts | import { inject } from 'aurelia-dependency-injection'
import { Project, ProjectItem, CLIOptions, UI } from 'aurelia-cli'
@inject(Project, CLIOptions, UI)
export default class | {
constructor (
private project: Project,
private options: CLIOptions,
private ui: UI,
) {}
async execute () {
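// Resolve the converter name from the CLI args or an interactive prompt, then scaffold the source file and commit it to the project.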
const name = await this.ui.ensureAnswer(
this.options.args[0],
'What would you like to call the value converter?',
)
let fileName = this.project.makeFileName(name)
let className = this.project.makeClassName(name)
this.project.valueConverters.add(
ProjectItem.text(`${fileName}.ts`, this.generateSource(className)),
)
await this.project.commitChanges()
await this.ui.log(`Created ${fileName}.`)
}
generateSource (className) {
return `export class ${className}ValueConverter {
toView(value) {
//
}
fromView(value) {
//
}
}
`
}
}
| ValueConverterGenerator |
object_tracking_2d_fair_mot_learner.py | # Copyright 2020-2021 OpenDR European Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import json
import torch
import ntpath
import shutil
import numpy as np
import onnxruntime as ort
from torchvision.transforms import transforms as T
from opendr.engine.learners import Learner
from opendr.engine.datasets import DatasetIterator, ExternalDataset, MappedDatasetIterator
from opendr.perception.object_tracking_2d.logger import Logger
from opendr.perception.object_tracking_2d.datasets.mot_dataset import JointDataset, RawMotDatasetIterator
from opendr.perception.object_tracking_2d.fair_mot.algorithm.lib.models.model import create_model
from opendr.perception.object_tracking_2d.fair_mot.algorithm.run import train, evaluate
from opendr.perception.object_tracking_2d.fair_mot.algorithm.load import load_from_checkpoint
from opendr.perception.object_tracking_2d.datasets.mot_dataset import letterbox, process as process_dataset
from opendr.perception.object_tracking_2d.fair_mot.algorithm.lib.tracker.multitracker import JDETracker
from opendr.engine.data import Image
from opendr.engine.target import TrackingAnnotation, TrackingAnnotationList
from opendr.engine.constants import OPENDR_SERVER_URL
from urllib.request import urlretrieve
class ObjectTracking2DFairMotLearner(Learner):
def __init__(
self,
lr=0.0001,
iters=-1,
batch_size=4,
optimizer="adam",
lr_schedule="",
backbone="dla_34",
network_head="",
checkpoint_after_iter=0,
checkpoint_load_iter=0,
temp_path="",
device="cuda",
threshold=0.3,
scale=1.0,
lr_step=[20],
head_conv=256,
ltrb=True,
num_classes=1,
reg_offset=True,
gpus=[0],
num_workers=4,
mse_loss=False,
reg_loss='l1',
dense_wh=False,
cat_spec_wh=False,
reid_dim=128,
norm_wh=False,
wh_weight=0.1,
off_weight=1,
id_weight=1,
num_epochs=30,
hm_weight=1,
down_ratio=4,
max_objs=500,
track_buffer=30,
image_mean=[0.408, 0.447, 0.47],
image_std=[0.289, 0.274, 0.278],
frame_rate=30,
min_box_area=100,
):
# Pass the shared parameters to super's constructor so they are initialized as class attributes
super(ObjectTracking2DFairMotLearner, self).__init__(
lr=lr,
iters=iters,
batch_size=batch_size,
optimizer=optimizer,
lr_schedule=lr_schedule,
backbone=backbone,
network_head=network_head,
checkpoint_after_iter=checkpoint_after_iter,
checkpoint_load_iter=checkpoint_load_iter,
temp_path=temp_path,
device=device,
threshold=threshold,
scale=scale,
)
self.ltrb = ltrb
self.head_conv = head_conv
self.num_classes = num_classes
self.reid_dim = reid_dim
self.reg_offset = reg_offset
self.gpus = gpus
self.num_workers = num_workers
self.mse_loss = mse_loss
self.reg_loss = reg_loss
self.dense_wh = dense_wh
self.cat_spec_wh = cat_spec_wh
self.norm_wh = norm_wh
self.wh_weight = wh_weight
self.off_weight = off_weight
self.id_weight = id_weight
self.num_epochs = num_epochs
self.lr_step = lr_step
self.hm_weight = hm_weight
self.down_ratio = down_ratio
self.max_objs = max_objs
self.track_buffer = track_buffer
self.image_mean = image_mean
self.image_std = image_std
self.frame_rate = frame_rate
self.min_box_area = min_box_area
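# Split the total batch across GPUs: the first (main) GPU gets batch_size // len(gpus) samples and the remainder is distributed one-by-one over the remaining GPUs.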
main_batch_size = self.batch_size // len(self.gpus)
rest_batch_size = (self.batch_size - main_batch_size)
self.chunk_sizes = [main_batch_size]
for i in range(len(self.gpus) - 1):
worker_chunk_size = rest_batch_size // (len(self.gpus) - 1)
if i < rest_batch_size % (len(self.gpus) - 1):
worker_chunk_size += 1
self.chunk_sizes.append(worker_chunk_size)
self.__create_model()
def save(self, path, verbose=False):
"""
This method is used to save a trained model.
Provided with the path, absolute or relative, including a *folder* name, it creates a directory with the name
of the *folder* provided and saves the model inside with a proper format and a .json file with metadata.
If self.optimize was run previously, it saves the optimized ONNX model in a similar fashion, by copying it
from the self.temp_path it was saved previously during conversion.
:param path: path where the model will be saved, including the folder name
:type path: str
:param verbose: whether to print success message or not, defaults to 'False'
:type verbose: bool, optional
"""
if self.model is None and self.ort_session is None:
raise UserWarning("No model is loaded, cannot save.")
folder_name, _, tail = self.__extract_trailing(path) # Extract trailing folder name from path
# Also extract folder name without any extension if extension is erroneously provided
folder_name_no_ext = folder_name.split(sep='.')[0]
# Extract path without folder name, by removing folder name from original path
path_no_folder_name = ''.join(path.rsplit(folder_name, 1))
# If tail is '', then path was a/b/c/, which leaves a trailing double '/'
if tail == '':
path_no_folder_name = path_no_folder_name[0:-1] # Remove one '/'
# Create model directory
new_path = path_no_folder_name + folder_name_no_ext
os.makedirs(new_path, exist_ok=True)
model_metadata = {"model_paths": [], "framework": "pytorch", "format": "", "has_data": False,
"inference_params": {}, "optimized": None, "optimizer_info": {}}
if self.model.ort_session is None:
model_metadata["model_paths"] = [
folder_name_no_ext + ".pth",
]
model_metadata["optimized"] = False
model_metadata["format"] = "pth"
torch.save({
'state_dict': self.model.state_dict()
}, os.path.join(path_no_folder_name, folder_name_no_ext, model_metadata["model_paths"][0]))
if verbose:
print("Saved Pytorch model.")
else:
model_metadata["model_paths"] = [
folder_name_no_ext + ".onnx"
]
model_metadata["optimized"] = True
model_metadata["format"] = "onnx"
shutil.copy2(
os.path.join(self.temp_path, "onnx_model_temp.onnx"),
os.path.join(path_no_folder_name, folder_name_no_ext, model_metadata["model_paths"][0])
)
if verbose:
print("Saved ONNX model.")
with open(os.path.join(new_path, folder_name_no_ext + ".json"), 'w') as outfile:
json.dump(model_metadata, outfile)
def load(
self,
path,
verbose=False,
):
"""
Loads the model from inside the path provided, based on the metadata .json file included.
:param path: path of the directory where the model was saved
:type path: str
:param verbose: whether to print success message or not, defaults to 'False'
:type verbose: bool, optional
"""
model_name, _, _ = self.__extract_trailing(path) # Trailing folder name from the path provided
with open(os.path.join(path, model_name + ".json")) as metadata_file:
metadata = json.load(metadata_file)
if not metadata["optimized"]:
self.__load_from_pth(self.model, os.path.join(path, metadata["model_paths"][0]))
if verbose:
print("Loaded Pytorch model.")
else:
self.__load_from_onnx(os.path.join(path, metadata["model_paths"][0]))
if verbose:
print("Loaded ONNX model.")
def reset(self):
self.tracker.reset()
def fit(
self,
dataset,
val_dataset=None,
val_epochs=-1,
logging_path=None,
silent=False,
verbose=False,
train_split_paths=None,
val_split_paths=None,
resume_optimizer=False,
nID=None
):
if train_split_paths is None:
train_split_paths = {
"mot20": os.path.join(
"perception", "object_tracking_2d", "datasets", "splits", "mot20.train"
)
}
if val_split_paths is None:
val_split_paths = train_split_paths
logger = Logger(silent, verbose, logging_path)
(
input_dataset_iterator,
eval_dataset_iterator,
) = self._prepare_datasets(
dataset,
val_dataset,
train_split_paths,
val_split_paths,
require_val_dataset=val_epochs > 0,
)
if nID is None:
nID = input_dataset_iterator.nID if hasattr(input_dataset_iterator, "nID") else dataset.nID
checkpoints_path = os.path.join(self.temp_path, "checkpoints")
if self.checkpoint_after_iter != 0 or self.checkpoint_load_iter != 0:
os.makedirs(checkpoints_path, exist_ok=True)
start_epoch = 0
if self.checkpoint_load_iter != 0:
_, _, start_epoch = load_from_checkpoint(
self.model, os.path.join(checkpoints_path, f"checkpoint_{self.checkpoint_load_iter}.pth"),
self.model_optimizer, resume_optimizer, self.lr, self.lr_step, log=logger.log,
)
last_eval_result = train(
self.model,
self.infer,
self.model_optimizer,
input_dataset_iterator,
eval_dataset_iterator,
self.batch_size,
self.num_workers,
self.gpus,
self.chunk_sizes,
self.iters,
"train", # exp_id,
self.device,
silent, # hide_data_time,
1 if verbose else (-1 if silent else 10), # print_iter,
self.mse_loss,
self.reg_loss,
self.dense_wh,
self.cat_spec_wh,
self.reid_dim,
nID,
self.norm_wh,
1, # num_stack,
self.wh_weight,
self.off_weight,
self.id_weight,
self.num_epochs,
self.lr_step,
self.temp_path,
self.lr,
self.reg_offset,
self.hm_weight,
checkpoints_path,
self.checkpoint_after_iter,
start_epoch,
val_epochs=val_epochs,
log=logger.log,
)
logger.close()
return last_eval_result
def eval(
self,
dataset,
val_split_paths=None,
logging_path=None,
silent=False,
verbose=False,
):
logger = Logger(silent, verbose, logging_path)
(
_,
eval_dataset_iterator,
) = self._prepare_datasets(
None,
dataset,
None,
val_split_paths,
require_dataset=False,
)
result = evaluate(self.infer, eval_dataset_iterator)
logger.log(Logger.LOG_WHEN_NORMAL, result)
logger.close()
return result
def infer(self, batch, frame_ids=None, img_size=(1088, 608)):
if self.model is None:
raise ValueError("No model loaded or created")
self.model.eval()
is_single_image = False
if isinstance(batch, Image):
batch = [batch]
is_single_image = True
elif not isinstance(batch, list):
raise ValueError("Input batch should be an engine.Image or a list of engine.Image")
if frame_ids is None:
frame_ids = [-1] * len(batch)
elif is_single_image:
frame_ids = [frame_ids]
results = []
for image, frame_id in zip(batch, frame_ids):
img0 = image.convert("channels_last", "bgr") # BGR
img, _, _, _ = letterbox(img0, height=img_size[1], width=img_size[0])
# Convert BGR to RGB, HWC to CHW, then scale to [0, 1]
img = img[:, :, ::-1].transpose(2, 0, 1)
img = np.ascontiguousarray(img, dtype=np.float32)
img /= 255.0
blob = torch.from_numpy(img).to(self.device).unsqueeze(0)
online_targets = self.tracker.update(blob, img0)
online_tlwhs = []
online_ids = []
online_scores = []
for t in online_targets:
tlwh = t.tlwh
tid = t.track_id
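# Keep only boxes that are large enough (area > min_box_area) and not overly wide (width/height <= 1.6).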
vertical = tlwh[2] / tlwh[3] > 1.6
if tlwh[2] * tlwh[3] > self.min_box_area and not vertical:
online_tlwhs.append(tlwh)
online_ids.append(tid)
online_scores.append(t.score)
result = TrackingAnnotationList([
TrackingAnnotation(
name=0,
top=tlwh[0],
left=tlwh[1],
width=tlwh[2],
height=tlwh[3],
id=id,
score=score,
frame=frame_id,
) for tlwh, id, score in zip(
online_tlwhs,
online_ids,
online_scores
)
])
results.append(result)
if is_single_image:
results = results[0]
return results
def optimize(self, do_constant_folding=False, img_size=(1088, 608), optimizable_dcn_v2=False):
"""
Optimize method converts the model to ONNX format and saves the
model in the parent directory defined by self.temp_path. The ONNX model is then loaded.
:param do_constant_folding: whether to optimize constants, defaults to 'False'
:type do_constant_folding: bool, optional
"""
if not optimizable_dcn_v2:
raise Exception("Can not optimize the model while DCNv2 implementation is not optimizable")
if self.model is None:
raise UserWarning("No model is loaded, cannot optimize. Load or train a model first.")
if self.model.ort_session is not None:
raise UserWarning("Model is already optimized in ONNX.")
input_shape = [
1,
3,
img_size[1],
img_size[0],
]
try:
self.__convert_to_onnx(
input_shape,
os.path.join(self.temp_path, "onnx_model_temp.onnx"), do_constant_folding
)
except FileNotFoundError:
# Create temp directory
os.makedirs(self.temp_path, exist_ok=True)
self.__convert_to_onnx(
input_shape,
os.path.join(self.temp_path, "onnx_model_temp.onnx"), do_constant_folding
)
self.__load_from_onnx(os.path.join(self.temp_path, "onnx_model_temp.onnx"))
@staticmethod
def download(model_name, path, server_url=None):
if server_url is None and model_name not in [
"crowdhuman_dla34",
"fairmot_dla34",
]:
raise ValueError("Unknown model_name: " + model_name)
os.makedirs(path, exist_ok=True)
if server_url is None:
server_url = os.path.join(
OPENDR_SERVER_URL, "perception", "object_tracking_2d",
"fair_mot"
)
url = os.path.join(
server_url, model_name
)
model_dir = os.path.join(path, model_name)
os.makedirs(model_dir, exist_ok=True)
urlretrieve(os.path.join(
url, model_name + ".json"
), os.path.join(
model_dir, model_name + ".json"
))
try:
urlretrieve(os.path.join(
url, model_name + ".pth"
), os.path.join(
model_dir, model_name + ".pth"
))
except Exception:
urlretrieve(os.path.join(
url, model_name + ".tckpt"
), os.path.join(
model_dir, model_name + ".pth"
))
print("Downloaded model", model_name, "to", model_dir)
return model_dir
def __convert_to_onnx(self, input_shape, output_name, do_constant_folding=False, verbose=False):
inp = torch.randn(input_shape).to(self.device)
input_names = ["data"]
output_names = self.heads.keys()
torch.onnx.export(
self.model, inp, output_name, verbose=verbose, enable_onnx_checker=True,
do_constant_folding=do_constant_folding, input_names=input_names, output_names=output_names
)
def __load_from_onnx(self, path):
"""
This method loads an ONNX model from the path provided into an onnxruntime inference session.
:param path: path to ONNX model
:type path: str
"""
self.model.ort_session = ort.InferenceSession(path)
# The comments below are the alternative way to use the onnx model, it might be useful in the future
# depending on how ONNX saving/loading will be implemented across the toolkit.
# # Load the ONNX model
# self.model = onnx.load(path)
#
# # Check that the IR is well formed
# onnx.checker.check_model(self.model)
#
# # Print a human readable representation of the graph
# onnx.helper.printable_graph(self.model.graph)
def | (self, model, path, use_original_dict=False):
all_params = torch.load(path, map_location=self.device)
model.load_state_dict(all_params if use_original_dict else all_params["state_dict"])
def _prepare_datasets(
self,
dataset,
val_dataset,
train_split_paths,
val_split_paths,
require_dataset=True,
require_val_dataset=True,
):
input_dataset_iterator = None
eval_dataset_iterator = None
if isinstance(dataset, ExternalDataset):
dataset_path = dataset.path
if dataset.dataset_type.lower() != "mot":
raise ValueError(
"ExternalDataset (" + str(dataset) +
") is given as a dataset, but it is not a MOT dataset")
transforms = T.Compose([T.ToTensor()])
input_dataset_iterator = JointDataset(
dataset_path,
train_split_paths,
down_ratio=self.down_ratio,
max_objects=self.max_objs,
ltrb=self.ltrb,
mse_loss=self.mse_loss,
augment=False, transforms=transforms,
)
elif isinstance(dataset, DatasetIterator):
input_dataset_iterator = MappedDatasetIterator(
dataset,
lambda d: process_dataset(
d[0], d[1], self.ltrb, self.down_ratio,
self.max_objs, self.num_classes, self.mse_loss
)
)
else:
if require_dataset or dataset is not None:
raise ValueError(
"dataset parameter should be an ExternalDataset or a DatasetIterator"
)
if isinstance(val_dataset, ExternalDataset):
val_dataset_path = val_dataset.path
if val_dataset.dataset_type.lower() != "mot":
raise ValueError(
"ExternalDataset (" + str(val_dataset) +
") is given as a val_dataset, but it is not a MOT dataset"
)
eval_dataset_iterator = RawMotDatasetIterator(
val_dataset_path,
val_split_paths,
down_ratio=self.down_ratio,
max_objects=self.max_objs,
ltrb=self.ltrb,
mse_loss=self.mse_loss,
)
elif isinstance(val_dataset, DatasetIterator):
eval_dataset_iterator = val_dataset
elif val_dataset is None:
if isinstance(dataset, ExternalDataset):
val_dataset_path = dataset.path
if dataset.dataset_type.lower() != "mot":
raise ValueError(
"ExternalDataset (" + str(dataset) +
") is given as a dataset, but it is not a MOT dataset"
)
eval_dataset_iterator = RawMotDatasetIterator(
val_dataset_path,
val_split_paths,
down_ratio=self.down_ratio,
max_objects=self.max_objs,
ltrb=self.ltrb,
mse_loss=self.mse_loss,
)
elif require_val_dataset:
raise ValueError(
"val_dataset is None and can't be derived from" +
" the dataset object because the dataset is not an ExternalDataset"
)
else:
eval_dataset_iterator = input_dataset_iterator
else:
raise ValueError(
"val_dataset parameter should be an ExternalDataset or a DatasetIterator or None"
)
return input_dataset_iterator, eval_dataset_iterator
def __create_model(self):
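# FairMOT output heads: 'hm' = detection heatmap, 'wh' = box size (4 values when ltrb), 'id' = re-ID embedding, plus an optional 'reg' = center offset head.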
heads = {
'hm': self.num_classes,
'wh': 2 if not self.ltrb else 4,
'id': self.reid_dim
}
if self.reg_offset:
heads.update({'reg': 2})
self.heads = heads
self.model = create_model(self.backbone, heads, self.head_conv)
self.model.to(self.device)
self.model.ort_session = None
self.model.heads_names = heads.keys()
self.model_optimizer = torch.optim.Adam(self.model.parameters(), self.lr)
self.tracker = JDETracker(
self.model,
self.threshold,
self.track_buffer,
self.max_objs,
self.image_mean,
self.image_std,
self.down_ratio,
self.num_classes,
self.reg_offset,
self.ltrb,
self.frame_rate,
)
@staticmethod
def __extract_trailing(path):
"""
Extracts the trailing folder name or filename from a path provided in an OS-generic way, also handling
cases where the last trailing character is a separator. Returns the folder name and the split head and tail.
:param path: the path to extract the trailing filename or folder name from
:type path: str
:return: the folder name, the head and tail of the path
:rtype: tuple of three strings
"""
head, tail = ntpath.split(path)
folder_name = tail or ntpath.basename(head) # handle both a/b/c and a/b/c/
return folder_name, head, tail
| __load_from_pth |
tanh_dnnlowp_op_test.py | import collections
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, dyndep, workspace
from hypothesis import given, settings
dyndep.InitOpsLibrary("//caffe2/caffe2/quantization/server:dnnlowp_ops")
workspace.GlobalInit(["caffe2", "--caffe2_omp_num_threads=11"])
class DNNLowPTanhOpTest(hu.HypothesisTestCase):
@given(size=st.integers(1024, 2048), is_empty=st.booleans(), **hu.gcs_cpu_only)
@settings(max_examples=10, deadline=None)
def test_dnnlowp_tanh(self, size, is_empty, gc, dc):
if is_empty:
size = 0
X = (np.random.rand(size) * 10 - 5).astype(np.float32)
Output = collections.namedtuple("Output", ["Y", "op_type", "engine"])
outputs = []
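# Run the fp32 reference Tanh and the quantized DNNLOWP variants; every output is compared against the reference at the end.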
op_engine_list = [("Tanh", ""), ("Tanh", "DNNLOWP"), ("Int8Tanh", "DNNLOWP")]
for op_type, engine in op_engine_list:
net = core.Net("test_net")
if engine == "DNNLOWP":
quantize = core.CreateOperator(
"Quantize",
["X"],
["X_q"],
engine=engine,
device_option=gc,
followed_by="Tanh",
)
net.Proto().op.extend([quantize])
tanh = core.CreateOperator(
op_type, | device_option=gc,
)
net.Proto().op.extend([tanh])
if engine == "DNNLOWP":
dequantize = core.CreateOperator(
"Dequantize", ["Y_q"], ["Y"], engine=engine, device_option=gc
)
net.Proto().op.extend([dequantize])
self.ws.create_blob("X").feed(X, device_option=gc)
self.ws.run(net)
outputs.append(
Output(Y=self.ws.blobs["Y"].fetch(), op_type=op_type, engine=engine)
)
for o in outputs:
np.testing.assert_allclose(o.Y, outputs[0].Y, atol=0.02, rtol=0) | ["X_q" if engine == "DNNLOWP" else "X"],
["Y_q" if engine == "DNNLOWP" else "Y"],
engine=engine, |
filter-settings.d.ts | import { ChildProperty } from '@syncfusion/ej2-base';
import { PredicateModel, ICustomOptr } from '@syncfusion/ej2-grids';
import { FilterType, FilterHierarchyMode } from '../base/enum';
/**
* Configures the filtering behavior of the Gantt.
*/
export declare class | extends ChildProperty<FilterSettings> {
/**
* Specifies the columns to be filtered at initial rendering of the Gantt.
* You can also get the columns that were currently filtered.
*/
columns: PredicateModel[];
/**
* Defines filter type of Gantt.
* * `Menu` - Enables menu filters in Grid.
*/
type: FilterType;
/**
* The `operators` is used to override the default operators in filter menu. This should be defined by type wise
* (string, number, date and boolean). Based on the column type, this customize operator list will render in filter menu.
*/
operators: ICustomOptr;
/**
* If ignoreAccent set to true, then filter ignores the diacritic characters or accents while filtering.
*/
ignoreAccent: boolean;
/**
* Defines the filter types. The available options are,
* `Parent`: Shows the filtered record with parent record.
* `Child`: Shows the filtered record with child record.
* `Both` : shows the filtered record with both parent and child record.
* `None` : Shows only filtered record.
*/
hierarchyMode: FilterHierarchyMode;
}
| FilterSettings |
test.rs | use serenity::client::Context;
use serenity::framework::standard::{
macros::{command, group},
CommandResult,
};
use serenity::model::channel::Message;
#[group]
#[prefixes("test")]
#[description = "Message Test"]
#[default_command(random_message)]
#[commands(dm, mention_me)]
struct Test;
#[command]
pub async fn random_message(_ctx: &Context, _msg: &Message) -> CommandResult {
// TODO: send a random/static message to channel without mention anyone
Ok(())
}
#[command] | .dm(&ctx, |m| {
m.content("安安");
m
})
.await;
if let Err(why) = dm {
println!("Error when direct messaging user: {:?}", why);
}
Ok(())
}
#[command]
pub async fn mention_me(_ctx: &Context, _msg: &Message) -> CommandResult {
// TODO: mention the original author
Ok(())
} | pub async fn dm(ctx: &Context, msg: &Message) -> CommandResult {
let dm = msg
.author |
grammar.rs | //! This is the actual "grammar" of the Rust language.
//!
//! Each function in this module and its children corresponds
//! to a production of the formal grammar. Submodules roughly
//! correspond to different *areas* of the grammar. By convention,
//! each submodule starts with `use super::*` import and exports
//! "public" productions via `pub(super)`.
//!
//! See docs for `Parser` to learn about API, available to the grammar,
//! and see docs for `Event` to learn how this actually manages to
//! produce parse trees.
//!
//! Code in this module also contains inline tests, which start with
//! `// test name-of-the-test` comment and look like this:
//!
//! ```
//! // test function_with_zero_parameters
//! // fn foo() {}
//! ```
//!
//! After adding a new inline-test, run `cargo test -p xtask` to
//! extract it as a standalone text-fixture into
//! `crates/syntax/test_data/parser/`, and run `cargo test` once to
//! create the "gold" value.
//!
//! Coding convention: rules like `where_clause` always produce either a
//! node or an error, rules like `opt_where_clause` may produce nothing.
//! Non-opt rules typically start with `assert!(p.at(FIRST_TOKEN))`, the
//! caller is responsible for branching on the first token.
mod attributes;
mod expressions;
mod items;
mod params;
mod paths;
mod patterns;
mod type_args;
mod type_params;
mod types;
use crate::{
parser::{CompletedMarker, Marker, Parser},
SyntaxKind::{self, *},
TokenSet,
};
pub(crate) fn root(p: &mut Parser) {
let m = p.start();
p.eat(SHEBANG);
items::mod_contents(p, false);
m.complete(p, SOURCE_FILE);
}
/// Various pieces of syntax that can be parsed by macros by example
pub(crate) mod fragments {
use super::*;
pub(crate) use super::{
expressions::block_expr, paths::type_path as path, patterns::pattern_single, types::type_,
};
pub(crate) fn expr(p: &mut Parser) {
let _ = expressions::expr_with_attrs(p);
}
pub(crate) fn stmt(p: &mut Parser) {
expressions::stmt(p, expressions::StmtWithSemi::No, true)
}
pub(crate) fn stmt_optional_semi(p: &mut Parser) {
expressions::stmt(p, expressions::StmtWithSemi::Optional, false)
}
pub(crate) fn opt_visibility(p: &mut Parser) {
let _ = super::opt_visibility(p);
}
// Parse a meta item without the enclosing brackets, e.g. the `MetaItem` part of `#[ MetaItem ]`
pub(crate) fn meta_item(p: &mut Parser) {
attributes::meta(p);
}
pub(crate) fn item(p: &mut Parser) {
items::item_or_macro(p, true)
}
pub(crate) fn macro_items(p: &mut Parser) {
let m = p.start();
items::mod_contents(p, false);
m.complete(p, MACRO_ITEMS);
}
pub(crate) fn macro_stmts(p: &mut Parser) {
let m = p.start();
while !p.at(EOF) {
if p.at(T![;]) {
p.bump(T![;]);
continue;
}
expressions::stmt(p, expressions::StmtWithSemi::Optional, true);
}
m.complete(p, MACRO_STMTS);
}
pub(crate) fn attr(p: &mut Parser) {
attributes::outer_attrs(p)
}
}
pub(crate) fn reparser(
node: SyntaxKind,
first_child: Option<SyntaxKind>,
parent: Option<SyntaxKind>,
) -> Option<fn(&mut Parser)> {
let res = match node {
BLOCK_EXPR => expressions::block_expr,
RECORD_FIELD_LIST => items::record_field_list,
RECORD_EXPR_FIELD_LIST => items::record_expr_field_list,
VARIANT_LIST => items::variant_list,
MATCH_ARM_LIST => items::match_arm_list,
USE_TREE_LIST => items::use_tree_list,
EXTERN_ITEM_LIST => items::extern_item_list,
TOKEN_TREE if first_child? == T!['{'] => items::token_tree,
ASSOC_ITEM_LIST => match parent? {
IMPL => items::assoc_item_list,
TRAIT => items::assoc_item_list,
_ => return None,
},
ITEM_LIST => items::item_list,
_ => return None,
};
Some(res)
}
#[derive(Clone, Copy, PartialEq, Eq)]
enum BlockLike {
Block,
NotBlock,
}
impl BlockLike {
fn is_block(self) -> bool {
self == BlockLike::Block
}
}
fn opt_visibility(p: &mut Parser) -> bool {
match p.current() {
T![pub] => {
let m = p.start();
p.bump(T![pub]);
if p.at(T!['(']) {
match p.nth(1) {
// test crate_visibility
// pub(crate) struct S;
// pub(self) struct S;
// pub(super) struct S;
// test pub_parens_typepath
// struct B(pub (super::A));
// struct B(pub (crate::A,));
T![crate] | T![self] | T![super] if p.nth(2) != T![:] => {
p.bump_any();
let path_m = p.start();
let path_segment_m = p.start();
let name_ref_m = p.start();
p.bump_any();
name_ref_m.complete(p, NAME_REF);
path_segment_m.complete(p, PATH_SEGMENT);
path_m.complete(p, PATH);
p.expect(T![')']);
}
// test crate_visibility_in
// pub(in super::A) struct S;
// pub(in crate) struct S;
T![in] => {
p.bump_any();
p.bump_any();
paths::use_path(p);
p.expect(T![')']);
}
_ => (),
}
}
m.complete(p, VISIBILITY);
}
// test crate_keyword_vis
// crate fn main() { }
// struct S { crate field: u32 }
// struct T(crate u32);
//
// test crate_keyword_path
// fn foo() { crate::foo(); }
T![crate] if !p.nth_at(1, T![::]) => {
let m = p.start(); | }
true
}
fn opt_rename(p: &mut Parser) {
if p.at(T![as]) {
let m = p.start();
p.bump(T![as]);
if !p.eat(T![_]) {
name(p);
}
m.complete(p, RENAME);
}
}
fn abi(p: &mut Parser) {
assert!(p.at(T![extern]));
let abi = p.start();
p.bump(T![extern]);
p.eat(STRING);
abi.complete(p, ABI);
}
fn opt_ret_type(p: &mut Parser) -> bool {
if p.at(T![->]) {
let m = p.start();
p.bump(T![->]);
types::type_no_bounds(p);
m.complete(p, RET_TYPE);
true
} else {
false
}
}
fn name_r(p: &mut Parser, recovery: TokenSet) {
if p.at(IDENT) {
let m = p.start();
p.bump(IDENT);
m.complete(p, NAME);
} else {
p.err_recover("expected a name", recovery);
}
}
fn name(p: &mut Parser) {
name_r(p, TokenSet::EMPTY)
}
fn name_ref(p: &mut Parser) {
if p.at(IDENT) {
let m = p.start();
p.bump(IDENT);
m.complete(p, NAME_REF);
} else {
p.err_and_bump("expected identifier");
}
}
fn name_ref_or_index(p: &mut Parser) {
assert!(p.at(IDENT) || p.at(INT_NUMBER));
let m = p.start();
p.bump_any();
m.complete(p, NAME_REF);
}
fn lifetime(p: &mut Parser) {
assert!(p.at(LIFETIME_IDENT));
let m = p.start();
p.bump(LIFETIME_IDENT);
m.complete(p, LIFETIME);
}
fn error_block(p: &mut Parser, message: &str) {
assert!(p.at(T!['{']));
let m = p.start();
p.error(message);
p.bump(T!['{']);
expressions::expr_block_contents(p);
p.eat(T!['}']);
m.complete(p, ERROR);
} | p.bump(T![crate]);
m.complete(p, VISIBILITY);
}
_ => return false, |
utils.py | import os
import shutil
import tempfile
import unittest
# disable for stestr otherwise output is much too verbose
from hotsos.core.log import log, logging, setup_logging
from hotsos.core.config import setup_config
# Must be set prior to other imports
TESTS_DIR = os.environ["TESTS_DIR"]
DEFAULT_FAKE_ROOT = 'fake_data_root/openstack'
setup_config(DATA_ROOT=os.path.join(TESTS_DIR, DEFAULT_FAKE_ROOT))
def is_def_filter(def_filename):
"""
Filter hotsos.core.ycheck.YDefsLoader._is_def to only match a file with the
given name. This permits a unit test to only run the ydef checks that are
under test.
Note that in order for directory globals to run def_filename must be a
relative path that includes the parent directory name e.g. foo/bar.yaml
where bar.yaml contains the checks and there is also a file called foo/foo.yaml
that contains directory globals.
"""
def inner(_inst, abs_path):
# filename may optionally have a parent dir which allows us to permit
# directory globals to be run.
parent_dir = os.path.dirname(def_filename)
""" Ensure we only load/run the yaml def with the given name. """
if parent_dir:
# allow directory global to run
base_dir = os.path.basename(os.path.dirname(abs_path))
if base_dir != parent_dir:
return False
if os.path.basename(abs_path) == "{}.yaml".format(parent_dir):
return True
if abs_path.endswith(def_filename):
return True
return False
return inner
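# Hypothetical usage in a test (decorator target and file name are illustrative, not taken from this module):
#   @mock.patch.object(YDefsLoader, '_is_def', new=is_def_filter('foo/bar.yaml'))
#   def test_my_check(self): ...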
class BaseTestCase(unittest.TestCase):
def part_output_to_actual(self, output):
actual = {}
for key, entry in output.items():
actual[key] = entry.data
return actual
def setUp(self):
|
def tearDown(self):
if os.path.isdir(self.plugin_tmp_dir):
shutil.rmtree(self.plugin_tmp_dir)
| self.maxDiff = None
# ensure locale consistency wherever tests are run
os.environ["LANG"] = 'C.UTF-8'
self.global_tmp_dir = tempfile.mkdtemp()
self.plugin_tmp_dir = tempfile.mkdtemp(dir=self.global_tmp_dir)
# Always reset env globals
# If a test relies on loading info from defs yaml this needs to be set
# to actual plugin name.
setup_config(DATA_ROOT=os.path.join(TESTS_DIR, DEFAULT_FAKE_ROOT),
PLUGIN_NAME="testplugin",
PLUGIN_YAML_DEFS=os.path.join(TESTS_DIR, "defs"),
PART_NAME="01part",
GLOBAL_TMP_DIR=self.global_tmp_dir,
PLUGIN_TMP_DIR=self.plugin_tmp_dir,
USE_ALL_LOGS=True)
setup_logging(debug_mode=True)
log.setLevel(logging.INFO) |
get.rs | use crate::cmd::CmdHandling;
use crate::io::conf::Config;
use clap::Parser;
#[derive(Parser)]
pub struct ConfGetCmd {
key: Option<String>,
}
impl CmdHandling for ConfGetCmd {
fn handle(&self, config: &Config) -> Result<String, String> {
if let Some(key) = &self.key {
Ok(format!("ConfGet ran to completion with key {}", key))
} else |
}
}
| {
Ok(format!("ConfGet fetched config: {:?}", config))
} |
dask.py | """Support for Eliot tracing with Dask computations."""
from pyrsistent import PClass, field
from dask import compute, optimize
from dask.core import toposort, get_dependencies
from . import start_action, current_action, Action, Message
class _RunWithEliotContext(PClass):
"""
Run a callable within an Eliot context.
@ivar task_id: The serialized Eliot task ID.
@ivar func: The function that Dask wants to run.
@ivar key: The key in the Dask graph.
@ivar dependencies: The keys in the Dask graph this depends on.
"""
task_id = field(type=str)
func = field() # callable
key = field(type=str)
dependencies = field()
# Pretend to be underlying callable for purposes of equality; necessary for
# optimizer to be happy:
def __eq__(self, other):
return self.func == other
def __ne__(self, other):
return self.func != other
def __hash__(self):
return hash(self.func)
def __call__(self, *args, **kwargs):
with Action.continue_task(task_id=self.task_id):
Message.log(
message_type="dask:task",
key=self.key,
dependencies=self.dependencies
)
return self.func(*args, **kwargs)
def compute_with_trace(*args):
"""Do Dask compute(), but with added Eliot tracing.
Dask is a graph of tasks, but Eliot logs trees. So we need to emulate a
graph using a tree. We do this by making an Eliot action for each task, but | 1. Create a top-level action.
2. For each entry in the dask graph, create a child with
serialize_task_id. Do this in likely order of execution, so that
if B depends on A the task level of B is higher than the task level
of A.
3. Replace each function with a wrapper that uses the corresponding
task ID (with Action.continue_task), and while it's at it also
records which other things this function depends on.
Known issues:
1. Retries will confuse Eliot. Probably need different
distributed-tree mechanism within Eliot to solve that.
"""
# 1. Create top-level Eliot Action:
with start_action(action_type="dask:compute"):
# In order to reduce logging verbosity, add logging to the already
# optimized graph:
optimized = optimize(*args, optimizations=[_add_logging])
return compute(*optimized, optimize_graph=False)
def _add_logging(dsk, ignore=None):
"""
Add logging to a Dask graph.
@param dsk: The Dask graph.
@return: New Dask graph.
"""
ctx = current_action()
result = {}
# Use topological sort to ensure Eliot actions are in logical order of
# execution in Dask:
keys = toposort(dsk)
# Give each key a string name. Some keys are just aliases to other
# keys, so make sure we have underlying key available. Later on might
# want to shorten them as well.
def simplify(k):
if isinstance(k, str):
return k
return "-".join(str(o) for o in k)
key_names = {}
for key in keys:
value = dsk[key]
if not callable(value) and value in keys:
# It's an alias for another key:
key_names[key] = key_names[value]
else:
key_names[key] = simplify(key)
# 2. Create Eliot child Actions for each key, in topological order:
key_to_action_id = {
key: str(ctx.serialize_task_id(), "utf-8")
for key in keys
}
# 3. Replace function with wrapper that logs appropriate Action:
for key in keys:
func = dsk[key][0]
args = dsk[key][1:]
if not callable(func):
# This key is just an alias for another key, no need to add
# logging:
result[key] = dsk[key]
continue
wrapped_func = _RunWithEliotContext(
task_id=key_to_action_id[key],
func=func,
key=key_names[key],
dependencies=[key_names[k] for k in get_dependencies(dsk, key)],
)
result[key] = (wrapped_func, ) + tuple(args)
assert result.keys() == dsk.keys()
return result
__all__ = ["compute_with_trace"] | having it list the tasks it depends on.
We use the following algorithm:
|
taskbehavior.go | package simple
import (
"github.com/project-flogo/core/activity"
"github.com/project-flogo/flow/definition"
"github.com/project-flogo/flow/model"
)
////////////////////////////////////////////////////////////////////////////////////////////////////////
// TaskBehavior implements model.TaskBehavior
type TaskBehavior struct {
}
// Enter implements model.TaskBehavior.Enter
func (tb *TaskBehavior) Enter(ctx model.TaskContext) (enterResult model.EnterResult) {
logger := ctx.FlowLogger()
task := ctx.Task()
if logger.DebugEnabled() {
logger.Debugf("Enter Task '%s'", task.ID())
}
ctx.SetStatus(model.TaskStatusEntered)
//check if all predecessor links are done
linkInsts := ctx.GetFromLinkInstances()
ready := true
skipped := false
if len(linkInsts) == 0 {
// has no predecessor links, so task is ready
ready = true
} else {
skipped = true
if logger.DebugEnabled() {
logger.Debugf("Task '%s' has %d incoming Links", task.ID(), len(linkInsts))
}
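// The task becomes ready only once every incoming link has been evaluated; it is marked skipped unless at least one incoming link evaluated to true.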
for _, linkInst := range linkInsts {
if logger.DebugEnabled() {
logger.Debugf("Task '%s': Link from Task '%s' has status '%s'", task.ID(), linkInst.Link().FromTask().ID(), linkStatus(linkInst))
}
if linkInst.Status() < model.LinkStatusFalse {
ready = false
break
} else if linkInst.Status() == model.LinkStatusTrue {
skipped = false
}
}
}
if ready {
if skipped {
ctx.SetStatus(model.TaskStatusSkipped)
return model.EnterSkip
} else {
if logger.DebugEnabled() {
logger.Debugf("Task '%s' Ready", ctx.Task().ID())
}
ctx.SetStatus(model.TaskStatusReady)
}
return model.EnterEval
} else {
if logger.DebugEnabled() {
logger.Debugf("Task '%s' Not Ready", ctx.Task().ID())
}
}
return model.EnterNotReady
}
// Eval implements model.TaskBehavior.Eval
func (tb *TaskBehavior) Eval(ctx model.TaskContext) (evalResult model.EvalResult, err error) {
if ctx.Status() == model.TaskStatusSkipped {
return model.EvalSkip, nil
}
task := ctx.Task()
ctx.FlowLogger().Debugf("Eval Task '%s'", task.ID())
done, err := evalActivity(ctx)
if err != nil {
ref := activity.GetRef(ctx.Task().ActivityConfig().Activity)
ctx.FlowLogger().Errorf("Error evaluating activity '%s'[%s] - %s", ctx.Task().ID(), ref, err.Error())
ctx.SetStatus(model.TaskStatusFailed)
return model.EvalFail, err
}
if done | else {
evalResult = model.EvalWait
}
return evalResult, nil
}
func evalActivity(ctx model.TaskContext) (bool, error) {
done, err := ctx.EvalActivity()
if err != nil {
// check if error returned is retriable
if errVal, ok := err.(*activity.Error); ok && errVal.Retriable() {
// check if task is configured to retry on error
retryData, rerr := getRetryData(ctx)
if rerr != nil {
return done, rerr
}
if retryData != nil && retryData.Count > 0 {
return retryEval(ctx, retryData)
}
}
return done, err
}
return done, nil
}
// PostEval implements model.TaskBehavior.PostEval
func (tb *TaskBehavior) PostEval(ctx model.TaskContext) (evalResult model.EvalResult, err error) {
ctx.FlowLogger().Debugf("PostEval Task '%s'", ctx.Task().ID())
_, err = ctx.PostEvalActivity()
if err != nil {
//// check if error returned is retriable
//if errVal, ok := err.(*activity.Error); ok && errVal.Retriable() {
// // check if task is configured to retry on error
// retryData, rerr := getRetryData(ctx)
// if rerr != nil {
// return model.EvalFail, rerr
// }
// if retryData.Count > 0 {
// return retryPostEval(ctx, retryData), nil
// }
//}
ref := activity.GetRef(ctx.Task().ActivityConfig().Activity)
ctx.FlowLogger().Errorf("Error post evaluating activity '%s'[%s] - %s", ctx.Task().ID(), ref, err.Error())
ctx.SetStatus(model.TaskStatusFailed)
return model.EvalFail, err
}
return model.EvalDone, nil
}
// Done implements model.TaskBehavior.Done
func (tb *TaskBehavior) Done(ctx model.TaskContext) (notifyFlow bool, taskEntries []*model.TaskEntry, err error) {
logger := ctx.FlowLogger()
linkInsts := ctx.GetToLinkInstances()
numLinks := len(linkInsts)
ctx.SetStatus(model.TaskStatusDone)
if logger.DebugEnabled() {
logger.Debugf("Task '%s' is done", ctx.Task().ID())
}
// process outgoing links
if numLinks > 0 {
taskEntries = make([]*model.TaskEntry, 0, numLinks)
if logger.DebugEnabled() {
logger.Debugf("Task '%s' has %d outgoing links", ctx.Task().ID(), numLinks)
}
var exprLinkFollowed, hasExprLink bool
var exprOtherwiseLinkInst model.LinkInstance
for _, linkInst := range linkInsts {
follow := true
if linkInst.Link().Type() == definition.LtError {
//todo should we skip or ignore?
continue
}
if linkInst.Link().Type() == definition.LtExprOtherwise {
exprOtherwiseLinkInst = linkInst
continue
}
if linkInst.Link().Type() == definition.LtExpression {
hasExprLink = true
//todo handle error
if logger.DebugEnabled() {
logger.Debugf("Task '%s': Evaluating Outgoing Expression Link to Task '%s'", ctx.Task().ID(), linkInst.Link().ToTask().ID())
}
follow, err = ctx.EvalLink(linkInst.Link())
if err != nil {
return false, nil, err
}
exprLinkFollowed = follow
}
if follow {
linkInst.SetStatus(model.LinkStatusTrue)
if logger.DebugEnabled() {
logger.Debugf("Task '%s': Following Link to task '%s'", ctx.Task().ID(), linkInst.Link().ToTask().ID())
}
taskEntry := &model.TaskEntry{Task: linkInst.Link().ToTask()}
taskEntries = append(taskEntries, taskEntry)
} else {
linkInst.SetStatus(model.LinkStatusFalse)
taskEntry := &model.TaskEntry{Task: linkInst.Link().ToTask()}
taskEntries = append(taskEntries, taskEntry)
}
}
// Follow the otherwise branch when no expression link was followed
if hasExprLink && !exprLinkFollowed && exprOtherwiseLinkInst != nil {
exprOtherwiseLinkInst.SetStatus(model.LinkStatusTrue)
if logger.DebugEnabled() {
logger.Debugf("Task '%s': Following Link to task '%s'", ctx.Task().ID(), exprOtherwiseLinkInst.Link().ToTask().ID())
}
taskEntry := &model.TaskEntry{Task: exprOtherwiseLinkInst.Link().ToTask()}
taskEntries = append(taskEntries, taskEntry)
}
//continue on to successor tasks
return false, taskEntries, nil
}
if logger.DebugEnabled() {
logger.Debugf("Notifying flow that end task '%s' is done", ctx.Task().ID())
}
// there are no outgoing links, so just notify parent that we are done
return true, nil, nil
}
// Skip implements model.TaskBehavior.Skip
func (tb *TaskBehavior) Skip(ctx model.TaskContext) (notifyFlow bool, taskEntries []*model.TaskEntry) {
linkInsts := ctx.GetToLinkInstances()
numLinks := len(linkInsts)
ctx.SetStatus(model.TaskStatusSkipped)
logger := ctx.FlowLogger()
if logger.DebugEnabled() {
logger.Debugf("Task '%s' was skipped", ctx.Task().ID())
}
// process outgoing links
if numLinks > 0 {
taskEntries = make([]*model.TaskEntry, 0, numLinks)
if logger.DebugEnabled() {
logger.Debugf("Task '%s' has %d outgoing links", ctx.Task().ID(), numLinks)
}
for _, linkInst := range linkInsts {
linkInst.SetStatus(model.LinkStatusSkipped)
taskEntry := &model.TaskEntry{Task: linkInst.Link().ToTask()}
taskEntries = append(taskEntries, taskEntry)
}
return false, taskEntries
}
if logger.DebugEnabled() {
logger.Debugf("Notifying flow that end task '%s' is skipped", ctx.Task().ID())
}
return true, nil
}
// Error implements model.TaskBehavior.Error
func (tb *TaskBehavior) Error(ctx model.TaskContext, err error) (handled bool, taskEntries []*model.TaskEntry) {
linkInsts := ctx.GetToLinkInstances()
numLinks := len(linkInsts)
handled = false
// process outgoing links
if numLinks > 0 {
for _, linkInst := range linkInsts {
if linkInst.Link().Type() == definition.LtError {
handled = true
break
}
}
if handled {
taskEntries = make([]*model.TaskEntry, 0, numLinks)
for _, linkInst := range linkInsts {
if linkInst.Link().Type() == definition.LtError {
linkInst.SetStatus(model.LinkStatusTrue)
} else {
linkInst.SetStatus(model.LinkStatusFalse)
}
taskEntry := &model.TaskEntry{Task: linkInst.Link().ToTask()}
taskEntries = append(taskEntries, taskEntry)
}
return true, taskEntries
}
}
return false, nil
}
func linkStatus(inst model.LinkInstance) string {
switch inst.Status() {
case model.LinkStatusFalse:
return "false"
case model.LinkStatusTrue:
return "true"
case model.LinkStatusSkipped:
return "skipped"
}
return "unknown"
}
| {
evalResult = model.EvalDone
} |
gesture.py | from typing import List
from arm_prosthesis.models.gesture_action import GestureAction
class Gesture:
def __init__(self, uuid: str, name: str, last_time_sync: int, iterable: bool, repetitions: int,
actions: List[GestureAction]):
self._uuid = uuid
self._name = name
self._last_time_sync = last_time_sync
self._iterable = iterable
self._repetitions = repetitions
self._actions = actions
@property
def uuid(self) -> str:
return self._uuid
@property
| def last_time_sync(self) -> int:
return self._last_time_sync
@property
def iterable(self) -> bool:
return self._iterable
@property
def repetitions(self) -> int:
return self._repetitions
@property
def actions(self) -> List[GestureAction]:
return self._actions | def name(self) -> str:
return self._name
@property
|
feedback_pipeline.py | #!/usr/bin/python3
import argparse, yaml, tempfile, os, subprocess, json, jinja2, datetime, copy, re, dnf, pprint, urllib.request, sys, koji
import concurrent.futures
import rpm_showme as showme
from functools import lru_cache
import multiprocessing, asyncio
# Features of this new release
# - multiarch from the ground up!
# - more resilient
# - better internal data structure
# - user-defined views
###############################################################################
### Help ######################################################################
###############################################################################
# Configs:
# TYPE: KEY: ID:
# - repo repos repo_id
# - env_conf envs env_id
# - workload_conf workloads workload_id
# - label labels label_id
# - conf_view views view_id
#
# Data:
# TYPE: KEY: ID:
# - pkg pkgs/repo_id/arch NEVR
# - env envs env_id:repo_id:arch_id
# - workload workloads workload_id:env_id:repo_id:arch_id
# - view views view_id:repo_id:arch_id
#
#
#
###############################################################################
### Some initial stuff ########################################################
###############################################################################
class SettingsError(Exception):
# Error in global settings for Feedback Pipeline
# Settings to be implemented, now hardcoded below
pass
class ConfigError(Exception):
# Error in user-provided configs
pass
class RepoDownloadError(Exception):
# Error in downloading repodata
pass
class BuildGroupAnalysisError(Exception):
# Error while processing buildroot build group
pass
class KojiRootLogError(Exception):
pass
class AnalysisError(Exception):
pass
def log(msg):
print(msg, file=sys.stderr)
def err_log(msg):
print("ERROR LOG: {}".format(msg), file=sys.stderr)
class SetEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, jinja2.Environment):
return ""
return json.JSONEncoder.default(self, obj)
def dump_data(path, data):
with open(path, 'w') as file:
json.dump(data, file, cls=SetEncoder)
def load_data(path):
with open(path, 'r') as file:
data = json.load(file)
return data
def size(num, suffix='B'):
for unit in ['','k','M','G']:
if abs(num) < 1024.0:
return "%3.1f %s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f %s%s" % (num, 'T', suffix)
def pkg_id_to_name(pkg_id):
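# e.g. "bash-5.1.8-2.fc35.x86_64" -> "bash" (illustrative example)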
pkg_name = pkg_id.rsplit("-",2)[0]
return pkg_name
def workload_id_to_conf_id(workload_id):
workload_conf_id = workload_id.split(":")[0]
return workload_conf_id
def pkg_placeholder_name_to_id(placeholder_name):
placeholder_id = "{name}-000-placeholder.placeholder".format(name=placeholder_name)
return placeholder_id
def pkg_placeholder_name_to_nevr(placeholder_name):
placeholder_id = "{name}-000-placeholder".format(name=placeholder_name)
return placeholder_id
def url_to_id(url):
# strip the protocol
if url.startswith("https://"):
url = url[8:]
elif url.startswith("http://"):
url = url[7:]
# strip a potential trailing /
if url.endswith("/"):
url = url[:-1]
# and replace all non-alphanumeric characters with -
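# e.g. "https://example.org/repo/" becomes "example-org-repo" (illustrative example)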
regex = re.compile('[^0-9a-zA-Z]')
return regex.sub("-", url)
def datetime_now_string():
return datetime.datetime.now().strftime("%m/%d/%Y, %H:%M:%S")
def load_settings():
settings = {}
parser = argparse.ArgumentParser()
parser.add_argument("configs", help="Directory with YAML configuration files. Only files ending with '.yaml' are accepted.")
parser.add_argument("output", help="Directory to contain the output.")
parser.add_argument("--use-cache", dest="use_cache", action='store_true', help="Use local data instead of pulling Content Resolver. Saves a lot of time! Needs a 'cache_data.json' file at the same location as the script is at.")
parser.add_argument("--dev-buildroot", dest="dev_buildroot", action='store_true', help="Buildroot grows pretty quickly. Use a fake one for development.")
parser.add_argument("--dnf-cache-dir", dest="dnf_cache_dir_override", help="Override the dnf cache_dir.")
args = parser.parse_args()
settings["configs"] = args.configs
settings["output"] = args.output
settings["use_cache"] = args.use_cache
settings["dev_buildroot"] = args.dev_buildroot
settings["dnf_cache_dir_override"] = args.dnf_cache_dir_override
settings["root_log_deps_cache_path"] = "cache_root_log_deps.json"
settings["max_subprocesses"] = 10
settings["allowed_arches"] = ["armv7hl","aarch64","ppc64le","s390x","x86_64"]
settings["weird_packages_that_can_not_be_installed"] = ["glibc32"]
settings["repos"] = {
"appstream": ["aarch64", "ppc64le", "s390x", "x86_64"],
"baseos": ["aarch64", "ppc64le", "s390x", "x86_64"],
"crb": ["aarch64", "ppc64le", "s390x", "x86_64"],
"addon-ha": ["aarch64", "ppc64le", "s390x", "x86_64"],
"addon-nfv": ["x86_64"],
"addon-rt": ["x86_64"],
"addon-rs": ["ppc64le", "s390x", "x86_64"],
"addon-sap": ["ppc64le", "s390x", "x86_64"],
"addon-saphana": ["ppc64le", "x86_64"]
}
settings["addons"] = ["addon-ha", "addon-nfv", "addon-rt", "addon-rs", "addon-sap", "addon-saphana"]
return settings
###############################################################################
### Loading user-provided configs #############################################
###############################################################################
# Configs:
# TYPE: KEY: ID:
# - repo repos repo_id
# - env envs env_id
# - workload workloads workload_id
# - label labels label_id
# - view views view_id
def _load_config_repo(document_id, document, settings):
raise NotImplementedError("Repo v1 is not supported. Please migrate to repo v2.")
def _load_config_repo_v2(document_id, document, settings):
config = {}
config["id"] = document_id
# Step 1: Mandatory fields
try:
# Name is an identifier for humans
config["name"] = str(document["data"]["name"])
# A short description, perhaps hinting the purpose
config["description"] = str(document["data"]["description"])
# Who maintains it? This is just a freeform string
# for humans to read. In Fedora, a FAS nick is recommended.
config["maintainer"] = str(document["data"]["maintainer"])
# Where does this repository come from?
# Right now, only Fedora repositories are supported,
# defined by their releasever.
config["source"] = {}
config["source"]["repos"] = {}
if "repos" not in config["source"]:
raise KeyError
# Only Fedora repos supported at this time.
# Fedora release.
config["source"]["releasever"] = str(document["data"]["source"]["releasever"])
# List of architectures
config["source"]["architectures"] = []
for arch_raw in document["data"]["source"]["architectures"]:
arch = str(arch_raw)
if arch not in settings["allowed_arches"]:
log(" Warning: {file}.yaml lists an unsupported architecture: {arch}. Moving on...".format(
file=document_id,
arch=arch))
continue
config["source"]["architectures"].append(str(arch))
except KeyError:
raise ConfigError("'{file}.yaml' - There's something wrong with the mandatory fields. Sorry I don't have more specific info.".format(file=document_id))
for id, repo_data in document["data"]["source"]["repos"].items():
name = repo_data.get("name", id)
priority = repo_data.get("priority", 100)
exclude = repo_data.get("exclude", None)
limit_arches = repo_data.get("limit_arches", None)
koji_api_url = repo_data.get("koji_api_url", None)
koji_files_url = repo_data.get("koji_files_url", None)
config["source"]["repos"][id] = {}
config["source"]["repos"][id]["id"] = id
config["source"]["repos"][id]["name"] = name
try:
config["source"]["repos"][id]["baseurl"] = repo_data["baseurl"]
except KeyError:
raise ConfigError("'{file}.yaml' - is invalid. Repo {id} doesn't list baseurl.".format(
file=document_id,
id=id))
config["source"]["repos"][id]["priority"] = priority
config["source"]["repos"][id]["exclude"] = exclude
config["source"]["repos"][id]["limit_arches"] = limit_arches
config["source"]["repos"][id]["koji_api_url"] = koji_api_url
config["source"]["repos"][id]["koji_files_url"] = koji_files_url
# Step 2: Optional fields
config["source"]["composeinfo"] = document["data"]["source"].get("composeinfo", None)
config["source"]["base_buildroot_override"] = []
if "base_buildroot_override" in document["data"]["source"]:
for pkg_name in document["data"]["source"]["base_buildroot_override"]:
config["source"]["base_buildroot_override"].append(str(pkg_name))
return config
def _load_config_env(document_id, document, settings):
config = {}
config["id"] = document_id
# Step 1: Mandatory fields
try:
# Name is an identifier for humans
config["name"] = str(document["data"]["name"])
# A short description, perhaps hinting the purpose
config["description"] = str(document["data"]["description"])
# Who maintains it? This is just a freeform string
# for humans to read. In Fedora, a FAS nick is recommended.
config["maintainer"] = str(document["data"]["maintainer"])
# Different instances of the environment, one per repository.
config["repositories"] = []
for repo in document["data"]["repositories"]:
config["repositories"].append(str(repo))
# Packages defining this environment.
# This list includes packages for all
# architectures — that's the one to use by default.
config["packages"] = []
for pkg in document["data"]["packages"]:
config["packages"].append(str(pkg))
# Labels connect things together.
# Workloads get installed in environments with the same label.
# They also get included in views with the same label.
config["labels"] = []
for repo in document["data"]["labels"]:
config["labels"].append(str(repo))
except KeyError:
raise ConfigError("'{file}.yaml' - There's something wrong with the mandatory fields. Sorry I don't have more specific info.".format(file=document_id))
# Step 2: Optional fields
# Architecture-specific packages.
config["arch_packages"] = {}
for arch in settings["allowed_arches"]:
config["arch_packages"][arch] = []
if "arch_packages" in document["data"]:
for arch, pkgs in document["data"]["arch_packages"].items():
if arch not in settings["allowed_arches"]:
log(" Warning: {file}.yaml lists an unsupported architecture: {arch}. Moving on...".format(
file=document_id,
arch=arch
))
continue
for pkg_raw in pkgs:
pkg = str(pkg_raw)
config["arch_packages"][arch].append(pkg)
# Extra installation options.
# The following are now supported:
# - "include-docs" - include documentation packages
# - "include-weak-deps" - automatically pull in "recommends" weak dependencies
config["options"] = []
if "options" in document["data"]:
if "include-docs" in document["data"]["options"]:
config["options"].append("include-docs")
if "include-weak-deps" in document["data"]["options"]:
config["options"].append("include-weak-deps")
# Comps groups
config["groups"] = []
if "groups" in document["data"]:
for module in document["data"]["groups"]:
config["groups"].append(module)
return config
def _load_config_workload(document_id, document, settings):
config = {}
config["id"] = document_id
# Step 1: Mandatory fields
try:
# Name is an identifier for humans
config["name"] = str(document["data"]["name"])
# A short description, perhaps hinting the purpose
config["description"] = str(document["data"]["description"])
# Who maintains it? This is just a freeform string
# for humans to read. In Fedora, a FAS nick is recommended.
config["maintainer"] = str(document["data"]["maintainer"])
# Labels connect things together.
# Workloads get installed in environments with the same label.
# They also get included in views with the same label.
config["labels"] = []
for repo in document["data"]["labels"]:
config["labels"].append(str(repo))
except KeyError:
raise ConfigError("'{file}.yaml' - There's something wrong with the mandatory fields. Sorry I don't have more specific info.".format(file=document_id))
# Step 2: Optional fields
# Packages defining this workload.
# This list includes packages for all
# architectures — that's the one to use by default.
config["packages"] = []
# This workaround allows for "packages" to be left empty in the config
try:
for pkg in document["data"]["packages"]:
config["packages"].append(str(pkg))
except (TypeError, KeyError):
pass # Because it's now valid
#log(" Warning: {file} has an empty 'packages' field defined which is invalid. Moving on...".format(
# file=document_id
#))
# Architecture-specific packages.
config["arch_packages"] = {}
for arch in settings["allowed_arches"]:
config["arch_packages"][arch] = []
if "arch_packages" in document["data"]:
for arch, pkgs in document["data"]["arch_packages"].items():
if arch not in settings["allowed_arches"]:
log(" Warning: {file}.yaml lists an unsupported architecture: {arch}. Moving on...".format(
file=document_id,
arch=arch
))
continue
# This workaround allows for "arch_packages/ARCH" to be left empty in the config
try:
for pkg_raw in pkgs:
pkg = str(pkg_raw)
config["arch_packages"][arch].append(pkg)
except TypeError:
log(" Warning: {file} has an empty 'arch_packages/{arch}' field defined which is invalid. Moving on...".format(
file=document_id,
arch=arch
))
# Extra installation options.
# The following are now supported:
# - "include-docs" - include documentation packages
# - "include-weak-deps" - automatically pull in "recommends" weak dependencies
config["options"] = []
if "options" in document["data"]:
if "include-docs" in document["data"]["options"]:
config["options"].append("include-docs")
if "include-weak-deps" in document["data"]["options"]:
config["options"].append("include-weak-deps")
if "strict" in document["data"]["options"]:
config["options"].append("strict")
# Disable module streams.
config["modules_disable"] = []
if "modules_disable" in document["data"]:
for module in document["data"]["modules_disable"]:
config["modules_disable"].append(module)
# Enable module streams.
config["modules_enable"] = []
if "modules_enable" in document["data"]:
for module in document["data"]["modules_enable"]:
config["modules_enable"].append(module)
# Comps groups
config["groups"] = []
if "groups" in document["data"]:
for module in document["data"]["groups"]:
config["groups"].append(module)
# Package placeholders
# Add packages to the workload that don't exist (yet) in the repositories.
config["package_placeholders"] = {}
config["package_placeholders"]["pkgs"] = {}
config["package_placeholders"]["srpms"] = {}
if "package_placeholders" in document["data"]:
# So yeah, this is kind of awful but also brilliant.
# The old syntax of package placeholders was a dict,
# but the new one is a list.
# So I can be backwards compatible!
#
# The old format
if isinstance(document["data"]["package_placeholders"], dict):
for pkg_name, pkg_data in document["data"]["package_placeholders"].items():
pkg_description = pkg_data.get("description", "Description not provided.")
pkg_requires = pkg_data.get("requires", [])
pkg_buildrequires = pkg_data.get("buildrequires", [])
limit_arches = pkg_data.get("limit_arches", None)
srpm = pkg_data.get("srpm", pkg_name)
config["package_placeholders"]["pkgs"][pkg_name] = {}
config["package_placeholders"]["pkgs"][pkg_name]["name"] = pkg_name
config["package_placeholders"]["pkgs"][pkg_name]["description"] = pkg_description
config["package_placeholders"]["pkgs"][pkg_name]["requires"] = pkg_requires
config["package_placeholders"]["pkgs"][pkg_name]["limit_arches"] = limit_arches
config["package_placeholders"]["pkgs"][pkg_name]["srpm"] = srpm
# Because the old format isn't great, it needs a srpm
# to be defined for every rpm, including the build requires.
# That can cause conflicts.
# So the best thing (I think) is to just take the first one and ignore
# the others. This is better than nothing. And people should move
# to the new format anyway.
if srpm not in config["package_placeholders"]["srpms"]:
config["package_placeholders"]["srpms"][srpm] = {}
config["package_placeholders"]["srpms"][srpm]["name"] = srpm
config["package_placeholders"]["srpms"][srpm]["buildrequires"] = pkg_buildrequires
config["package_placeholders"]["srpms"][srpm]["limit_arches"] = limit_arches
#
# The new format
elif isinstance(document["data"]["package_placeholders"], list):
for srpm in document["data"]["package_placeholders"]:
srpm_name = srpm["srpm_name"]
if not srpm_name:
continue
build_dependencies = srpm.get("build_dependencies", [])
limit_arches = srpm.get("limit_arches", [])
rpms = srpm.get("rpms", [])
config["package_placeholders"]["srpms"][srpm_name] = {}
config["package_placeholders"]["srpms"][srpm_name]["name"] = srpm_name
config["package_placeholders"]["srpms"][srpm_name]["buildrequires"] = build_dependencies
config["package_placeholders"]["srpms"][srpm_name]["limit_arches"] = limit_arches
for rpm in rpms:
rpm_name = rpm.get("rpm_name", None)
if not rpm_name:
continue
description = rpm.get("description", "Description not provided.")
dependencies = rpm.get("dependencies", [])
rpm_limit_arches = rpm.get("limit_arches", [])
if limit_arches and rpm_limit_arches:
rpm_limit_arches = list(set(limit_arches) & set(rpm_limit_arches))
config["package_placeholders"]["pkgs"][rpm_name] = {}
config["package_placeholders"]["pkgs"][rpm_name]["name"] = rpm_name
config["package_placeholders"]["pkgs"][rpm_name]["description"] = description
config["package_placeholders"]["pkgs"][rpm_name]["requires"] = dependencies
config["package_placeholders"]["pkgs"][rpm_name]["limit_arches"] = rpm_limit_arches
config["package_placeholders"]["pkgs"][rpm_name]["srpm"] = srpm_name
return config
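
# A hedged sketch of the two "package_placeholders" syntaxes handled above.
# Package and SRPM names are illustrative only.
#
# Old format (a dict keyed by RPM name):
#
#   package_placeholders:
#     example-pkg:
#       description: Not built yet
#       requires:
#         - bash
#       srpm: example-srpm
#
# New format (a list keyed by SRPM):
#
#   package_placeholders:
#     - srpm_name: example-srpm
#       build_dependencies:
#         - gcc
#       rpms:
#         - rpm_name: example-pkg
#           description: Not built yet
#           dependencies:
#             - bash
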
def _load_config_label(document_id, document, settings):
config = {}
config["id"] = document_id
# Step 1: Mandatory fields
try:
# Name is an identifier for humans
config["name"] = str(document["data"]["name"])
# A short description, perhaps hinting the purpose
config["description"] = str(document["data"]["description"])
# Who maintains it? This is just a freeform string
# for humans to read. In Fedora, a FAS nick is recommended.
config["maintainer"] = str(document["data"]["maintainer"])
except KeyError:
raise ConfigError("'{file}.yaml' - There's something wrong with the mandatory fields. Sorry I don't have more specific info.".format(file=document_id))
# Step 2: Optional fields
# none here
return config
def _load_config_compose_view(document_id, document, settings):
config = {}
config["id"] = document_id
config["type"] = "compose"
# Step 1: Mandatory fields
try:
# Name is an identifier for humans
config["name"] = str(document["data"]["name"])
# A short description, perhaps hinting the purpose
config["description"] = str(document["data"]["description"])
# Who maintains it? This is just a freeform string
# for humans to read. In Fedora, a FAS nick is recommended.
config["maintainer"] = str(document["data"]["maintainer"])
# Labels connect things together.
# Workloads get installed in environments with the same label.
# They also get included in views with the same label.
config["labels"] = []
for repo in document["data"]["labels"]:
config["labels"].append(str(repo))
# Choose one repository that gets used as a source.
config["repository"] = str(document["data"]["repository"])
except KeyError:
raise ConfigError("'{file}.yaml' - There's something wrong with the mandatory fields. Sorry I don't have more specific info.".format(file=document_id))
# Step 2: Optional fields
# Buildroot strategy
config["buildroot_strategy"] = "none"
if "buildroot_strategy" in document["data"]:
if str(document["data"]["buildroot_strategy"]) in ["none", "dep_tracker", "root_logs"]:
config["buildroot_strategy"] = str(document["data"]["buildroot_strategy"])
# Limit this view only to the following architectures
config["architectures"] = []
if "architectures" in document["data"]:
for arch in document["data"]["architectures"]:
config["architectures"].append(str(arch))
if not len(config["architectures"]):
config["architectures"] = settings["allowed_arches"]
# Packages to be flagged as unwanted
config["unwanted_packages"] = []
if "unwanted_packages" in document["data"]:
for pkg in document["data"]["unwanted_packages"]:
config["unwanted_packages"].append(str(pkg))
# Packages to be flagged as unwanted on specific architectures
config["unwanted_arch_packages"] = {}
for arch in settings["allowed_arches"]:
config["unwanted_arch_packages"][arch] = []
if "unwanted_arch_packages" in document["data"]:
for arch, pkgs in document["data"]["unwanted_arch_packages"].items():
if arch not in settings["allowed_arches"]:
log(" Warning: {file}.yaml lists an unsupported architecture: {arch}. Moving on...".format(
file=document_id,
arch=arch
))
continue
for pkg_raw in pkgs:
pkg = str(pkg_raw)
config["unwanted_arch_packages"][arch].append(pkg)
# SRPMs (components) to be flagged as unwanted
config["unwanted_source_packages"] = []
if "unwanted_source_packages" in document["data"]:
for pkg in document["data"]["unwanted_source_packages"]:
config["unwanted_source_packages"].append(str(pkg))
return config
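
# A hedged sketch of the optional "buildroot_strategy" field handled above.
# Only the three values checked in the code ("none", "dep_tracker",
# "root_logs") are accepted; anything else silently falls back to "none".
#
#   data:
#     buildroot_strategy: root_logs
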
def _load_config_addon_view(document_id, document, settings):
config = {}
config["id"] = document_id
config["type"] = "addon"
# Step 1: Mandatory fields
try:
# Name is an identifier for humans
config["name"] = str(document["data"]["name"])
# A short description, perhaps hinting the purpose
config["description"] = str(document["data"]["description"])
# Who maintains it? This is just a freeform string
# for humans to read. In Fedora, a FAS nick is recommended.
config["maintainer"] = str(document["data"]["maintainer"])
# Labels connect things together.
# Workloads get installed in environments with the same label.
# They also get included in views with the same label.
config["labels"] = []
for repo in document["data"]["labels"]:
config["labels"].append(str(repo))
# Choose one repository that gets used as a source.
config["base_view_id"] = str(document["data"]["base_view_id"])
except KeyError:
raise ConfigError("'{file}.yaml' - There's something wrong with the mandatory fields. Sorry I don't have more specific info.".format(file=document_id))
# Step 2: Optional fields
# Packages to be flagged as unwanted
config["unwanted_packages"] = []
if "unwanted_packages" in document["data"]:
for pkg in document["data"]["unwanted_packages"]:
config["unwanted_packages"].append(str(pkg))
# Packages to be flagged as unwanted on specific architectures
config["unwanted_arch_packages"] = {}
for arch in settings["allowed_arches"]:
config["unwanted_arch_packages"][arch] = []
if "unwanted_arch_packages" in document["data"]:
for arch, pkgs in document["data"]["unwanted_arch_packages"].items():
if arch not in settings["allowed_arches"]:
log(" Warning: {file}.yaml lists an unsupported architecture: {arch}. Moving on...".format(
file=document_id,
arch=arch
))
continue
for pkg_raw in pkgs:
pkg = str(pkg_raw)
config["unwanted_arch_packages"][arch].append(pkg)
# SRPMs (components) to be flagged as unwanted
config["unwanted_source_packages"] = []
if "unwanted_source_packages" in document["data"]:
for pkg in document["data"]["unwanted_source_packages"]:
config["unwanted_source_packages"].append(str(pkg))
return config
def _load_config_unwanted(document_id, document, settings):
config = {}
config["id"] = document_id
# Step 1: Mandatory fields
try:
# Name is an identifier for humans
config["name"] = str(document["data"]["name"])
# A short description, perhaps hinting the purpose
config["description"] = str(document["data"]["description"])
# Who maintains it? This is just a freeform string
# for humans to read. In Fedora, a FAS nick is recommended.
config["maintainer"] = str(document["data"]["maintainer"])
# Labels connect things together.
# Workloads get installed in environments with the same label.
# They also get included in views with the same label.
config["labels"] = []
for repo in document["data"]["labels"]:
config["labels"].append(str(repo))
except KeyError:
raise ConfigError("'{file}.yaml' - There's something wrong with the mandatory fields. Sorry I don't have more specific info.".format(file=document_id))
# Step 2: Optional fields
# Packages to be flagged as unwanted
config["unwanted_packages"] = []
if "unwanted_packages" in document["data"]:
for pkg in document["data"]["unwanted_packages"]:
config["unwanted_packages"].append(str(pkg))
# Packages to be flagged as unwanted on specific architectures
config["unwanted_arch_packages"] = {}
for arch in settings["allowed_arches"]:
config["unwanted_arch_packages"][arch] = []
if "unwanted_arch_packages" in document["data"]:
for arch, pkgs in document["data"]["unwanted_arch_packages"].items():
if arch not in settings["allowed_arches"]:
log(" Warning: {file}.yaml lists an unsupported architecture: {arch}. Moving on...".format(
file=document_id,
arch=arch
))
continue
for pkg_raw in pkgs:
pkg = str(pkg_raw)
config["unwanted_arch_packages"][arch].append(pkg)
# SRPMs (components) to be flagged as unwanted
config["unwanted_source_packages"] = []
if "unwanted_source_packages" in document["data"]:
for pkg in document["data"]["unwanted_source_packages"]:
config["unwanted_source_packages"].append(str(pkg))
# SRPMs (components) to be flagged as unwanted on specific architectures
config["unwanted_arch_source_packages"] = {}
for arch in settings["allowed_arches"]:
config["unwanted_arch_source_packages"][arch] = []
if "unwanted_arch_source_packages" in document["data"]:
for arch, pkgs in document["data"]["unwanted_arch_source_packages"].items():
if arch not in settings["allowed_arches"]:
log(" Warning: {file}.yaml lists an unsupported architecture: {arch}. Moving on...".format(
file=document_id,
arch=arch
))
continue
for pkg_raw in pkgs:
pkg = str(pkg_raw)
config["unwanted_arch_source_packages"][arch].append(pkg)
return config
def _load_config_buildroot(document_id, document, settings):
config = {}
config["id"] = document_id
# Step 1: Mandatory fields
try:
# Who maintains it? This is just a freeform string
# for humans to read. In Fedora, a FAS nick is recommended.
config["maintainer"] = str(document["data"]["maintainer"])
# What view is this for
config["view_id"] = str(document["data"]["view_id"])
except KeyError:
raise ConfigError("'{file}.yaml' - There's something wrong with the mandatory fields. Sorry I don't have more specific info.".format(file=document_id))
# Step 2: Optional fields
config["base_buildroot"] = {}
for arch in settings["allowed_arches"]:
config["base_buildroot"][arch] = []
if "base_buildroot" in document["data"]:
for arch, pkgs in document["data"]["base_buildroot"].items():
if arch not in settings["allowed_arches"]:
log(" Warning: {file}.yaml lists an unsupported architecture: {arch}. Moving on...".format(
file=document_id,
arch=arch
))
continue
if pkgs:
for pkg_raw in pkgs:
pkg = str(pkg_raw)
config["base_buildroot"][arch].append(pkg)
config["source_packages"] = {}
for arch in settings["allowed_arches"]:
config["source_packages"][arch] = {}
if "source_packages" in document["data"]:
for arch, srpms_dict in document["data"]["source_packages"].items():
if arch not in settings["allowed_arches"]:
log(" Warning: {file}.yaml lists an unsupported architecture: {arch}. Moving on...".format(
file=document_id,
arch=arch
))
continue
if not srpms_dict:
continue
for srpm_name, srpm_data in srpms_dict.items():
requires = []
if "requires" in srpm_data:
try:
for pkg_raw in srpm_data["requires"]:
requires.append(str(pkg_raw))
except TypeError:
log(" Warning: {file} has an empty 'requires' field defined which is invalid. Moving on...".format(
file=document_id
))
continue
config["source_packages"][arch][str(srpm_name)] = {}
config["source_packages"][arch][str(srpm_name)]["requires"] = requires
return config
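
# A hedged sketch of the per-arch structure parsed by _load_config_buildroot()
# above; the package and SRPM names are made up.
#
#   data:
#     maintainer: someone
#     view_id: example-view
#     base_buildroot:
#       x86_64:
#         - example-base-package
#     source_packages:
#       x86_64:
#         example-srpm:
#           requires:
#             - example-build-dependency
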
def _load_json_data_buildroot_pkg_relations(document_id, document, settings):
config = {}
config["id"] = document_id
try:
# View ID
config["view_id"] = document["data"]["view_id"]
# Arch
arch = document["data"]["arch"]
if arch not in settings["allowed_arches"]:
raise ConfigError("Error: '{file}.json' lists an unsupported architecture: {arch}.".format(
file=document_id,
arch=arch
))
config["arch"] = arch
#pkg_relations
config["pkg_relations"] = document["data"]["pkgs"]
except KeyError:
raise ConfigError("'{file}.yaml' - There's something wrong with the mandatory fields. Sorry I don't have more specific info.".format(file=document_id))
return config
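
# A hedged sketch of the "buildroot-binary-relations" JSON consumed above.
# Only the keys actually read by the code are shown; the "pkgs" payload is
# passed through as-is into config["pkg_relations"].
#
#   {
#     "document_type": "buildroot-binary-relations",
#     "version": "...",
#     "data": {
#       "view_id": "example-view",
#       "arch": "x86_64",
#       "pkgs": { ... }
#     }
#   }
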
def get_configs(settings):
log("")
directory = settings["configs"]
if "allowed_arches" not in settings:
err_log("System error: allowed_arches not configured")
raise SettingsError
if not settings["allowed_arches"]:
err_log("System error: no allowed_arches not configured")
raise SettingsError
configs = {}
configs["repos"] = {}
configs["envs"] = {}
configs["workloads"] = {}
configs["labels"] = {}
configs["views"] = {}
configs["unwanteds"] = {}
configs["buildroots"] = {}
configs["buildroot_pkg_relations"] = {}
# Step 1: Load all configs
serious_error_messages = set()
log("Loading yaml files...")
log("---------------------")
for yml_file in os.listdir(directory):
# Only accept yaml files
if not yml_file.endswith(".yaml"):
continue
document_id = yml_file.split(".yaml")[0]
try:
with open(os.path.join(directory, yml_file), "r") as file:
# Safely load the config
try:
document = yaml.safe_load(file)
except yaml.YAMLError as err:
raise ConfigError("Error loading a config '{filename}': {err}".format(
filename=yml_file,
err=err))
# Only accept yaml files stating their purpose!
if not ("document" in document and "version" in document):
raise ConfigError("'{file}.yaml' - doesn't specify the 'document' and/or the 'version' field.".format(file=yml_file))
# === Case: Repository config ===
if document["document"] == "feedback-pipeline-repository":
if document["version"] == 1:
configs["repos"][document_id] = _load_config_repo(document_id, document, settings)
elif document["version"] == 2:
configs["repos"][document_id] = _load_config_repo_v2(document_id, document, settings)
# === Case: Environment config ===
if document["document"] == "feedback-pipeline-environment":
configs["envs"][document_id] = _load_config_env(document_id, document, settings)
# === Case: Workload config ===
if document["document"] == "feedback-pipeline-workload":
configs["workloads"][document_id] = _load_config_workload(document_id, document, settings)
# === Case: Label config ===
if document["document"] == "feedback-pipeline-label":
configs["labels"][document_id] = _load_config_label(document_id, document, settings)
# === Case: View config ===
# (Also including the legacy "feedback-pipeline-compose-view" for backwards compatibility)
if document["document"] in ["feedback-pipeline-view", "feedback-pipeline-compose-view"]:
configs["views"][document_id] = _load_config_compose_view(document_id, document, settings)
# === Case: View addon config ===
if document["document"] == "feedback-pipeline-view-addon":
configs["views"][document_id] = _load_config_addon_view(document_id, document, settings)
# === Case: Unwanted config ===
if document["document"] == "feedback-pipeline-unwanted":
configs["unwanteds"][document_id] = _load_config_unwanted(document_id, document, settings)
# === Case: Buildroot config ===
if document["document"] == "feedback-pipeline-buildroot":
configs["buildroots"][document_id] = _load_config_buildroot(document_id, document, settings)
except ConfigError as err:
serious_error_messages.add(str(err))
continue
if serious_error_messages:
log("")
log(" -------------------------------------------------------------------------")
log(" | 🔥 ERRORS FOUND 🔥 (the following files will be excluded)")
log(" |")
for message in serious_error_messages:
log(" | {}".format(message))
log(" -------------------------------------------------------------------------")
log("")
else:
log("")
log(" ✅ No serious errors found.")
log("")
log(" Done!")
log("")
log("")
# Step 1.5: Load all external data sources
serious_error_messages = set()
log("Loading json files...")
log("---------------------")
log("")
for json_file in os.listdir(directory):
        # Only accept json files
if not json_file.endswith(".json"):
continue
document_id = json_file.split(".json")[0]
try:
try:
json_data = load_data(os.path.join(directory, json_file))
            except Exception as err:
raise ConfigError("Error loading a JSON data file '{filename}': {err}".format(
filename=json_file,
err=err))
# Only accept json files stating their purpose!
if not ("document_type" in json_data and "version" in json_data):
raise ConfigError("'{file}.yaml' - doesn't specify the 'document' and/or the 'version' field.".format(file=json_file))
# === Case: Buildroot pkg relations data ===
if json_data["document_type"] == "buildroot-binary-relations":
configs["buildroot_pkg_relations"][document_id] = _load_json_data_buildroot_pkg_relations(document_id, json_data, settings)
except ConfigError as err:
serious_error_messages.add(str(err))
continue
if serious_error_messages:
log("")
log(" -------------------------------------------------------------------------")
log(" | 🔥 ERRORS FOUND 🔥 (the following files will be excluded)")
log(" |")
for message in serious_error_messages:
log(" | {}".format(message))
log(" -------------------------------------------------------------------------")
log("")
else:
log("")
log(" ✅ No serious errors found.")
log("")
log(" Done!")
log("")
log("")
# Step 2: cross check configs for references and other validation
#
# Also, for some configs, such as the view addon, add some fields
# from its base view
#
# They need to be checked in some logical order, because
# invalid configs get removed. So, for example, I need to first
# check the compose views before checking the addon views,
# because if I need to ditch a proper view, I can't use any
# of the addon views either.
log("Additional validations...")
log("-------------------------")
# Delete views referencing non-existing repos
    for view_conf_id, view_conf in list(configs["views"].items()):
if view_conf["type"] == "compose":
if view_conf["repository"] not in configs["repos"]:
log(" View {} is referencing a non-existing repository. Removing it.".format(view_conf_id))
del configs["views"][view_conf_id]
# Delete add-on views referencing non-existing or invalid base view
    for view_conf_id, view_conf in list(configs["views"].items()):
        if view_conf["type"] == "addon":
            base_view_id = view_conf["base_view_id"]
            if base_view_id not in configs["views"]:
                log(" Addon view {} is referencing a non-existing base_view_id. Removing it.".format(view_conf_id))
                del configs["views"][view_conf_id]
                continue
            else:
                base_view = configs["views"][base_view_id]
                if base_view["type"] != "compose":
                    log(" Addon view {} is referencing an addon base_view_id, which is not supported. Removing it.".format(view_conf_id))
                    del configs["views"][view_conf_id]
                    continue
            # Adding some extra fields onto the addon view
            configs["views"][view_conf_id]["repository"] = configs["views"][base_view_id]["repository"]
            configs["views"][view_conf_id]["architectures"] = configs["views"][base_view_id]["architectures"]
# Adjust view architecture based on repository architectures
for view_conf_id, view_conf in configs["views"].items():
if view_conf["type"] == "compose":
if not len(view_conf["architectures"]):
view_conf["architectures"] = settings["allowed_arches"]
actual_arches = set()
for arch in view_conf["architectures"]:
repo_id = view_conf["repository"]
if arch in configs["repos"][repo_id]["source"]["architectures"]:
actual_arches.add(arch)
view_conf["architectures"] = sorted(list(actual_arches))
# Adjust addon view architecture based on its base view architectures
for view_conf_id, view_conf in configs["views"].items():
if view_conf["type"] == "addon":
if not len(view_conf["architectures"]):
view_conf["architectures"] = settings["allowed_arches"]
actual_arches = set()
for arch in view_conf["architectures"]:
base_view_id = view_conf["base_view_id"]
if arch in configs["views"][base_view_id]["architectures"]:
actual_arches.add(arch)
view_conf["architectures"] = sorted(list(actual_arches))
# FIXME: Check other configs, too!
log("")
log(" ✅ No serious errors found.")
log("")
log(" Done!")
log("")
log("")
log("Summary:")
log("--------")
log("")
log("Standard yaml configs:")
log(" - {} repositories".format(len(configs["repos"])))
log(" - {} environments".format(len(configs["envs"])))
log(" - {} workloads".format(len(configs["workloads"])))
#log(" - {} labels".format(len(configs["labels"])))
log(" - {} views".format(len(configs["views"])))
log(" - {} exclusion lists".format(len(configs["unwanteds"])))
log("")
log("Additional configs: (soon to be deprecated)")
log(" - {} buildroots".format(len(configs["buildroots"])))
log(" - {} buildroot pkg relations JSONs".format(len(configs["buildroot_pkg_relations"])))
log("")
return configs
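
# Illustrative usage of get_configs() (a sketch, not called from here).
# The settings dict needs at least "configs" (a directory holding the yaml and
# json files) and "allowed_arches"; the Analyzer below additionally expects
# keys such as "max_subprocesses" and "root_log_deps_cache_path".
#
#   settings = {
#       "configs": "/path/to/configs",
#       "allowed_arches": ["aarch64", "x86_64"],
#   }
#   configs = get_configs(settings)
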
###############################################################################
### Analysis ##################################################################
###############################################################################
class Analyzer():
###############################################################################
### Analyzing stuff! ##########################################################
###############################################################################
# Configs:
# TYPE: KEY: ID:
# - repo repos repo_id
# - env_conf envs env_id
# - workload_conf workloads workload_id
# - label labels label_id
# - conf_view views view_id
#
# Data:
# TYPE: KEY: ID:
# - pkg pkgs/repo_id/arch NEVR
# - env envs env_id:repo_id:arch_id
# - workload workloads workload_id:env_id:repo_id:arch_id
# - view views view_id:repo_id:arch_id
#
# self.tmp_dnf_cachedir is either "dnf_cachedir" in TemporaryDirectory or set by --dnf-cache-dir
# contents:
# - "dnf_cachedir-{repo}-{arch}" <-- internal DNF cache
#
# self.tmp_installroots is "installroots" in TemporaryDirectory
# contents:
# - "dnf_generic_installroot-{repo}-{arch}" <-- installroots for _analyze_pkgs
# - "dnf_env_installroot-{env_conf}-{repo}-{arch}" <-- installroots for envs and workloads and buildroots
#
#
def __init__(self, configs, settings):
self.workload_queue = {}
self.workload_queue_counter_total = 0
self.workload_queue_counter_current = 0
self.current_subprocesses = 0
self.configs = configs
self.settings = settings
self.global_dnf_repo_cache = {}
self.data = {}
self.cache = {}
self.cache["root_log_deps"] = {}
self.cache["root_log_deps"]["current"] = {}
self.cache["root_log_deps"]["next"] = {}
try:
self.cache["root_log_deps"]["current"] = load_data(self.settings["root_log_deps_cache_path"])
except FileNotFoundError:
pass
def _load_repo_cached(self, base, repo, arch):
repo_id = repo["id"]
exists = True
if repo_id not in self.global_dnf_repo_cache:
exists = False
self.global_dnf_repo_cache[repo_id] = {}
elif arch not in self.global_dnf_repo_cache[repo_id]:
exists = False
if exists:
#log(" Loading repos from cache...")
for repo in self.global_dnf_repo_cache[repo_id][arch]:
base.repos.add(repo)
else:
#log(" Loading repos using DNF...")
for repo_name, repo_data in repo["source"]["repos"].items():
if repo_data["limit_arches"]:
if arch not in repo_data["limit_arches"]:
#log(" Skipping {} on {}".format(repo_name, arch))
continue
#log(" Including {}".format(repo_name))
additional_repo = dnf.repo.Repo(
name=repo_name,
parent_conf=base.conf
)
additional_repo.baseurl = repo_data["baseurl"]
additional_repo.priority = repo_data["priority"]
additional_repo.exclude = repo_data["exclude"]
base.repos.add(additional_repo)
# Additional repository (if configured)
#if repo["source"]["additional_repository"]:
# additional_repo = dnf.repo.Repo(name="additional-repository",parent_conf=base.conf)
# additional_repo.baseurl = [repo["source"]["additional_repository"]]
# additional_repo.priority = 1
# base.repos.add(additional_repo)
# All other system repos
#base.read_all_repos()
self.global_dnf_repo_cache[repo_id][arch] = []
for repo in base.repos.iter_enabled():
self.global_dnf_repo_cache[repo_id][arch].append(repo)
def _analyze_pkgs(self, repo, arch):
log("Analyzing pkgs for {repo_name} ({repo_id}) {arch}".format(
repo_name=repo["name"],
repo_id=repo["id"],
arch=arch
))
with dnf.Base() as base:
base.conf.debuglevel = 0
base.conf.errorlevel = 0
base.conf.logfilelevel = 0
# Local DNF cache
cachedir_name = "dnf_cachedir-{repo}-{arch}".format(
repo=repo["id"],
arch=arch
)
base.conf.cachedir = os.path.join(self.tmp_dnf_cachedir, cachedir_name)
# Generic installroot
root_name = "dnf_generic_installroot-{repo}-{arch}".format(
repo=repo["id"],
arch=arch
)
base.conf.installroot = os.path.join(self.tmp_installroots, root_name)
# Architecture
base.conf.arch = arch
base.conf.ignorearch = True
# Releasever
base.conf.substitutions['releasever'] = repo["source"]["releasever"]
for repo_name, repo_data in repo["source"]["repos"].items():
if repo_data["limit_arches"]:
if arch not in repo_data["limit_arches"]:
log(" Skipping {} on {}".format(repo_name, arch))
continue
log(" Including {}".format(repo_name))
additional_repo = dnf.repo.Repo(
name=repo_name,
parent_conf=base.conf
)
additional_repo.baseurl = repo_data["baseurl"]
additional_repo.priority = repo_data["priority"]
base.repos.add(additional_repo)
# Additional repository (if configured)
#if repo["source"]["additional_repository"]:
# additional_repo = dnf.repo.Repo(name="additional-repository",parent_conf=base.conf)
# additional_repo.baseurl = [repo["source"]["additional_repository"]]
# additional_repo.priority = 1
# base.repos.add(additional_repo)
# Load repos
log(" Loading repos...")
#base.read_all_repos()
# At this stage, I need to get all packages from the repo listed.
# That also includes modular packages. Modular packages in non-enabled
# streams would be normally hidden. So I mark all the available repos as
# hotfix repos to make all packages visible, including non-enabled streams.
            for dnf_repo in base.repos.all():
                dnf_repo.module_hotfixes = True
# This sometimes fails, so let's try at least N times
# before totally giving up!
MAX_TRIES = 10
attempts = 0
success = False
while attempts < MAX_TRIES:
try:
base.fill_sack(load_system_repo=False)
success = True
break
except dnf.exceptions.RepoError as err:
attempts +=1
log(" Failed to download repodata. Trying again!")
if not success:
err = "Failed to download repodata while analyzing repo '{repo_name} ({repo_id}) {arch}".format(
repo_name=repo["name"],
repo_id=repo["id"],
arch=arch
)
err_log(err)
raise RepoDownloadError(err)
# DNF query
query = base.sack.query
# Get all packages
all_pkgs_set = set(query())
pkgs = {}
for pkg_object in all_pkgs_set:
pkg_nevra = "{name}-{evr}.{arch}".format(
name=pkg_object.name,
evr=pkg_object.evr,
arch=pkg_object.arch
)
pkg_nevr = "{name}-{evr}".format(
name=pkg_object.name,
evr=pkg_object.evr
)
pkg = {}
pkg["id"] = pkg_nevra
pkg["name"] = pkg_object.name
pkg["evr"] = pkg_object.evr
pkg["nevr"] = pkg_nevr
pkg["arch"] = pkg_object.arch
pkg["installsize"] = pkg_object.installsize
pkg["description"] = pkg_object.description
#pkg["provides"] = pkg_object.provides
#pkg["requires"] = pkg_object.requires
#pkg["recommends"] = pkg_object.recommends
#pkg["suggests"] = pkg_object.suggests
pkg["summary"] = pkg_object.summary
pkg["source_name"] = pkg_object.source_name
pkg["sourcerpm"] = pkg_object.sourcerpm
pkg["reponame"] = pkg_object.reponame
pkgs[pkg_nevra] = pkg
log(" Done! ({pkg_count} packages in total)".format(
pkg_count=len(pkgs)
))
log("")
return pkgs
def _analyze_package_relations(self, dnf_query, package_placeholders = None):
relations = {}
for pkg in dnf_query:
pkg_id = "{name}-{evr}.{arch}".format(
name=pkg.name,
evr=pkg.evr,
arch=pkg.arch
)
required_by = set()
recommended_by = set()
suggested_by = set()
for dep_pkg in dnf_query.filter(requires=[pkg]):
dep_pkg_id = "{name}-{evr}.{arch}".format(
name=dep_pkg.name,
evr=dep_pkg.evr,
arch=dep_pkg.arch
)
required_by.add(dep_pkg_id)
for dep_pkg in dnf_query.filter(recommends=[pkg]):
dep_pkg_id = "{name}-{evr}.{arch}".format(
name=dep_pkg.name,
evr=dep_pkg.evr,
arch=dep_pkg.arch
)
recommended_by.add(dep_pkg_id)
for dep_pkg in dnf_query.filter(suggests=[pkg]):
dep_pkg_id = "{name}-{evr}.{arch}".format(
name=dep_pkg.name,
evr=dep_pkg.evr,
arch=dep_pkg.arch
)
suggested_by.add(dep_pkg_id)
relations[pkg_id] = {}
relations[pkg_id]["required_by"] = sorted(list(required_by))
relations[pkg_id]["recommended_by"] = sorted(list(recommended_by))
relations[pkg_id]["suggested_by"] = sorted(list(suggested_by))
relations[pkg_id]["source_name"] = pkg.source_name
relations[pkg_id]["reponame"] = pkg.reponame
if package_placeholders:
for placeholder_name,placeholder_data in package_placeholders.items():
placeholder_id = pkg_placeholder_name_to_id(placeholder_name)
relations[placeholder_id] = {}
relations[placeholder_id]["required_by"] = []
relations[placeholder_id]["recommended_by"] = []
relations[placeholder_id]["suggested_by"] = []
relations[placeholder_id]["reponame"] = None
for placeholder_name,placeholder_data in package_placeholders.items():
placeholder_id = pkg_placeholder_name_to_id(placeholder_name)
for placeholder_dependency_name in placeholder_data["requires"]:
for pkg_id in relations:
pkg_name = pkg_id_to_name(pkg_id)
if pkg_name == placeholder_dependency_name:
relations[pkg_id]["required_by"].append(placeholder_id)
return relations
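
    # Shape of the dict returned by _analyze_package_relations(), sketched for
    # a single (made-up) package id:
    #
    #   {
    #       "bash-5.2.26-3.fc40.x86_64": {
    #           "required_by": [...],
    #           "recommended_by": [...],
    #           "suggested_by": [...],
    #           "source_name": "bash",
    #           "reponame": "fedora",
    #       },
    #       ...
    #   }
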
def _analyze_env_without_leaking(self, env_conf, repo, arch):
# DNF leaks memory and file descriptors :/
#
# So, this workaround runs it in a subprocess that should have its resources
# freed when done!
queue_result = multiprocessing.Queue()
process = multiprocessing.Process(target=self._analyze_env_process, args=(queue_result, env_conf, repo, arch))
process.start()
process.join()
# This basically means there was an exception in the processing and the process crashed
if queue_result.empty():
raise AnalysisError
env = queue_result.get()
return env
def _analyze_env_process(self, queue_result, env_conf, repo, arch):
env = self._analyze_env(env_conf, repo, arch)
queue_result.put(env)
def _analyze_env(self, env_conf, repo, arch):
env = {}
env["env_conf_id"] = env_conf["id"]
env["pkg_ids"] = []
env["repo_id"] = repo["id"]
env["arch"] = arch
env["pkg_relations"] = []
env["errors"] = {}
env["errors"]["non_existing_pkgs"] = []
env["succeeded"] = True
with dnf.Base() as base:
base.conf.debuglevel = 0
base.conf.errorlevel = 0
base.conf.logfilelevel = 0
# Local DNF cache
cachedir_name = "dnf_cachedir-{repo}-{arch}".format(
repo=repo["id"],
arch=arch
)
base.conf.cachedir = os.path.join(self.tmp_dnf_cachedir, cachedir_name)
# Environment installroot
root_name = "dnf_env_installroot-{env_conf}-{repo}-{arch}".format(
env_conf=env_conf["id"],
repo=repo["id"],
arch=arch
)
base.conf.installroot = os.path.join(self.tmp_installroots, root_name)
# Architecture
base.conf.arch = arch
base.conf.ignorearch = True
# Releasever
base.conf.substitutions['releasever'] = repo["source"]["releasever"]
# Additional DNF Settings
base.conf.tsflags.append('justdb')
base.conf.tsflags.append('noscripts')
# Environment config
if "include-weak-deps" not in env_conf["options"]:
base.conf.install_weak_deps = False
if "include-docs" not in env_conf["options"]:
base.conf.tsflags.append('nodocs')
# Load repos
#log(" Loading repos...")
#base.read_all_repos()
self._load_repo_cached(base, repo, arch)
# This sometimes fails, so let's try at least N times
# before totally giving up!
MAX_TRIES = 10
attempts = 0
success = False
while attempts < MAX_TRIES:
try:
base.fill_sack(load_system_repo=False)
success = True
break
except dnf.exceptions.RepoError as err:
attempts +=1
log(" Failed to download repodata. Trying again!")
if not success:
err = "Failed to download repodata while analyzing environment '{env_conf}' from '{repo}' {arch}:".format(
env_conf=env_conf["id"],
repo=repo["id"],
arch=arch
)
err_log(err)
raise RepoDownloadError(err)
# Packages
log(" Adding packages...")
for pkg in env_conf["packages"]:
try:
base.install(pkg)
except dnf.exceptions.MarkingError:
env["errors"]["non_existing_pkgs"].append(pkg)
continue
# Groups
log(" Adding groups...")
if env_conf["groups"]:
base.read_comps(arch_filter=True)
for grp_spec in env_conf["groups"]:
group = base.comps.group_by_pattern(grp_spec)
if not group:
env["errors"]["non_existing_pkgs"].append(grp_spec)
continue
base.group_install(group.id, ['mandatory', 'default'])
# Architecture-specific packages
for pkg in env_conf["arch_packages"][arch]:
try:
base.install(pkg)
except dnf.exceptions.MarkingError:
env["errors"]["non_existing_pkgs"].append(pkg)
continue
# Resolve dependencies
log(" Resolving dependencies...")
try:
base.resolve()
except dnf.exceptions.DepsolveError as err:
err_log("Failed to analyze environment '{env_conf}' from '{repo}' {arch}:".format(
env_conf=env_conf["id"],
repo=repo["id"],
arch=arch
))
err_log(" - {err}".format(err=err))
env["succeeded"] = False
env["errors"]["message"] = str(err)
return env
# Write the result into RPMDB.
# The transaction needs us to download all the packages. :(
# So let's do that to make it happy.
log(" Downloading packages...")
base.download_packages(base.transaction.install_set)
log(" Running DNF transaction, writing RPMDB...")
try:
base.do_transaction()
except (dnf.exceptions.TransactionCheckError, dnf.exceptions.Error) as err:
err_log("Failed to analyze environment '{env_conf}' from '{repo}' {arch}:".format(
env_conf=env_conf["id"],
repo=repo["id"],
arch=arch
))
err_log(" - {err}".format(err=err))
env["succeeded"] = False
env["errors"]["message"] = str(err)
return env
# DNF Query
log(" Creating a DNF Query object...")
query = base.sack.query().filterm(pkg=base.transaction.install_set)
for pkg in query:
pkg_id = "{name}-{evr}.{arch}".format(
name=pkg.name,
evr=pkg.evr,
arch=pkg.arch
)
env["pkg_ids"].append(pkg_id)
env["pkg_relations"] = self._analyze_package_relations(query)
log(" Done! ({pkg_count} packages in total)".format(
pkg_count=len(env["pkg_ids"])
))
log("")
return env
def _analyze_envs(self):
envs = {}
# Look at all env configs...
for env_conf_id, env_conf in self.configs["envs"].items():
# For each of those, look at all repos it lists...
for repo_id in env_conf["repositories"]:
# And for each of the repo, look at all arches it supports.
repo = self.configs["repos"][repo_id]
for arch in repo["source"]["architectures"]:
# Now it has
# all env confs *
# repos each config lists *
                    # arches each repo supports
# Analyze all of that!
log("Analyzing {env_name} ({env_id}) from {repo_name} ({repo}) {arch}...".format(
env_name=env_conf["name"],
env_id=env_conf_id,
repo_name=repo["name"],
repo=repo_id,
arch=arch
))
env_id = "{env_conf_id}:{repo_id}:{arch}".format(
env_conf_id=env_conf_id,
repo_id=repo_id,
arch=arch
)
envs[env_id] = self._analyze_env(env_conf, repo, arch)
self.data["envs"] = envs
def _return_failed_workload_env_err(self, workload_conf, env_conf, repo, arch):
workload = {}
workload["workload_conf_id"] = workload_conf["id"]
workload["env_conf_id"] = env_conf["id"]
workload["repo_id"] = repo["id"]
workload["arch"] = arch
workload["pkg_env_ids"] = []
workload["pkg_added_ids"] = []
workload["pkg_placeholder_ids"] = []
workload["pkg_relations"] = []
workload["errors"] = {}
workload["errors"]["non_existing_pkgs"] = []
workload["succeeded"] = False
workload["env_succeeded"] = False
workload["errors"]["message"] = """
Failed to analyze this workload because of an error while analyzing the environment.
Please see the associated environment results for a detailed error message.
"""
return workload
def _analyze_workload(self, workload_conf, env_conf, repo, arch):
workload = {}
workload["workload_conf_id"] = workload_conf["id"]
workload["env_conf_id"] = env_conf["id"]
workload["repo_id"] = repo["id"]
workload["arch"] = arch
workload["pkg_env_ids"] = []
workload["pkg_added_ids"] = []
workload["pkg_placeholder_ids"] = []
workload["srpm_placeholder_names"] = []
workload["enabled_modules"] = []
workload["pkg_relations"] = []
workload["errors"] = {}
workload["errors"]["non_existing_pkgs"] = []
workload["errors"]["non_existing_placeholder_deps"] = []
workload["warnings"] = {}
workload["warnings"]["non_existing_pkgs"] = []
workload["warnings"]["non_existing_placeholder_deps"] = []
workload["warnings"]["message"] = None
workload["succeeded"] = True
workload["env_succeeded"] = True
# Figure out the workload labels
# It can only have labels that are in both the workload_conf and the env_conf
workload["labels"] = list(set(workload_conf["labels"]) & set(env_conf["labels"]))
with dnf.Base() as base:
base.conf.debuglevel = 0
base.conf.errorlevel = 0
base.conf.logfilelevel = 0
# Local DNF cache
cachedir_name = "dnf_cachedir-{repo}-{arch}".format(
repo=repo["id"],
arch=arch
)
base.conf.cachedir = os.path.join(self.tmp_dnf_cachedir, cachedir_name)
# Environment installroot
# Since we're not writing anything into the installroot,
# let's just use the base image's installroot!
root_name = "dnf_env_installroot-{env_conf}-{repo}-{arch}".format(
env_conf=env_conf["id"],
repo=repo["id"],
arch=arch
)
base.conf.installroot = os.path.join(self.tmp_installroots, root_name)
# Architecture
base.conf.arch = arch
base.conf.ignorearch = True
# Releasever
base.conf.substitutions['releasever'] = repo["source"]["releasever"]
# Environment config
if "include-weak-deps" not in workload_conf["options"]:
base.conf.install_weak_deps = False
if "include-docs" not in workload_conf["options"]:
base.conf.tsflags.append('nodocs')
# Load repos
#log(" Loading repos...")
#base.read_all_repos()
self._load_repo_cached(base, repo, arch)
# Now I need to load the local RPMDB.
# However, if the environment is empty, it wasn't created, so I need to treat
# it differently. So let's check!
if len(env_conf["packages"]) or len(env_conf["arch_packages"][arch]):
# It's not empty! Load local data.
base.fill_sack(load_system_repo=True)
else:
# It's empty. Treat it like we're using an empty installroot.
# This sometimes fails, so let's try at least N times
# before totally giving up!
MAX_TRIES = 10
attempts = 0
success = False
while attempts < MAX_TRIES:
try:
base.fill_sack(load_system_repo=False)
success = True
break
except dnf.exceptions.RepoError as err:
attempts +=1
#log(" Failed to download repodata. Trying again!")
if not success:
err = "Failed to download repodata while analyzing workload '{workload_id} on '{env_id}' from '{repo}' {arch}...".format(
workload_id=workload_conf_id,
env_id=env_conf_id,
repo_name=repo["name"],
repo=repo_id,
arch=arch)
err_log(err)
raise RepoDownloadError(err)
# Disabling modules
if workload_conf["modules_disable"]:
try:
#log(" Disabling modules...")
module_base = dnf.module.module_base.ModuleBase(base)
module_base.disable(workload_conf["modules_disable"])
except dnf.exceptions.MarkingErrors as err:
workload["succeeded"] = False
workload["errors"]["message"] = str(err)
#log(" Failed! (Error message will be on the workload results page.")
#log("")
return workload
# Enabling modules
if workload_conf["modules_enable"]:
try:
#log(" Enabling modules...")
module_base = dnf.module.module_base.ModuleBase(base)
module_base.enable(workload_conf["modules_enable"])
except dnf.exceptions.MarkingErrors as err:
workload["succeeded"] = False
workload["errors"]["message"] = str(err)
#log(" Failed! (Error message will be on the workload results page.")
#log("")
return workload
# Get a list of enabled modules
# The official DNF API doesn't support it. I got this from the DNF folks
# (thanks!) as a solution, but just keeping it in a generic try/except
# as it's not an official API.
enabled_modules = set()
try:
all_modules = base._moduleContainer.getModulePackages()
for module in all_modules:
if base._moduleContainer.isEnabled(module):
module_name = module.getName()
module_stream = module.getStream()
module_nsv = "{module_name}:{module_stream}".format(
module_name=module_name,
module_stream=module_stream
)
enabled_modules.add(module_nsv)
except:
#log(" Something went wrong with getting a list of enabled modules. (This uses non-API DNF calls. Skipping.)")
enabled_modules = set()
workload["enabled_modules"] = list(enabled_modules)
# Packages
#log(" Adding packages...")
for pkg in workload_conf["packages"]:
try:
base.install(pkg)
except dnf.exceptions.MarkingError:
if pkg in self.settings["weird_packages_that_can_not_be_installed"]:
continue
else:
if "strict" in workload_conf["options"]:
workload["errors"]["non_existing_pkgs"].append(pkg)
else:
workload["warnings"]["non_existing_pkgs"].append(pkg)
continue
# Groups
#log(" Adding groups...")
if workload_conf["groups"]:
base.read_comps(arch_filter=True)
for grp_spec in workload_conf["groups"]:
group = base.comps.group_by_pattern(grp_spec)
if not group:
workload["errors"]["non_existing_pkgs"].append(grp_spec)
continue
base.group_install(group.id, ['mandatory', 'default'])
# TODO: Mark group packages as required... the following code doesn't work
#for pkg in group.packages_iter():
# print(pkg.name)
# workload_conf["packages"].append(pkg.name)
# Filter out the relevant package placeholders for this arch
package_placeholders = {}
for placeholder_name, placeholder_data in workload_conf["package_placeholders"]["pkgs"].items():
                # If this placeholder is not limited to just a subset of arches, add it
if not placeholder_data["limit_arches"]:
package_placeholders[placeholder_name] = placeholder_data
# otherwise it is limited. In that case, only add it if the current arch is on its list
elif arch in placeholder_data["limit_arches"]:
package_placeholders[placeholder_name] = placeholder_data
# Same for SRPM placeholders
srpm_placeholders = {}
for placeholder_name, placeholder_data in workload_conf["package_placeholders"]["srpms"].items():
                # If this placeholder is not limited to just a subset of arches, add it
if not placeholder_data["limit_arches"]:
srpm_placeholders[placeholder_name] = placeholder_data
# otherwise it is limited. In that case, only add it if the current arch is on its list
elif arch in placeholder_data["limit_arches"]:
srpm_placeholders[placeholder_name] = placeholder_data
# Dependencies of package placeholders
#log(" Adding package placeholder dependencies...")
for placeholder_name, placeholder_data in package_placeholders.items():
for pkg in placeholder_data["requires"]:
try:
base.install(pkg)
except dnf.exceptions.MarkingError:
if "strict" in workload_conf["options"]:
workload["errors"]["non_existing_placeholder_deps"].append(pkg)
else:
workload["warnings"]["non_existing_placeholder_deps"].append(pkg)
continue
# Architecture-specific packages
for pkg in workload_conf["arch_packages"][arch]:
try:
base.install(pkg)
except dnf.exceptions.MarkingError:
if "strict" in workload_conf["options"]:
workload["errors"]["non_existing_pkgs"].append(pkg)
else:
workload["warnings"]["non_existing_pkgs"].append(pkg)
continue
if workload["errors"]["non_existing_pkgs"] or workload["errors"]["non_existing_placeholder_deps"]:
error_message_list = []
if workload["errors"]["non_existing_pkgs"]:
error_message_list.append("The following required packages are not available:")
for pkg_name in workload["errors"]["non_existing_pkgs"]:
pkg_string = " - {pkg_name}".format(
pkg_name=pkg_name
)
error_message_list.append(pkg_string)
if workload["errors"]["non_existing_placeholder_deps"]:
error_message_list.append("The following dependencies of package placeholders are not available:")
for pkg_name in workload["errors"]["non_existing_placeholder_deps"]:
pkg_string = " - {pkg_name}".format(
pkg_name=pkg_name
)
error_message_list.append(pkg_string)
error_message = "\n".join(error_message_list)
workload["succeeded"] = False
workload["errors"]["message"] = str(error_message)
#log(" Failed! (Error message will be on the workload results page.")
#log("")
return workload
if workload["warnings"]["non_existing_pkgs"] or workload["warnings"]["non_existing_placeholder_deps"]:
error_message_list = []
if workload["warnings"]["non_existing_pkgs"]:
error_message_list.append("The following required packages are not available (and were skipped):")
for pkg_name in workload["warnings"]["non_existing_pkgs"]:
pkg_string = " - {pkg_name}".format(
pkg_name=pkg_name
)
error_message_list.append(pkg_string)
if workload["warnings"]["non_existing_placeholder_deps"]:
error_message_list.append("The following dependencies of package placeholders are not available (and were skipped):")
for pkg_name in workload["warnings"]["non_existing_placeholder_deps"]:
pkg_string = " - {pkg_name}".format(
pkg_name=pkg_name
)
error_message_list.append(pkg_string)
error_message = "\n".join(error_message_list)
workload["warnings"]["message"] = str(error_message)
# Resolve dependencies
#log(" Resolving dependencies...")
try:
base.resolve()
except dnf.exceptions.DepsolveError as err:
workload["succeeded"] = False
workload["errors"]["message"] = str(err)
#log(" Failed! (Error message will be on the workload results page.")
#log("")
return workload
# DNF Query
#log(" Creating a DNF Query object...")
query_env = base.sack.query()
query_added = base.sack.query().filterm(pkg=base.transaction.install_set)
pkgs_env = set(query_env.installed())
pkgs_added = set(base.transaction.install_set)
pkgs_all = set.union(pkgs_env, pkgs_added)
query_all = base.sack.query().filterm(pkg=pkgs_all)
for pkg in pkgs_env:
pkg_id = "{name}-{evr}.{arch}".format(
name=pkg.name,
evr=pkg.evr,
arch=pkg.arch
)
workload["pkg_env_ids"].append(pkg_id)
for pkg in pkgs_added:
pkg_id = "{name}-{evr}.{arch}".format(
name=pkg.name,
evr=pkg.evr,
arch=pkg.arch
)
workload["pkg_added_ids"].append(pkg_id)
# No errors so far? That means the analysis has succeeded,
# so placeholders can be added to the list as well.
# (Failed workloads need to have empty results, that's why)
for placeholder_name in package_placeholders:
workload["pkg_placeholder_ids"].append(pkg_placeholder_name_to_id(placeholder_name))
for srpm_placeholder_name in srpm_placeholders:
workload["srpm_placeholder_names"].append(srpm_placeholder_name)
workload["pkg_relations"] = self._analyze_package_relations(query_all, package_placeholders)
pkg_env_count = len(workload["pkg_env_ids"])
pkg_added_count = len(workload["pkg_added_ids"])
#log(" Done! ({pkg_count} packages in total. That's {pkg_env_count} in the environment, and {pkg_added_count} added.)".format(
# pkg_count=str(pkg_env_count + pkg_added_count),
# pkg_env_count=pkg_env_count,
# pkg_added_count=pkg_added_count
#))
#log("")
return workload
def _analyze_workload_process(self, queue_result, workload_conf, env_conf, repo, arch):
workload = self._analyze_workload(workload_conf, env_conf, repo, arch)
queue_result.put(workload)
async def _analyze_workloads_subset_async(self, task_queue, results):
for task in task_queue:
workload_conf = task["workload_conf"]
env_conf = task["env_conf"]
repo = task["repo"]
arch = task["arch"]
workload_id = "{workload_conf_id}:{env_conf_id}:{repo_id}:{arch}".format(
workload_conf_id=workload_conf["id"],
env_conf_id=env_conf["id"],
repo_id=repo["id"],
arch=arch
)
# Max processes
while True:
if self.current_subprocesses < self.settings["max_subprocesses"]:
self.current_subprocesses += 1
break
else:
await asyncio.sleep(.1)
# Log progress
self.workload_queue_counter_current += 1
log("[{} of {}]".format(self.workload_queue_counter_current, self.workload_queue_counter_total))
log("Analyzing workload: {}".format(workload_id))
log("")
queue_result = multiprocessing.Queue()
process = multiprocessing.Process(target=self._analyze_workload_process, args=(queue_result, workload_conf, env_conf, repo, arch), daemon=True)
process.start()
# Now wait a bit for the result.
# This is a terrible way to implement an async way to
# wait for the result with a 222 seconds timeout.
# But it works. If anyone knows how to make it nicer, let me know! :D
# 2 seconds
for _ in range(1, 20):
if queue_result.empty():
await asyncio.sleep(.1)
else:
break
# 20 seconds
for _ in range(1, 20):
if queue_result.empty():
await asyncio.sleep(1)
else:
break
# 200 seconds
for _ in range(1, 20):
if queue_result.empty():
await asyncio.sleep(10)
else:
break
self.current_subprocesses -= 1
# This basically means there was an exception in the processing and the process crashed
if queue_result.empty():
log("")
log("")
log("--------------------------------------------------------------------------")
log("")
log("ERROR: Workload analysis failed")
log("")
log("Details:")
log(" workload_conf: {}".format(workload_conf["id"]))
log(" env_conf: {}".format(env_conf["id"]))
log(" repo: {}".format(repo["id"]))
log(" arch: {}".format(arch))
log("")
log("More details somewhere above.")
log("")
log("--------------------------------------------------------------------------")
log("")
log("")
sys.exit(1)
workload = queue_result.get()
results[workload_id] = workload
async def _analyze_workloads_async(self, results):
tasks = []
for repo in self.workload_queue:
for arch in self.workload_queue[repo]:
task_queue = self.workload_queue[repo][arch]
tasks.append(asyncio.create_task(self._analyze_workloads_subset_async(task_queue, results)))
for task in tasks:
await task
log("DONE!")
def _queue_workload_processing(self, workload_conf, env_conf, repo, arch):
repo_id = repo["id"]
if repo_id not in self.workload_queue:
self.workload_queue[repo_id] = {}
if arch not in self.workload_queue[repo_id]:
self.workload_queue[repo_id][arch] = []
workload_task = {
"workload_conf": workload_conf,
"env_conf" : env_conf,
"repo" : repo,
"arch" : arch
}
self.workload_queue[repo_id][arch].append(workload_task)
self.workload_queue_counter_total += 1
def _reset_workload_processing_queue(self):
self.workload_queue = {}
self.workload_queue_counter_total = 0
self.workload_queue_counter_current = 0
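
    # The workload queue built by _queue_workload_processing() is grouped by
    # repo and architecture, roughly:
    #
    #   self.workload_queue = {
    #       "<repo_id>": {
    #           "<arch>": [
    #               {"workload_conf": ..., "env_conf": ..., "repo": ..., "arch": ...},
    #           ],
    #       },
    #   }
    #
    # Each per-arch list is processed by one _analyze_workloads_subset_async()
    # task, and individual workloads run in short-lived subprocesses.
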
def _analyze_workloads(self):
# Initialise
self.data["workloads"] = {}
self._reset_workload_processing_queue()
# Here, I need to mix and match workloads & envs based on labels
workload_env_map = {}
# Look at all workload configs...
for workload_conf_id, workload_conf in self.configs["workloads"].items():
workload_env_map[workload_conf_id] = set()
# ... and all of their labels.
for label in workload_conf["labels"]:
# And for each label, find all env configs...
for env_conf_id, env_conf in self.configs["envs"].items():
# ... that also have the label.
if label in env_conf["labels"]:
# And save those.
workload_env_map[workload_conf_id].add(env_conf_id)
# And now, look at all workload configs...
for workload_conf_id, workload_conf in self.configs["workloads"].items():
# ... and for each, look at all env configs it should be analyzed in.
for env_conf_id in workload_env_map[workload_conf_id]:
# Each of those envs can have multiple repos associated...
env_conf = self.configs["envs"][env_conf_id]
for repo_id in env_conf["repositories"]:
# ... and each repo probably has multiple architecture.
repo = self.configs["repos"][repo_id]
for arch in repo["source"]["architectures"]:
# And now it has:
# all workload configs *
# all envs that match those *
# all repos of those envs *
# all arches of those repos.
# That's a lot of stuff! Let's analyze all of that!
# Before even started, look if the env succeeded. If not, there's
# no point in doing anything here.
env_id = "{env_conf_id}:{repo_id}:{arch}".format(
env_conf_id=env_conf["id"],
repo_id=repo["id"],
arch=arch
)
env = self.data["envs"][env_id]
if env["succeeded"]:
self._queue_workload_processing(workload_conf, env_conf, repo, arch)
else:
workload_id = "{workload_conf_id}:{env_conf_id}:{repo_id}:{arch}".format(
workload_conf_id=workload_conf_id,
env_conf_id=env_conf_id,
repo_id=repo_id,
arch=arch
)
self.data["workloads"][workload_id] = _return_failed_workload_env_err(workload_conf, env_conf, repo, arch)
asyncio.run(self._analyze_workloads_async(self.data["workloads"]))
def _init_view_pkg(self, input_pkg, arch, placeholder=False, level=0):
if placeholder:
pkg = {
"id": pkg_placeholder_name_to_id(input_pkg["name"]),
"name": input_pkg["name"],
"evr": "000-placeholder",
"nevr": pkg_placeholder_name_to_nevr(input_pkg["name"]),
"arch": "placeholder",
"installsize": 0,
"description": input_pkg["description"],
"summary": input_pkg["description"],
"source_name": input_pkg["srpm"],
"sourcerpm": "{}-000-placeholder".format(input_pkg["srpm"]),
"q_arch": input_pkg,
"reponame": "n/a"
}
else:
pkg = dict(input_pkg)
pkg["view_arch"] = arch
pkg["placeholder"] = placeholder
pkg["in_workload_ids_all"] = set()
pkg["in_workload_ids_req"] = set()
pkg["in_workload_ids_dep"] = set()
pkg["in_workload_ids_env"] = set()
pkg["in_buildroot_of_srpm_id_all"] = set()
pkg["in_buildroot_of_srpm_id_req"] = set()
pkg["in_buildroot_of_srpm_id_dep"] = set()
pkg["in_buildroot_of_srpm_id_env"] = set()
pkg["unwanted_completely_in_list_ids"] = set()
pkg["unwanted_buildroot_in_list_ids"] = set()
pkg["level"] = []
# Level 0 is runtime
pkg["level"].append({
"all": pkg["in_workload_ids_all"],
"req": pkg["in_workload_ids_req"],
"dep": pkg["in_workload_ids_dep"],
"env": pkg["in_workload_ids_env"],
})
# Level 1 and higher is buildroot
for _ in range(level):
pkg["level"].append({
"all": set(),
"req": set(),
"dep": set(),
"env": set()
})
pkg["required_by"] = set()
pkg["recommended_by"] = set()
pkg["suggested_by"] = set()
return pkg
def _init_view_srpm(self, pkg, level=0):
srpm_id = pkg["sourcerpm"].rsplit(".src.rpm")[0]
srpm = {}
srpm["id"] = srpm_id
srpm["name"] = pkg["source_name"]
srpm["reponame"] = pkg["reponame"]
srpm["pkg_ids"] = set()
srpm["placeholder"] = False
srpm["placeholder_directly_required_pkg_names"] = []
srpm["in_workload_ids_all"] = set()
srpm["in_workload_ids_req"] = set()
srpm["in_workload_ids_dep"] = set()
srpm["in_workload_ids_env"] = set()
srpm["in_buildroot_of_srpm_id_all"] = set()
srpm["in_buildroot_of_srpm_id_req"] = set()
srpm["in_buildroot_of_srpm_id_dep"] = set()
srpm["in_buildroot_of_srpm_id_env"] = set()
srpm["unwanted_completely_in_list_ids"] = set()
srpm["unwanted_buildroot_in_list_ids"] = set()
srpm["level"] = []
# Level 0 is runtime
srpm["level"].append({
"all": srpm["in_workload_ids_all"],
"req": srpm["in_workload_ids_req"],
"dep": srpm["in_workload_ids_dep"],
"env": srpm["in_workload_ids_env"],
})
# Level 1 and higher is buildroot
for _ in range(level):
srpm["level"].append({
"all": set(),
"req": set(),
"dep": set(),
"env": set()
})
return srpm
def _analyze_view(self, view_conf, arch, views):
view_conf_id = view_conf["id"]
log("Analyzing view: {view_name} ({view_conf_id}) for {arch}".format(
view_name=view_conf["name"],
view_conf_id=view_conf_id,
arch=arch
))
view_id = "{view_conf_id}:{arch}".format(
view_conf_id=view_conf_id,
arch=arch
)
repo_id = view_conf["repository"]
# Setting up the data buckets for this view
view = {}
view["id"] = view_id
view["view_conf_id"] = view_conf_id
view["arch"] = arch
view["workload_ids"] = []
view["pkgs"] = {}
view["source_pkgs"] = {}
view["modules"] = {}
# Workloads
for workload_id, workload in self.data["workloads"].items():
if workload["repo_id"] != repo_id:
continue
if workload["arch"] != arch:
continue
if not set(workload["labels"]) & set(view_conf["labels"]):
continue
view["workload_ids"].append(workload_id)
log(" Includes {} workloads.".format(len(view["workload_ids"])))
# Packages
for workload_id in view["workload_ids"]:
workload = self.data["workloads"][workload_id]
workload_conf_id = workload["workload_conf_id"]
workload_conf = self.configs["workloads"][workload_conf_id]
# Packages in the environment
for pkg_id in workload["pkg_env_ids"]:
# Initialise
if pkg_id not in view["pkgs"]:
pkg = self.data["pkgs"][repo_id][arch][pkg_id]
view["pkgs"][pkg_id] = self._init_view_pkg(pkg, arch)
# It's in this workload
view["pkgs"][pkg_id]["in_workload_ids_all"].add(workload_id)
# And in the environment
view["pkgs"][pkg_id]["in_workload_ids_env"].add(workload_id)
# Is it also required?
if view["pkgs"][pkg_id]["name"] in workload_conf["packages"]:
view["pkgs"][pkg_id]["in_workload_ids_req"].add(workload_id)
elif view["pkgs"][pkg_id]["name"] in workload_conf["arch_packages"][arch]:
view["pkgs"][pkg_id]["in_workload_ids_req"].add(workload_id)
# pkg_relations
view["pkgs"][pkg_id]["required_by"].update(workload["pkg_relations"][pkg_id]["required_by"])
view["pkgs"][pkg_id]["recommended_by"].update(workload["pkg_relations"][pkg_id]["recommended_by"])
view["pkgs"][pkg_id]["suggested_by"].update(workload["pkg_relations"][pkg_id]["suggested_by"])
# Packages added by this workload (required or dependency)
for pkg_id in workload["pkg_added_ids"]:
# Initialise
if pkg_id not in view["pkgs"]:
pkg = self.data["pkgs"][repo_id][arch][pkg_id]
view["pkgs"][pkg_id] = self._init_view_pkg(pkg, arch)
# It's in this workload
view["pkgs"][pkg_id]["in_workload_ids_all"].add(workload_id)
# Is it required?
if view["pkgs"][pkg_id]["name"] in workload_conf["packages"]:
view["pkgs"][pkg_id]["in_workload_ids_req"].add(workload_id)
elif view["pkgs"][pkg_id]["name"] in workload_conf["arch_packages"][arch]:
view["pkgs"][pkg_id]["in_workload_ids_req"].add(workload_id)
# Or a dependency?
else:
view["pkgs"][pkg_id]["in_workload_ids_dep"].add(workload_id)
# pkg_relations
view["pkgs"][pkg_id]["required_by"].update(workload["pkg_relations"][pkg_id]["required_by"])
view["pkgs"][pkg_id]["recommended_by"].update(workload["pkg_relations"][pkg_id]["recommended_by"])
view["pkgs"][pkg_id]["suggested_by"].update(workload["pkg_relations"][pkg_id]["suggested_by"])
# And finally the non-existing, imaginary, package placeholders!
for pkg_id in workload["pkg_placeholder_ids"]:
# Initialise
if pkg_id not in view["pkgs"]:
placeholder = workload_conf["package_placeholders"]["pkgs"][pkg_id_to_name(pkg_id)]
view["pkgs"][pkg_id] = self._init_view_pkg(placeholder, arch, placeholder=True)
# It's in this workload
view["pkgs"][pkg_id]["in_workload_ids_all"].add(workload_id)
# Placeholders are by definition required
view["pkgs"][pkg_id]["in_workload_ids_req"].add(workload_id)
# ... including the SRPM placeholders
for srpm_name in workload["srpm_placeholder_names"]:
srpm_id = pkg_placeholder_name_to_nevr(srpm_name)
# Initialise
if srpm_id not in view["source_pkgs"]:
sourcerpm = "{}.src.rpm".format(srpm_id)
view["source_pkgs"][srpm_id] = self._init_view_srpm({"sourcerpm": sourcerpm, "source_name": srpm_name, "reponame": None})
# It's a placeholder
view["source_pkgs"][srpm_id]["placeholder"] = True
# Build requires
view["source_pkgs"][srpm_id]["placeholder_directly_required_pkg_names"] = workload_conf["package_placeholders"]["srpms"][srpm_name]["buildrequires"]
# Oh! And modules
for module_id in workload["enabled_modules"]:
# Initiate
if module_id not in view["modules"]:
view["modules"][module_id] = {}
view["modules"][module_id]["id"] = module_id
view["modules"][module_id]["in_workload_ids_all"] = set()
view["modules"][module_id]["in_workload_ids_req"] = set()
view["modules"][module_id]["in_workload_ids_dep"] = set()
# It's in this workload
view["modules"][module_id]["in_workload_ids_all"].add(workload_id)
# Is it required?
if module_id in workload_conf["modules_enable"]:
view["modules"][module_id]["in_workload_ids_req"].add(workload_id)
else:
view["modules"][module_id]["in_workload_ids_dep"].add(workload_id)
# If this is an addon view, remove all packages that are already in the parent view
if view_conf["type"] == "addon":
base_view_conf_id = view_conf["base_view_id"]
base_view_id = "{base_view_conf_id}:{arch}".format(
base_view_conf_id=base_view_conf_id,
arch=arch
)
for base_view_pkg_id in views[base_view_id]["pkgs"]:
if base_view_pkg_id in view["pkgs"]:
del view["pkgs"][base_view_pkg_id]
# Done with packages!
log(" Includes {} packages.".format(len(view["pkgs"])))
log(" Includes {} modules.".format(len(view["modules"])))
# But not with source packages, that's an entirely different story!
for pkg_id, pkg in view["pkgs"].items():
srpm_id = pkg["sourcerpm"].rsplit(".src.rpm")[0]
if srpm_id not in view["source_pkgs"]:
view["source_pkgs"][srpm_id] = self._init_view_srpm(pkg)
# Include some information from the RPM
view["source_pkgs"][srpm_id]["pkg_ids"].add(pkg_id)
view["source_pkgs"][srpm_id]["in_workload_ids_all"].update(pkg["in_workload_ids_all"])
view["source_pkgs"][srpm_id]["in_workload_ids_req"].update(pkg["in_workload_ids_req"])
view["source_pkgs"][srpm_id]["in_workload_ids_dep"].update(pkg["in_workload_ids_dep"])
view["source_pkgs"][srpm_id]["in_workload_ids_env"].update(pkg["in_workload_ids_env"])
log(" Includes {} source packages.".format(len(view["source_pkgs"])))
log(" DONE!")
log("")
return view
def _analyze_views(self):
views = {}
# First, analyse the standard views
for view_conf_id in self.configs["views"]:
view_conf = self.configs["views"][view_conf_id]
if view_conf["type"] == "compose":
for arch in view_conf["architectures"]:
view = self._analyze_view(view_conf, arch, views)
view_id = view["id"]
views[view_id] = view
# Second, analyse the addon views
# This is important as they need the standard views already available
for view_conf_id in self.configs["views"]:
view_conf = self.configs["views"][view_conf_id]
if view_conf["type"] == "addon":
base_view_conf_id = view_conf["base_view_id"]
base_view_conf = self.configs["views"][base_view_conf_id]
for arch in set(view_conf["architectures"]) & set(base_view_conf["architectures"]):
view = self._analyze_view(view_conf, arch, views)
view_id = view["id"]
views[view_id] = view
self.data["views"] = views
def _populate_buildroot_with_view_srpms(self, view_conf, arch):
view_conf_id = view_conf["id"]
log("Initialising buildroot packages of: {view_name} ({view_conf_id}) for {arch}".format(
view_name=view_conf["name"],
view_conf_id=view_conf_id,
arch=arch
))
view_id = "{view_conf_id}:{arch}".format(
view_conf_id=view_conf_id,
arch=arch
)
view = self.data["views"][view_id]
repo_id = view_conf["repository"]
# Initialise the srpms section
if repo_id not in self.data["buildroot"]["srpms"]:
self.data["buildroot"]["srpms"][repo_id] = {}
if arch not in self.data["buildroot"]["srpms"][repo_id]:
self.data["buildroot"]["srpms"][repo_id][arch] = {}
# Initialise each srpm
for srpm_id, srpm in view["source_pkgs"].items():
if srpm["placeholder"]:
directly_required_pkg_names = srpm["placeholder_directly_required_pkg_names"]
else:
# This is the same set in both koji_srpms and srpms
directly_required_pkg_names = set()
# Do I need to extract the build dependencies from koji root_logs?
# Then I also need to save the srpms in the koji_srpms section
if view_conf["buildroot_strategy"] == "root_logs":
srpm_reponame = srpm["reponame"]
koji_api_url = self.configs["repos"][repo_id]["source"]["repos"][srpm_reponame]["koji_api_url"]
koji_files_url = self.configs["repos"][repo_id]["source"]["repos"][srpm_reponame]["koji_files_url"]
koji_id = url_to_id(koji_api_url)
# Initialise the koji_srpms section
if koji_id not in self.data["buildroot"]["koji_srpms"]:
# SRPMs
self.data["buildroot"]["koji_srpms"][koji_id] = {}
# URLs
self.data["buildroot"]["koji_urls"][koji_id] = {}
self.data["buildroot"]["koji_urls"][koji_id]["api"] = koji_api_url
self.data["buildroot"]["koji_urls"][koji_id]["files"] = koji_files_url
if arch not in self.data["buildroot"]["koji_srpms"][koji_id]:
self.data["buildroot"]["koji_srpms"][koji_id][arch] = {}
# Initialise srpms in the koji_srpms section
if srpm_id not in self.data["buildroot"]["koji_srpms"][koji_id][arch]:
self.data["buildroot"]["koji_srpms"][koji_id][arch][srpm_id] = {}
self.data["buildroot"]["koji_srpms"][koji_id][arch][srpm_id]["id"] = srpm_id
self.data["buildroot"]["koji_srpms"][koji_id][arch][srpm_id]["directly_required_pkg_names"] = directly_required_pkg_names
else:
directly_required_pkg_names = self.data["buildroot"]["koji_srpms"][koji_id][arch][srpm_id]["directly_required_pkg_names"]
# Initialise srpms in the srpms section
if srpm_id not in self.data["buildroot"]["srpms"][repo_id][arch]:
self.data["buildroot"]["srpms"][repo_id][arch][srpm_id] = {}
self.data["buildroot"]["srpms"][repo_id][arch][srpm_id]["id"] = srpm_id
self.data["buildroot"]["srpms"][repo_id][arch][srpm_id]["directly_required_pkg_names"] = directly_required_pkg_names
self.data["buildroot"]["srpms"][repo_id][arch][srpm_id]["pkg_relations"] = {}
self.data["buildroot"]["srpms"][repo_id][arch][srpm_id]["pkg_env_ids"] = set()
self.data["buildroot"]["srpms"][repo_id][arch][srpm_id]["pkg_added_ids"] = set()
self.data["buildroot"]["srpms"][repo_id][arch][srpm_id]["errors"] = {}
self.data["buildroot"]["srpms"][repo_id][arch][srpm_id]["errors"]["non_existing_pkgs"] = set()
self.data["buildroot"]["srpms"][repo_id][arch][srpm_id]["errors"]["message"] = ""
self.data["buildroot"]["srpms"][repo_id][arch][srpm_id]["succeeded"] = False
self.data["buildroot"]["srpms"][repo_id][arch][srpm_id]["queued"] = False
self.data["buildroot"]["srpms"][repo_id][arch][srpm_id]["processed"] = False
log(" DONE!")
log("")
def _get_build_deps_from_a_root_log(self, root_log):
required_pkgs = []
# The individual states are nicely described inside the for loop.
# They're processed in order
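# Quick summary of the states (the exact trigger strings are in the checks below):
#   0: skip everything until the '/usr/bin/dnf builddep' command execution line
#   1: collect the "is already installed." packages, until "Dependencies resolved."
#   2: skip until the "Installing:" header
#   3: collect package names until "Installing dependencies:" or "Transaction Summary"
#   4: done, stop reading the log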
state = 0
for file_line in root_log.splitlines():
# 0/
# parts of the log I don't really care about
if state == 0:
# The next installation is the build deps!
# So I start caring. Next state!
if "Executing command: ['/usr/bin/dnf', 'builddep'" in file_line:
state += 1
# 1/
# getting the "already installed" packages to the list
elif state == 1:
# "Package already installed" indicates it's directly required,
# so save it.
if "is already installed." in file_line:
pkg_name = file_line.split()[3].rsplit("-",2)[0]
required_pkgs.append(pkg_name)
# That's all! Next state!
elif "Dependencies resolved." in file_line:
state += 1
# 2/
# going through the log right before the first package name
elif state == 2:
# The next line will be the first package. Next state!
if "Installing:" in file_line:
state += 1
# 3/
# And now just saving the packages until the "installing dependencies" part
# or the "transaction summary" part if there's no dependencies
elif state == 3:
if "Installing dependencies:" in file_line:
state += 1
elif "Transaction Summary" in file_line:
state += 1
else:
# I need to deal with the following thing...
#
# DEBUG util.py:446: gobject-introspection-devel aarch64 1.70.0-1.fc36 build 1.1 M
# DEBUG util.py:446: graphene-devel aarch64 1.10.6-3.fc35 build 159 k
# DEBUG util.py:446: gstreamer1-plugins-bad-free-devel
# DEBUG util.py:446: aarch64 1.19.2-1.fc36 build 244 k
# DEBUG util.py:446: json-glib-devel aarch64 1.6.6-1.fc36 build 173 k
# DEBUG util.py:446: libXcomposite-devel aarch64 0.4.5-6.fc35 build 16 k
#
# The "gstreamer1-plugins-bad-free-devel" package name is too long to fit in the column,
# so it gets split on two lines.
#
# If I take the usual file_line.split()[2], I get the correct name,
# but the next line would give me "aarch64" as the package name, which is wrong.
#
# So the usual line has len(file_line.split()) == 8,
# the one with the long package name has len(file_line.split()) == 3,
# and the one following it has len(file_line.split()) == 7.
#
# One more thing... long release!
#
# DEBUG util.py:446: qrencode-devel aarch64 4.0.2-8.fc35 build 13 k
# DEBUG util.py:446: systemtap-sdt-devel aarch64 4.6~pre16291338gf2c14776-1.fc36
# DEBUG util.py:446: build 71 k
# DEBUG util.py:446: tpm2-tss-devel aarch64 3.1.0-4.fc36 build 315 k
#
# So the good one here has len(file_line.split()) == 5.
# And the following one also has len(file_line.split()) == 5. Fun!
#
# So if it ends with B, k, M, G it's the wrong line, so skip, otherwise take the package name.
#
# I can also anticipate both getting long... that would mean I need to skip len(file_line.split()) == 4.
if len(file_line.split()) == 8 or len(file_line.split()) == 3:
pkg_name = file_line.split()[2]
required_pkgs.append(pkg_name)
elif len(file_line.split()) == 7 or len(file_line.split()) == 4:
continue
elif len(file_line.split()) == 5:
if file_line.split()[4] in ["B", "k", "M", "G"]:
continue
else:
pkg_name = file_line.split()[2]
required_pkgs.append(pkg_name)
else:
raise KojiRootLogError
# 4/
# I'm done. So I can break out of the loop.
elif state == 4:
break
return required_pkgs
def _resolve_srpm_using_root_log(self, srpm_id, arch, koji_session, koji_files_url):
# Buildroot grows pretty quickly. Use a fake one for development.
if self.settings["dev_buildroot"]:
# Making sure there are at least 3 passes, but that it won't get overwhelmed
if srpm_id.rsplit("-",2)[0] in ["bash", "make", "unzip"]:
return ["gawk", "xz", "findutils"]
elif srpm_id.rsplit("-",2)[0] in ["gawk", "xz", "findutils"]:
return ['cpio', 'diffutils']
return ["bash", "make", "unzip"]
# Shim is special.
if srpm_id.rsplit("-",2)[0] in ["shim"]:
log( "It's shim! It gets sometiems tagged from wherever... Let's not even bother!")
return []
# Starting for real!
log(" Talking to Koji API...")
# This sometimes hangs, so I'm giving it a timeout and
# a few extra tries before totally giving up!
MAX_TRIES = 10
attempts = 0
success = False
while attempts < MAX_TRIES:
try:
koji_pkg_data = koji_session.getRPM("{}.src".format(srpm_id))
koji_logs = koji_session.getBuildLogs(koji_pkg_data["build_id"])
success = True
break
except:
attempts +=1
log(" Error talking to Koji API... retrying...")
if not success:
raise KojiRootLogError("Could not talk to Koji API")
koji_log_path = None
for koji_log in koji_logs:
if koji_log["name"] == "root.log":
if koji_log["dir"] == arch or koji_log["dir"] == "noarch":
koji_log_path = koji_log["path"]
root_log_url = "{koji_files_url}/{koji_log_path}".format(
koji_files_url=koji_files_url,
koji_log_path=koji_log_path
)
log(" Downloading the root.log file...")
# This sometimes hangs, so I'm giving it a timeout and
# a few extra tries before totally giving up!
MAX_TRIES = 10
attempts = 0
success = False
while attempts < MAX_TRIES:
try:
with urllib.request.urlopen(root_log_url, timeout=20) as response:
root_log_data = response.read()
root_log_contents = root_log_data.decode('utf-8')
success = True
break
except:
attempts +=1
log(" Error getting the root log... retrying...")
if not success:
raise KojiRootLogError("Could not get a root.log file")
log(" Parsing the root.log file...")
directly_required_pkg_names = self._get_build_deps_from_a_root_log(root_log_contents)
log(" Done!")
return directly_required_pkg_names
def _resolve_srpms_using_root_logs(self, pass_counter):
# This function is idempotent!
#
# That means it can be run many times without affecting the old results.
log("== Resolving SRPMs using root logs - pass {} ========".format(pass_counter))
# Prepare a counter for the log
total_srpms_to_resolve = 0
for koji_id in self.data["buildroot"]["koji_srpms"]:
for arch in self.data["buildroot"]["koji_srpms"][koji_id]:
total_srpms_to_resolve += len(self.data["buildroot"]["koji_srpms"][koji_id][arch])
srpms_to_resolve_counter = 0
# I need to keep sessions open to Koji.
# And because in some cases (in mixed repos) packages
# could come from different Koji instances, I need
# multiple Koji sessions!
koji_sessions = {}
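# A note on the root_log_deps cache (an assumption based on how it's used here):
# "current" appears to hold results carried over from a previous run, while anything
# resolved in this run is written into "next", so a later run can reuse it.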
for koji_id in self.data["buildroot"]["koji_srpms"]:
koji_urls = self.data["buildroot"]["koji_urls"][koji_id]
# If the cache is empty, initialise it
if koji_id not in self.cache["root_log_deps"]["current"]:
self.cache["root_log_deps"]["current"][koji_id] = {}
if koji_id not in self.cache["root_log_deps"]["next"]:
self.cache["root_log_deps"]["next"][koji_id] = {}
# Initiate Koji sessions
if koji_id not in koji_sessions:
koji_sessions[koji_id] = koji.ClientSession(koji_urls["api"], opts = {"timeout": 20})
for arch in self.data["buildroot"]["koji_srpms"][koji_id]:
# If the cache is empty, initialise it
if arch not in self.cache["root_log_deps"]["current"][koji_id]:
self.cache["root_log_deps"]["current"][koji_id][arch] = {}
if arch not in self.cache["root_log_deps"]["next"][koji_id]:
self.cache["root_log_deps"]["next"][koji_id][arch] = {}
for srpm_id, srpm in self.data["buildroot"]["koji_srpms"][koji_id][arch].items():
srpms_to_resolve_counter += 1
log("")
log("[ Buildroot - pass {} - {} of {} ]".format(pass_counter, srpms_to_resolve_counter, total_srpms_to_resolve))
log("Koji root_log {srpm_id} {arch}".format(
srpm_id=srpm_id,
arch=arch
))
if not srpm["directly_required_pkg_names"]:
if srpm_id in self.cache["root_log_deps"]["current"][koji_id][arch]:
log(" Using Cache!")
directly_required_pkg_names = self.cache["root_log_deps"]["current"][koji_id][arch][srpm_id]
elif srpm_id in self.cache["root_log_deps"]["next"][koji_id][arch]:
log(" Using Cache!")
directly_required_pkg_names = self.cache["root_log_deps"]["next"][koji_id][arch][srpm_id]
else:
log(" Resolving...")
directly_required_pkg_names = self._resolve_srpm_using_root_log(srpm_id, arch, koji_sessions[koji_id], koji_urls["files"])
self.cache["root_log_deps"]["next"][koji_id][arch][srpm_id] = directly_required_pkg_names
# Here it's important to add the packages to the already initialised
# set, because its reference is shared between the koji_srpms and the srpms sections
self.data["buildroot"]["koji_srpms"][koji_id][arch][srpm_id]["directly_required_pkg_names"].update(directly_required_pkg_names)
else:
log(" Skipping! (already done before)")
log("")
log(" DONE!")
log("")
def _analyze_build_groups(self):
log("")
log("Analyzing build groups...")
log("")
# Need to analyse build groups for all repo_ids
# and arches of buildroot["srpms"]
for repo_id in self.data["buildroot"]["srpms"]:
self.data["buildroot"]["build_groups"][repo_id] = {}
for arch in self.data["buildroot"]["srpms"][repo_id]:
generated_id = "CR-buildroot-base-env-{repo_id}-{arch}".format(
repo_id=repo_id,
arch=arch
)
# Using the _analyze_env function!
# So I need to reconstruct a fake env_conf
fake_env_conf = {}
fake_env_conf["id"] = generated_id
fake_env_conf["options"] = []
if self.configs["repos"][repo_id]["source"]["base_buildroot_override"]:
fake_env_conf["packages"] = self.configs["repos"][repo_id]["source"]["base_buildroot_override"]
fake_env_conf["groups"] = []
else:
fake_env_conf["packages"] = []
fake_env_conf["groups"] = ["build"]
fake_env_conf["arch_packages"] = {}
fake_env_conf["arch_packages"][arch] = []
log("Resolving build group: {repo_id} {arch}".format(
repo_id=repo_id,
arch=arch
))
repo = self.configs["repos"][repo_id]
fake_env = self._analyze_env(fake_env_conf, repo, arch)
# If this fails, the buildroot can't be resolved.
# Fail the entire content resolver build!
if not fake_env["succeeded"]:
raise BuildGroupAnalysisError
self.data["buildroot"]["build_groups"][repo_id][arch] = fake_env
self.data["buildroot"]["build_groups"][repo_id][arch]["generated_id"] = generated_id
log("")
log(" DONE!")
log("")
def _expand_buildroot_srpms(self):
# This function is idempotent!
#
# That means it can be run many times without affecting the old results.
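# It walks the already resolved buildroots and registers the SRPMs of any
# newly pulled-in RPMs, so their own buildroots can get resolved in the next pass.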
log("Expanding the SRPM set...")
counter = 0
for repo_id in self.data["buildroot"]["srpms"]:
for arch in self.data["buildroot"]["srpms"][repo_id]:
top_lvl_srpm_ids = set(self.data["buildroot"]["srpms"][repo_id][arch])
for top_lvl_srpm_id in top_lvl_srpm_ids:
top_lvl_srpm = self.data["buildroot"]["srpms"][repo_id][arch][top_lvl_srpm_id]
for pkg_id in top_lvl_srpm["pkg_relations"]:
srpm_id = self.data["pkgs"][repo_id][arch][pkg_id]["sourcerpm"].rsplit(".src.rpm")[0]
if srpm_id in self.data["buildroot"]["srpms"][repo_id][arch]:
continue
# Adding a new one!
counter += 1
srpm_reponame = self.data["pkgs"][repo_id][arch][pkg_id]["reponame"]
# This is the same set in both koji_srpms and srpms
directly_required_pkg_names = set()
koji_api_url = self.configs["repos"][repo_id]["source"]["repos"][srpm_reponame]["koji_api_url"]
koji_files_url = self.configs["repos"][repo_id]["source"]["repos"][srpm_reponame]["koji_files_url"]
koji_id = url_to_id(koji_api_url)
# Initialise the srpm in the koji_srpms section
if srpm_id not in self.data["buildroot"]["koji_srpms"][koji_id][arch]:
self.data["buildroot"]["koji_srpms"][koji_id][arch][srpm_id] = {}
self.data["buildroot"]["koji_srpms"][koji_id][arch][srpm_id]["id"] = srpm_id
self.data["buildroot"]["koji_srpms"][koji_id][arch][srpm_id]["directly_required_pkg_names"] = directly_required_pkg_names
else:
directly_required_pkg_names = self.data["buildroot"]["koji_srpms"][koji_id][arch][srpm_id]["directly_required_pkg_names"]
# Initialise the srpm in the srpms section
self.data["buildroot"]["srpms"][repo_id][arch][srpm_id] = {}
self.data["buildroot"]["srpms"][repo_id][arch][srpm_id]["id"] = srpm_id
self.data["buildroot"]["srpms"][repo_id][arch][srpm_id]["directly_required_pkg_names"] = directly_required_pkg_names
self.data["buildroot"]["srpms"][repo_id][arch][srpm_id]["pkg_relations"] = {}
self.data["buildroot"]["srpms"][repo_id][arch][srpm_id]["pkg_env_ids"] = set()
self.data["buildroot"]["srpms"][repo_id][arch][srpm_id]["pkg_added_ids"] = set()
self.data["buildroot"]["srpms"][repo_id][arch][srpm_id]["errors"] = {}
self.data["buildroot"]["srpms"][repo_id][arch][srpm_id]["errors"]["non_existing_pkgs"] = set()
self.data["buildroot"]["srpms"][repo_id][arch][srpm_id]["errors"]["message"] = ""
self.data["buildroot"]["srpms"][repo_id][arch][srpm_id]["succeeded"] = False
self.data["buildroot"]["srpms"][repo_id][arch][srpm_id]["queued"] = False
self.data["buildroot"]["srpms"][repo_id][arch][srpm_id]["processed"] = False
log(" Found {} new SRPMs!".format(counter))
log(" DONE!")
log("")
return counter
def _analyze_srpm_buildroots(self, pass_counter):
# This function is idempotent!
#
# That means it can be run many times without affecting the old results.
log("")
log("Analyzing SRPM buildroots...")
log("")
# Initialise things for the workload resolver
self._reset_workload_processing_queue()
fake_workload_results = {}
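# The trick here: each SRPM buildroot is resolved as if it was a workload, where
# the SRPM's directly required build deps play the role of the workload packages
# and the repo/arch build group plays the role of the environment.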
# Prepare a counter for the log
total_srpms_to_resolve = 0
for repo_id in self.data["buildroot"]["srpms"]:
for arch in self.data["buildroot"]["srpms"][repo_id]:
for srpm_id, srpm in self.data["buildroot"]["srpms"][repo_id][arch].items():
if srpm["processed"]:
continue
total_srpms_to_resolve += 1
srpms_to_resolve_counter = 0
for repo_id in self.data["buildroot"]["srpms"]:
for arch in self.data["buildroot"]["srpms"][repo_id]:
for srpm_id, srpm in self.data["buildroot"]["srpms"][repo_id][arch].items():
if srpm["queued"] or srpm["processed"]:
continue
# Using the _analyze_workload function!
# So I need to reconstruct a fake workload_conf and a fake env_conf
fake_workload_conf = {}
fake_workload_conf["labels"] = []
fake_workload_conf["id"] = srpm_id
fake_workload_conf["options"] = []
fake_workload_conf["modules_disable"] = []
fake_workload_conf["modules_enable"] = []
fake_workload_conf["packages"] = srpm["directly_required_pkg_names"]
fake_workload_conf["groups"] = []
fake_workload_conf["package_placeholders"] = {}
fake_workload_conf["package_placeholders"]["pkgs"] = {}
fake_workload_conf["package_placeholders"]["srpms"] = {}
fake_workload_conf["arch_packages"] = {}
fake_workload_conf["arch_packages"][arch] = []
fake_env_conf = {}
fake_env_conf["labels"] = []
fake_env_conf["id"] = self.data["buildroot"]["build_groups"][repo_id][arch]["generated_id"]
fake_env_conf["packages"] = ["bash"] # This just needs to pass the "if len(packages)" test as True
fake_env_conf["arch_packages"] = {}
fake_env_conf["arch_packages"][arch] = []
srpms_to_resolve_counter += 1
#log("[ Buildroot - pass {} - {} of {} ]".format(pass_counter, srpms_to_resolve_counter, total_srpms_to_resolve))
#log("Resolving SRPM buildroot: {repo_id} {arch} {srpm_id}".format(
# repo_id=repo_id,
# arch=arch,
# srpm_id=srpm_id
#))
repo = self.configs["repos"][repo_id]
#fake_workload = self._analyze_workload(fake_workload_conf, fake_env_conf, repo, arch)
self._queue_workload_processing(fake_workload_conf, fake_env_conf, repo, arch)
# Save the buildroot data
self.data["buildroot"]["srpms"][repo_id][arch][srpm_id]["queued"] = True
asyncio.run(self._analyze_workloads_async(fake_workload_results))
for repo_id in self.data["buildroot"]["srpms"]:
for arch in self.data["buildroot"]["srpms"][repo_id]:
for srpm_id, srpm in self.data["buildroot"]["srpms"][repo_id][arch].items():
if srpm["processed"]:
continue
fake_workload_id = "{workload_conf_id}:{env_conf_id}:{repo_id}:{arch}".format(
workload_conf_id=srpm_id,
env_conf_id=self.data["buildroot"]["build_groups"][repo_id][arch]["generated_id"],
repo_id=repo_id,
arch=arch
)
fake_workload = fake_workload_results[fake_workload_id]
# Save the buildroot data
self.data["buildroot"]["srpms"][repo_id][arch][srpm_id]["succeeded"] = fake_workload["succeeded"]
self.data["buildroot"]["srpms"][repo_id][arch][srpm_id]["pkg_relations"] = fake_workload["pkg_relations"]
self.data["buildroot"]["srpms"][repo_id][arch][srpm_id]["pkg_env_ids"] = fake_workload["pkg_env_ids"]
self.data["buildroot"]["srpms"][repo_id][arch][srpm_id]["pkg_added_ids"] = fake_workload["pkg_added_ids"]
self.data["buildroot"]["srpms"][repo_id][arch][srpm_id]["errors"] = fake_workload["errors"]
self.data["buildroot"]["srpms"][repo_id][arch][srpm_id]["warnings"] = fake_workload["warnings"]
self.data["buildroot"]["srpms"][repo_id][arch][srpm_id]["processed"] = True
log("")
log(" DONE!")
log("")
def _analyze_buildroot(self):
self.data["buildroot"] = {}
self.data["buildroot"]["koji_srpms"] = {}
self.data["buildroot"]["koji_urls"] = {}
self.data["buildroot"]["srpms"] = {}
self.data["buildroot"]["build_groups"] = {}
# Currently, only "compose" view types are supported.
# The "addon" type is not.
# Get SRPMs from views
#
# This populates:
# data["buildroot"]["koji_srpms"]...
# and also initiates:
# data["buildroot"]["srpms"]...
for view_conf_id in self.configs["views"]:
view_conf = self.configs["views"][view_conf_id]
if view_conf["type"] == "compose":
if view_conf["buildroot_strategy"] == "root_logs":
for arch in view_conf["architectures"]:
self._populate_buildroot_with_view_srpms(view_conf, arch)
# Time to resolve the build groups!
#
# This initialises and populates:
# buildroot["build_groups"]
self._analyze_build_groups()
pass_counter = 0
while True:
pass_counter += 1
log("")
log("== Buildroot resolution - pass {} ========".format(pass_counter))
log("")
log("")
# Get the directly_required_pkg_names from koji root logs
#
# Adds stuff to existing:
# data["buildroot"]["koji_srpms"]...
# ... which also updates:
# data["buildroot"]["srpms"]...
# ... because it's interlinked.
self._resolve_srpms_using_root_logs(pass_counter)
# And now resolving the actual buildroot
self._analyze_srpm_buildroots(pass_counter)
# Resolving dependencies could have added new SRPMs into the mix that also
# need their buildroots resolved! So let's find out if there are any
new_srpms_count = self._expand_buildroot_srpms()
if not new_srpms_count:
log("")
log("All passes completed!")
log("")
break
def _add_missing_levels_to_pkg_or_srpm(self, pkg_or_srpm, level):
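# Example: a package first seen at runtime has only level 0; if it later shows up
# in a level-2 buildroot, this appends two more empty level dicts so that
# pkg["level"][2] exists and can be populated.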
pkg_current_max_level = len(pkg_or_srpm["level"]) - 1
for _ in range(level - pkg_current_max_level):
pkg_or_srpm["level"].append({
"all": set(),
"req": set(),
"dep": set(),
"env": set()
})
def _add_buildroot_to_view(self, view_conf, arch):
view_conf_id = view_conf["id"]
view_id = "{view_conf_id}:{arch}".format(
view_conf_id=view_conf_id,
arch=arch
)
repo_id = view_conf["repository"]
view = self.data["views"][view_id]
log("")
log("Adding buildroot to view {}...".format(view_id))
# Starting with all SRPMs in this view
srpm_ids_to_process = set(view["source_pkgs"])
# Starting on level 1, the first buildroot level
# (initialised to 0 here because it gets incremented to 1 as soon as the loop starts)
level = 0
while True:
level += 1
added_pkg_ids = set()
log(" Pass {}...".format(level))
# This is similar to adding workloads in _analyze_view()
for buildroot_srpm_id in srpm_ids_to_process:
buildroot_srpm = self.data["buildroot"]["srpms"][repo_id][arch][buildroot_srpm_id]
# Packages in the base buildroot (which would be the environment in workloads)
for pkg_id in buildroot_srpm["pkg_env_ids"]:
added_pkg_ids.add(pkg_id)
# Initialise
if pkg_id not in view["pkgs"]:
pkg = self.data["pkgs"][repo_id][arch][pkg_id]
view["pkgs"][pkg_id] = self._init_view_pkg(pkg, arch, level=level)
# Add missing levels to the pkg
self._add_missing_levels_to_pkg_or_srpm(view["pkgs"][pkg_id], level)
# It's in this buildroot
view["pkgs"][pkg_id]["in_buildroot_of_srpm_id_all"].add(buildroot_srpm_id)
view["pkgs"][pkg_id]["level"][level]["all"].add(buildroot_srpm_id)
# And in the base buildroot specifically
view["pkgs"][pkg_id]["in_buildroot_of_srpm_id_env"].add(buildroot_srpm_id)
view["pkgs"][pkg_id]["level"][level]["env"].add(buildroot_srpm_id)
# Is it also required?
if view["pkgs"][pkg_id]["name"] in buildroot_srpm["directly_required_pkg_names"]:
view["pkgs"][pkg_id]["in_buildroot_of_srpm_id_req"].add(buildroot_srpm_id)
view["pkgs"][pkg_id]["level"][level]["req"].add(buildroot_srpm_id)
# pkg_relations
view["pkgs"][pkg_id]["required_by"].update(buildroot_srpm["pkg_relations"][pkg_id]["required_by"])
view["pkgs"][pkg_id]["recommended_by"].update(buildroot_srpm["pkg_relations"][pkg_id]["recommended_by"])
view["pkgs"][pkg_id]["suggested_by"].update(buildroot_srpm["pkg_relations"][pkg_id]["suggested_by"])
# Packages needed on top of the base buildroot (required or dependency)
for pkg_id in buildroot_srpm["pkg_added_ids"]:
added_pkg_ids.add(pkg_id)
# Initialise
if pkg_id not in view["pkgs"]:
pkg = self.data["pkgs"][repo_id][arch][pkg_id]
view["pkgs"][pkg_id] = self._init_view_pkg(pkg, arch, level=level)
# Add missing levels to the pkg
self._add_missing_levels_to_pkg_or_srpm(view["pkgs"][pkg_id], level)
# It's in this buildroot
view["pkgs"][pkg_id]["in_buildroot_of_srpm_id_all"].add(buildroot_srpm_id)
view["pkgs"][pkg_id]["level"][level]["all"].add(buildroot_srpm_id)
# Is it also required?
if view["pkgs"][pkg_id]["name"] in buildroot_srpm["directly_required_pkg_names"]:
view["pkgs"][pkg_id]["in_buildroot_of_srpm_id_req"].add(buildroot_srpm_id)
view["pkgs"][pkg_id]["level"][level]["req"].add(buildroot_srpm_id)
# Or a dependency?
else:
view["pkgs"][pkg_id]["in_buildroot_of_srpm_id_dep"].add(buildroot_srpm_id)
view["pkgs"][pkg_id]["level"][level]["dep"].add(buildroot_srpm_id)
# pkg_relations
view["pkgs"][pkg_id]["required_by"].update(buildroot_srpm["pkg_relations"][pkg_id]["required_by"])
view["pkgs"][pkg_id]["recommended_by"].update(buildroot_srpm["pkg_relations"][pkg_id]["recommended_by"])
view["pkgs"][pkg_id]["suggested_by"].update(buildroot_srpm["pkg_relations"][pkg_id]["suggested_by"])
# Resetting the SRPMs, so only the new ones can be added
srpm_ids_to_process = set()
# SRPMs
for pkg_id in added_pkg_ids:
pkg = view["pkgs"][pkg_id]
srpm_id = pkg["sourcerpm"].rsplit(".src.rpm")[0]
# Initialise
if srpm_id not in view["source_pkgs"]:
view["source_pkgs"][srpm_id] = self._init_view_srpm(pkg, level=level)
srpm_ids_to_process.add(srpm_id)
# Add missing levels to the pkg
self._add_missing_levels_to_pkg_or_srpm(view["source_pkgs"][srpm_id], level)
view["source_pkgs"][srpm_id]["pkg_ids"].add(pkg_id)
# Include some information from the RPM
view["source_pkgs"][srpm_id]["in_buildroot_of_srpm_id_all"].update(pkg["in_buildroot_of_srpm_id_all"])
view["source_pkgs"][srpm_id]["in_buildroot_of_srpm_id_req"].update(pkg["in_buildroot_of_srpm_id_req"])
view["source_pkgs"][srpm_id]["in_buildroot_of_srpm_id_dep"].update(pkg["in_buildroot_of_srpm_id_dep"])
view["source_pkgs"][srpm_id]["in_buildroot_of_srpm_id_env"].update(pkg["in_buildroot_of_srpm_id_env"])
view["source_pkgs"][srpm_id]["level"][level]["all"].update(pkg["level"][level]["all"])
view["source_pkgs"][srpm_id]["level"][level]["req"].update(pkg["level"][level]["req"])
view["source_pkgs"][srpm_id]["level"][level]["dep"].update(pkg["level"][level]["dep"])
view["source_pkgs"][srpm_id]["level"][level]["env"].update(pkg["level"][level]["env"])
log (" added {} RPMs".format(len(added_pkg_ids)))
log (" added {} SRPMs".format(len(srpm_ids_to_process)))
# More iterations needed?
if not srpm_ids_to_process:
log(" All passes completed!")
log("")
break
def _add_buildroot_to_views(self):
log("")
log("Adding Buildroot to views...")
log("")
# First, the standard views
for view_conf_id in self.configs["views"]:
view_conf = self.configs["views"][view_conf_id]
if view_conf["type"] == "compose":
if view_conf["buildroot_strategy"] == "root_logs":
for arch in view_conf["architectures"]:
self._add_buildroot_to_view(view_conf, arch)
# Addon views are not supported here for now
log("")
log(" DONE!")
log("")
def _init_pkg_or_srpm_relations_fields(self, target_pkg, type = None):
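# Called with type="rpm" for binary packages (which additionally get the
# dependency_of_* / hard_dependency_of_* / weak_dependency_of_* fields)
# and with the default type for SRPMs.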
# I kept them all listed so they're easy to copy
# Workload IDs
target_pkg["in_workload_ids_all"] = set()
target_pkg["in_workload_ids_req"] = set()
target_pkg["in_workload_ids_dep"] = set()
target_pkg["in_workload_ids_env"] = set()
# Workload Conf IDs
target_pkg["in_workload_conf_ids_all"] = set()
target_pkg["in_workload_conf_ids_req"] = set()
target_pkg["in_workload_conf_ids_dep"] = set()
target_pkg["in_workload_conf_ids_env"] = set()
# Buildroot SRPM IDs
target_pkg["in_buildroot_of_srpm_id_all"] = set()
target_pkg["in_buildroot_of_srpm_id_req"] = set()
target_pkg["in_buildroot_of_srpm_id_dep"] = set()
target_pkg["in_buildroot_of_srpm_id_env"] = set()
# Buildroot SRPM Names
target_pkg["in_buildroot_of_srpm_name_all"] = {} # of set() of srpm_ids
target_pkg["in_buildroot_of_srpm_name_req"] = {} # of set() of srpm_ids
target_pkg["in_buildroot_of_srpm_name_dep"] = {} # of set() of srpm_ids
target_pkg["in_buildroot_of_srpm_name_env"] = {} # of set() of srpm_ids
# Unwanted
target_pkg["unwanted_completely_in_list_ids"] = set()
target_pkg["unwanted_buildroot_in_list_ids"] = set()
# Level number
target_pkg["level_number"] = 999
# Levels
target_pkg["level"] = []
# Maintainer recommendation
target_pkg["maintainer_recommendation"] = {}
target_pkg["maintainer_recommendation_details"] = {}
target_pkg["best_maintainers"] = set()
if type == "rpm":
# Dependency of RPM NEVRs
target_pkg["dependency_of_pkg_nevrs"] = set()
target_pkg["hard_dependency_of_pkg_nevrs"] = set()
target_pkg["weak_dependency_of_pkg_nevrs"] = set()
# Dependency of RPM Names
target_pkg["dependency_of_pkg_names"] = {} # of set() of nevrs
target_pkg["hard_dependency_of_pkg_names"] = {} # of set() of nevrs
target_pkg["weak_dependency_of_pkg_names"] = {} # if set() of nevrs
def _populate_pkg_or_srpm_relations_fields(self, target_pkg, source_pkg, type = None, view = None):
# source_pkg is the arch-specific binary package
# target_pkg is a representation of that package across all arches
#
# This function adds information from the arch-specific package to the general one.
# It gets called for all the arches.
#
if type == "rpm" and not view:
raise ValueError("This function requires a view when using type = 'rpm'!")
# Unwanted
target_pkg["unwanted_completely_in_list_ids"].update(source_pkg["unwanted_completely_in_list_ids"])
target_pkg["unwanted_buildroot_in_list_ids"].update(source_pkg["unwanted_buildroot_in_list_ids"])
# Dependency relationships
for list_type in ["all", "req", "dep", "env"]:
target_pkg["in_workload_ids_{}".format(list_type)].update(source_pkg["in_workload_ids_{}".format(list_type)])
target_pkg["in_buildroot_of_srpm_id_{}".format(list_type)].update(source_pkg["in_buildroot_of_srpm_id_{}".format(list_type)])
for workload_id in source_pkg["in_workload_ids_{}".format(list_type)]:
workload_conf_id = workload_id_to_conf_id(workload_id)
target_pkg["in_workload_conf_ids_{}".format(list_type)].add(workload_conf_id)
for srpm_id in source_pkg["in_buildroot_of_srpm_id_{}".format(list_type)]:
srpm_name = pkg_id_to_name(srpm_id)
if srpm_name not in target_pkg["in_buildroot_of_srpm_name_{}".format(list_type)]:
target_pkg["in_buildroot_of_srpm_name_{}".format(list_type)][srpm_name] = set()
target_pkg["in_buildroot_of_srpm_name_{}".format(list_type)][srpm_name].add(srpm_id)
# Level number
level_number = 0
for level in source_pkg["level"]:
if level["all"]:
if level_number < target_pkg["level_number"]:
target_pkg["level_number"] = level_number
level_number += 1
# All the levels!
level = 0
for level_data in source_pkg["level"]:
# 'level' is the level number
# 'level_data' is a dict with "all", "req", "dep" and "env" keys, each holding
# a set of workload_ids (for the runtime level) or srpm_ids (for buildroot levels)
# If I could do 'if level in target_pkg["level"]' I'd do that instead...
# But it's a list, so I have to do this instead.
if len(target_pkg["level"]) == level:
target_pkg["level"].append(dict())
for level_scope, those_ids in level_data.items():
# 'level_scope' is "all" or "req" etc.
# 'those_ids' is a list of srpm_ids or workload_ids
if level_scope not in target_pkg["level"][level]:
target_pkg["level"][level][level_scope] = set()
target_pkg["level"][level][level_scope].update(those_ids)
level +=1
if type == "rpm":
# Hard dependency of
for pkg_id in source_pkg["required_by"]:
pkg_name = pkg_id_to_name(pkg_id)
# This only happens in addon views, and only rarely.
# Basically means that a package in the addon view is required
# by a package in the base view.
# Doesn't make sense?
# Think of 'glibc-all-langpacks' being in the addon,
# while the proper langpacks along with 'glibc' are in the base view.
#
# In that case, 'glibc' is not in the addon, but 'glibc-all-langpacks'
# requires it.
#
# I'm not implementing it now, as it's such a corner case.
# So just skip it. All the data will remain correct,
# it's just the 'glibc-all-langpacks' page won't show
# "required by 'glibc'" that's all.
if pkg_id not in view["pkgs"]:
view_conf_id = view["view_conf_id"]
view_conf = self.configs["views"][view_conf_id]
if view_conf["type"] == "addon":
continue
pkg = view["pkgs"][pkg_id]
pkg_nevr = "{name}-{evr}".format(
name=pkg["name"],
evr=pkg["evr"]
)
target_pkg["hard_dependency_of_pkg_nevrs"].add(pkg_nevr)
if pkg_name not in target_pkg["hard_dependency_of_pkg_names"]:
target_pkg["hard_dependency_of_pkg_names"][pkg_name] = set()
target_pkg["hard_dependency_of_pkg_names"][pkg_name].add(pkg_nevr)
# Weak dependency of
for list_type in ["recommended", "suggested"]:
for pkg_id in source_pkg["{}_by".format(list_type)]:
pkg_name = pkg_id_to_name(pkg_id)
# This only happens in addon views, and only rarely.
# (see the long comment above)
if pkg_id not in view["pkgs"]:
view_conf_id = view["view_conf_id"]
view_conf = self.configs["views"][view_conf_id]
if view_conf["type"] == "addon":
continue
pkg = view["pkgs"][pkg_id]
pkg_nevr = "{name}-{evr}".format(
name=pkg["name"],
evr=pkg["evr"]
)
target_pkg["weak_dependency_of_pkg_nevrs"].add(pkg_nevr)
if pkg_name not in target_pkg["weak_dependency_of_pkg_names"]:
target_pkg["weak_dependency_of_pkg_names"][pkg_name] = set()
target_pkg["weak_dependency_of_pkg_names"][pkg_name].add(pkg_nevr)
# All types of dependency
target_pkg["dependency_of_pkg_nevrs"].update(target_pkg["hard_dependency_of_pkg_nevrs"])
target_pkg["dependency_of_pkg_nevrs"].update(target_pkg["weak_dependency_of_pkg_nevrs"])
for pkg_name, pkg_nevrs in target_pkg["hard_dependency_of_pkg_names"].items():
if pkg_name not in target_pkg["dependency_of_pkg_names"]:
target_pkg["dependency_of_pkg_names"][pkg_name] = set()
target_pkg["dependency_of_pkg_names"][pkg_name].update(pkg_nevrs)
for pkg_name, pkg_nevrs in target_pkg["weak_dependency_of_pkg_names"].items():
if pkg_name not in target_pkg["dependency_of_pkg_names"]:
target_pkg["dependency_of_pkg_names"][pkg_name] = set()
target_pkg["dependency_of_pkg_names"][pkg_name].update(pkg_nevrs)
# TODO: add the levels
def _generate_views_all_arches(self):
views_all_arches = {}
for view_conf_id, view_conf in self.configs["views"].items():
#if view_conf["type"] == "compose":
if True:
repo_id = view_conf["repository"]
view_all_arches = {}
view_all_arches["id"] = view_conf_id
view_all_arches["has_buildroot"] = False
if view_conf["type"] == "compose":
if view_conf["buildroot_strategy"] == "root_logs":
view_all_arches["has_buildroot"] = True
else:
view_all_arches["has_buildroot"] = False
view_all_arches["everything_succeeded"] = True
view_all_arches["no_warnings"] = True
view_all_arches["workloads"] = {}
view_all_arches["pkgs_by_name"] = {}
view_all_arches["pkgs_by_nevr"] = {}
view_all_arches["source_pkgs_by_name"] = {}
view_all_arches["modules"] = {}
view_all_arches["numbers"] = {}
view_all_arches["numbers"]["pkgs"] = {}
view_all_arches["numbers"]["pkgs"]["runtime"] = 0
view_all_arches["numbers"]["pkgs"]["env"] = 0
view_all_arches["numbers"]["pkgs"]["req"] = 0
view_all_arches["numbers"]["pkgs"]["dep"] = 0
view_all_arches["numbers"]["pkgs"]["build"] = 0
view_all_arches["numbers"]["pkgs"]["build_base"] = 0
view_all_arches["numbers"]["pkgs"]["build_level_1"] = 0
view_all_arches["numbers"]["pkgs"]["build_level_2_plus"] = 0
view_all_arches["numbers"]["srpms"] = {}
view_all_arches["numbers"]["srpms"]["runtime"] = 0
view_all_arches["numbers"]["srpms"]["env"] = 0
view_all_arches["numbers"]["srpms"]["req"] = 0
view_all_arches["numbers"]["srpms"]["dep"] = 0
view_all_arches["numbers"]["srpms"]["build"] = 0
view_all_arches["numbers"]["srpms"]["build_base"] = 0
view_all_arches["numbers"]["srpms"]["build_level_1"] = 0
view_all_arches["numbers"]["srpms"]["build_level_2_plus"] = 0
for arch in view_conf["architectures"]:
view_id = "{view_conf_id}:{arch}".format(
view_conf_id=view_conf_id,
arch=arch
)
view = self.data["views"][view_id]
# Workloads
for workload_id in view["workload_ids"]:
workload = self.data["workloads"][workload_id]
workload_conf_id = workload["workload_conf_id"]
workload_conf = self.configs["workloads"][workload_conf_id]
if workload_conf_id not in view_all_arches["workloads"]:
view_all_arches["workloads"][workload_conf_id] = {}
view_all_arches["workloads"][workload_conf_id]["workload_conf_id"] = workload_conf_id
view_all_arches["workloads"][workload_conf_id]["name"] = workload_conf["name"]
view_all_arches["workloads"][workload_conf_id]["maintainer"] = workload_conf["maintainer"]
view_all_arches["workloads"][workload_conf_id]["succeeded"] = True
view_all_arches["workloads"][workload_conf_id]["no_warnings"] = True
# ...
if not workload["succeeded"]:
view_all_arches["workloads"][workload_conf_id]["succeeded"] = False
view_all_arches["everything_succeeded"] = False
if workload["warnings"]["message"]:
view_all_arches["workloads"][workload_conf_id]["no_warnings"] = False
view_all_arches["no_warnings"] = False
# Binary Packages
for package in view["pkgs"].values():
# Binary Packages by name
key = "pkgs_by_name"
identifier = package["name"]
# Init
if identifier not in view_all_arches[key]:
view_all_arches[key][identifier] = {}
view_all_arches[key][identifier]["name"] = package["name"]
view_all_arches[key][identifier]["placeholder"] = package["placeholder"]
view_all_arches[key][identifier]["source_name"] = package["source_name"]
view_all_arches[key][identifier]["nevrs"] = {}
view_all_arches[key][identifier]["arches"] = set()
self._init_pkg_or_srpm_relations_fields(view_all_arches[key][identifier], type="rpm")
if package["nevr"] not in view_all_arches[key][identifier]["nevrs"]:
view_all_arches[key][identifier]["nevrs"][package["nevr"]] = set()
view_all_arches[key][identifier]["nevrs"][package["nevr"]].add(arch)
view_all_arches[key][identifier]["arches"].add(arch)
self._populate_pkg_or_srpm_relations_fields(view_all_arches[key][identifier], package, type="rpm", view=view)
# Binary Packages by nevr
key = "pkgs_by_nevr"
identifier = package["nevr"]
if identifier not in view_all_arches[key]:
view_all_arches[key][identifier] = {}
view_all_arches[key][identifier]["name"] = package["name"]
view_all_arches[key][identifier]["placeholder"] = package["placeholder"]
view_all_arches[key][identifier]["evr"] = package["evr"]
view_all_arches[key][identifier]["source_name"] = package["source_name"]
view_all_arches[key][identifier]["arches"] = set()
view_all_arches[key][identifier]["reponame_per_arch"] = {}
view_all_arches[key][identifier]["category"] = None
self._init_pkg_or_srpm_relations_fields(view_all_arches[key][identifier], type="rpm")
view_all_arches[key][identifier]["arches"].add(arch)
view_all_arches[key][identifier]["reponame_per_arch"][arch] = package["reponame"]
self._populate_pkg_or_srpm_relations_fields(view_all_arches[key][identifier], package, type="rpm", view=view)
# Source Packages
for package in view["source_pkgs"].values():
# Source Packages by name
key = "source_pkgs_by_name"
identifier = package["name"]
if identifier not in view_all_arches[key]:
view_all_arches[key][identifier] = {}
view_all_arches[key][identifier]["name"] = package["name"]
view_all_arches[key][identifier]["placeholder"] = package["placeholder"]
if view_all_arches["has_buildroot"]:
view_all_arches[key][identifier]["buildroot_succeeded"] = True
view_all_arches[key][identifier]["buildroot_no_warnings"] = True
view_all_arches[key][identifier]["errors"] = {}
view_all_arches[key][identifier]["warnings"] = {}
view_all_arches[key][identifier]["pkg_names"] = set()
view_all_arches[key][identifier]["pkg_nevrs"] = set()
view_all_arches[key][identifier]["arches"] = set()
view_all_arches[key][identifier]["category"] = None
self._init_pkg_or_srpm_relations_fields(view_all_arches[key][identifier])
if view_all_arches["has_buildroot"]:
if not self.data["buildroot"]["srpms"][repo_id][arch][package["id"]]["succeeded"]:
view_all_arches["everything_succeeded"] = False
view_all_arches[key][identifier]["buildroot_succeeded"] = False
view_all_arches[key][identifier]["errors"][arch] = self.data["buildroot"]["srpms"][repo_id][arch][package["id"]]["errors"]
if self.data["buildroot"]["srpms"][repo_id][arch][package["id"]]["warnings"]["message"]:
view_all_arches["no_warnings"] = False
view_all_arches[key][identifier]["buildroot_no_warnings"] = False
view_all_arches[key][identifier]["warnings"][arch] = self.data["buildroot"]["srpms"][repo_id][arch][package["id"]]["warnings"]
view_all_arches[key][identifier]["arches"].add(arch)
self._populate_pkg_or_srpm_relations_fields(view_all_arches[key][identifier], package, type="srpm")
# Add binary packages to source packages
for pkg_id, pkg in view["pkgs"].items():
source_name = pkg["source_name"]
# Add package names
view_all_arches["source_pkgs_by_name"][source_name]["pkg_names"].add(pkg["name"])
# Add package nevrs
pkg_nevr = "{name}-{evr}".format(
name=pkg["name"],
evr=pkg["evr"]
)
view_all_arches["source_pkgs_by_name"][source_name]["pkg_nevrs"].add(pkg_nevr)
# Modules
for module_id, module in view["modules"].items():
if module_id not in view_all_arches["modules"]:
view_all_arches["modules"][module_id] = {}
view_all_arches["modules"][module_id]["id"] = module_id
# ...
# RPMs
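# Each package is counted in exactly one category; the first matching rule wins:
# env > req > dep > build_base > build_level_1 / build_level_2_plus.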
for pkg in view_all_arches["pkgs_by_nevr"].values():
category = None
if pkg["in_workload_ids_env"]:
category = "env"
elif pkg["in_workload_ids_req"]:
category = "req"
elif pkg["in_workload_ids_dep"]:
category = "dep"
elif pkg["in_buildroot_of_srpm_id_env"]:
category = "build_base"
elif pkg["in_buildroot_of_srpm_id_req"] or pkg["in_buildroot_of_srpm_id_dep"]:
if pkg["level_number"] == 1:
category = "build_level_1"
elif pkg["level_number"] > 1:
category = "build_level_2_plus"
view_all_arches["numbers"]["pkgs"][category] += 1
view_all_arches["numbers"]["pkgs"]["runtime"] = view_all_arches["numbers"]["pkgs"]["env"] + view_all_arches["numbers"]["pkgs"]["req"] + view_all_arches["numbers"]["pkgs"]["dep"]
view_all_arches["numbers"]["pkgs"]["build"] = view_all_arches["numbers"]["pkgs"]["build_base"] + view_all_arches["numbers"]["pkgs"]["build_level_1"] + view_all_arches["numbers"]["pkgs"]["build_level_2_plus"]
# SRPMs
for pkg in view_all_arches["source_pkgs_by_name"].values():
category = None
if pkg["in_workload_ids_env"]:
category = "env"
elif pkg["in_workload_ids_req"]:
category = "req"
elif pkg["in_workload_ids_dep"]:
category = "dep"
elif pkg["in_buildroot_of_srpm_id_env"]:
category = "build_base"
elif pkg["in_buildroot_of_srpm_id_req"] or pkg["in_buildroot_of_srpm_id_dep"]:
if pkg["level_number"] == 1:
category = "build_level_1"
elif pkg["level_number"] > 1:
category = "build_level_2_plus"
view_all_arches["numbers"]["srpms"][category] += 1
view_all_arches["numbers"]["srpms"]["runtime"] = \
view_all_arches["numbers"]["srpms"]["env"] + \
view_all_arches["numbers"]["srpms"]["req"] + \
view_all_arches["numbers"]["srpms"]["dep"]
view_all_arches["numbers"]["srpms"]["build"] = \
view_all_arches["numbers"]["srpms"]["build_base"] + \
view_all_arches["numbers"]["srpms"]["build_level_1"] + \
view_all_arches["numbers"]["srpms"]["build_level_2_plus"]
# Done
views_all_arches[view_conf_id] = view_all_arches
self.data["views_all_arches"] = views_all_arches
def _add_unwanted_packages_to_view(self, view, view_conf):
arch = view["arch"]
# Find exclusion lists matching this view's label(s)
unwanted_conf_ids = set()
for view_label in view_conf["labels"]:
for unwanted_conf_id, unwanted in self.configs["unwanteds"].items():
for unwanted_label in unwanted["labels"]:
if view_label == unwanted_label:
unwanted_conf_ids.add(unwanted_conf_id)
# Dicts
pkgs_unwanted_buildroot = {}
pkgs_unwanted_completely = {}
srpms_unwanted_buildroot = {}
srpms_unwanted_completely = {}
# Populate the dicts
for unwanted_conf_id in unwanted_conf_ids:
unwanted_conf = self.configs["unwanteds"][unwanted_conf_id]
# Pkgs
for pkg_name in unwanted_conf["unwanted_packages"]:
if pkg_name not in pkgs_unwanted_completely:
pkgs_unwanted_completely[pkg_name] = set()
pkgs_unwanted_completely[pkg_name].add(unwanted_conf_id)
# Arch Pkgs
for pkg_name in unwanted_conf["unwanted_arch_packages"][arch]:
if pkg_name not in pkgs_unwanted_completely:
pkgs_unwanted_completely[pkg_name] = set()
pkgs_unwanted_completely[pkg_name].add(unwanted_conf_id)
# SRPMs
for pkg_source_name in unwanted_conf["unwanted_source_packages"]:
if pkg_source_name not in srpms_unwanted_completely:
srpms_unwanted_completely[pkg_source_name] = set()
srpms_unwanted_completely[pkg_source_name].add(unwanted_conf_id)
# Add it to the packages
for pkg_id, pkg in view["pkgs"].items():
pkg_name = pkg["name"]
srpm_name = pkg["source_name"]
if pkg_name in pkgs_unwanted_completely:
list_ids = pkgs_unwanted_completely[pkg_name]
view["pkgs"][pkg_id]["unwanted_completely_in_list_ids"].update(list_ids)
if srpm_name in srpms_unwanted_completely:
list_ids = srpms_unwanted_completely[srpm_name]
view["pkgs"][pkg_id]["unwanted_completely_in_list_ids"].update(list_ids)
# Add it to the srpms
for srpm_id, srpm in view["source_pkgs"].items():
srpm_name = srpm["name"]
if srpm_name in srpms_unwanted_completely:
list_ids = srpms_unwanted_completely[srpm_name]
view["source_pkgs"][srpm_id]["unwanted_completely_in_list_ids"].update(list_ids)
def _add_unwanted_packages_to_views(self):
log("")
log("Adding Unwanted Packages to views...")
log("")
# First, the standard views
for view_conf_id in self.configs["views"]:
view_conf = self.configs["views"][view_conf_id]
if view_conf["type"] == "compose":
if view_conf["buildroot_strategy"] == "root_logs":
for arch in view_conf["architectures"]:
view_id = "{view_conf_id}:{arch}".format(
view_conf_id=view_conf_id,
arch=arch
)
view = self.data["views"][view_id]
self._add_unwanted_packages_to_view(view, view_conf)
def _recommend_maintainers(self):
# Packages can be on one or more _levels_:
# level 0 is runtime
# level 1 is build deps of the previous level
# level 2 is build deps of the previous level
# ... etc.
#
# Within a level, they can be on one or more _sublevels_:
# level 0 sublevel 0 is explicitly required
# level 0 sublevel 1 is runtime deps of the previous sublevel
# level 0 sublevel 2 is runtime deps of the previous sublevel
# ... etc
# level 1 sublevel 0 is direct build deps of the previous level
# level 1 sublevel 1 is runtime deps of the previous sublevel
# level 1 sublevel 2 is runtime deps of the previous sublevel
# ... etc
#
# I'll call a combination of these a _score_ because I can't think of
# anything better at this point. It's a tuple!
#
# (0, 0)
# | '-- sub-level 0 == explicitly required
# '---- level 0 == runtime
#
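# A few illustrative scores: (0, 0) = explicitly required at runtime,
# (0, 1) = a runtime dependency of one of those, (1, 0) = a direct build
# dependency of something on level 0, and so on.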
for view_conf_id in self.configs["views"]:
view_conf = self.configs["views"][view_conf_id]
view_all_arches = self.data["views_all_arches"][view_conf_id]
# Skip the obsolete dep_tracker build strategy, that's done by the old OwnershipEngine
if view_conf["type"] == "compose" and view_conf["buildroot_strategy"] == "dep_tracker":
continue
# Also skip addons for now
if view_conf["type"] == "addon":
continue
log(" {}".format(view_conf_id))
# Level 0
level = str(0)
sublevel = str(0)
score = (level, sublevel)
log(" {}".format(score))
# There's not much point in analyzing packages on multiple levels.
# For example, if someone explicitly requires glibc, I don't need to track
# details up until the very end of the dependency chain...
this_level_srpms = set()
previous_level_srpms = set()
# Take all explicitly required packages and assign them
# to the maintainer of their workloads.
#
# Or, for the buildroot levels, to the maintainers of the SRPMs that pulled them in.
for pkg_name, pkg in view_all_arches["pkgs_by_name"].items():
source_name = pkg["source_name"]
# Only want explicitly required ones
for workload_id in pkg["in_workload_ids_req"]:
workload = self.data["workloads"][workload_id]
workload_conf_id = workload["workload_conf_id"]
workload_conf = self.configs["workloads"][workload_conf_id]
workload_maintainer = workload_conf["maintainer"]
# 1/ maintainer_recommendation
if workload_maintainer not in pkg["maintainer_recommendation"]:
#pkg["maintainer_recommendation"][workload_maintainer] = set()
self.data["views_all_arches"][view_conf_id]["pkgs_by_name"][pkg_name]["maintainer_recommendation"][workload_maintainer] = set()
#pkg["maintainer_recommendation"][workload_maintainer].add(score)
self.data["views_all_arches"][view_conf_id]["pkgs_by_name"][pkg_name]["maintainer_recommendation"][workload_maintainer].add(score)
# 2/ maintainer_recommendation_details
if level not in pkg["maintainer_recommendation_details"]:
#pkg["maintainer_recommendation_details"][level] = {}
self.data["views_all_arches"][view_conf_id]["pkgs_by_name"][pkg_name]["maintainer_recommendation_details"][level] = {}
if sublevel not in pkg["maintainer_recommendation_details"][level]:
#pkg["maintainer_recommendation_details"][level][sublevel] = {}
self.data["views_all_arches"][view_conf_id]["pkgs_by_name"][pkg_name]["maintainer_recommendation_details"][level][sublevel] = {}
if workload_maintainer not in pkg["maintainer_recommendation_details"][level][sublevel]:
#pkg["maintainer_recommendation_details"][level][sublevel][workload_maintainer] = {}
#pkg["maintainer_recommendation_details"][level][sublevel][workload_maintainer]["reasons"] = {}
#pkg["maintainer_recommendation_details"][level][sublevel][workload_maintainer]["locations"] = {}
self.data["views_all_arches"][view_conf_id]["pkgs_by_name"][pkg_name]["maintainer_recommendation_details"][level][sublevel][workload_maintainer] = {}
self.data["views_all_arches"][view_conf_id]["pkgs_by_name"][pkg_name]["maintainer_recommendation_details"][level][sublevel][workload_maintainer]["reasons"] = set()
self.data["views_all_arches"][view_conf_id]["pkgs_by_name"][pkg_name]["maintainer_recommendation_details"][level][sublevel][workload_maintainer]["locations"] = set()
#pkg["maintainer_recommendation_details"][level][sublevel][workload_maintainer]["locations"].add(workload_conf_id)
self.data["views_all_arches"][view_conf_id]["pkgs_by_name"][pkg_name]["maintainer_recommendation_details"][level][sublevel][workload_maintainer]["locations"].add(workload_conf_id)
# Lie to the while loop so it runs at least once
level_changes_made = True
level_change_detection = set()
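            # Outer loop: one pass per level (0 == runtime, 1 and higher == buildroot).
            # It keeps running as long as the previous pass assigned at least one new maintainer.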
while level_changes_made:
# Level 1 and higher
if int(level) > 0:
level_changes_made = False
log(" {}".format(score))
# Take all the direct build dependencies
# of the previous group, and assign them to the maintainers of packages
# that pulled them in
for pkg_name, pkg in view_all_arches["pkgs_by_name"].items():
source_name = pkg["source_name"]
# Don't process packages on multiple levels. (more details above)
if source_name in previous_level_srpms:
continue
# Look at all SRPMs that directly pull this RPM into the buildroot...
for buildroot_srpm_name in pkg["in_buildroot_of_srpm_name_req"]:
buildroot_srpm = view_all_arches["source_pkgs_by_name"][buildroot_srpm_name]
# ... and if they're in the previous group, assign their maintainer(s)
# But limit this to only the ones with the highest score.
all_the_previous_sublevels_of_this_buildroot_srpm = set()
for buildroot_srpm_maintainer, buildroot_srpm_maintainer_scores in buildroot_srpm["maintainer_recommendation"].items():
for buildroot_srpm_maintainer_score in buildroot_srpm_maintainer_scores:
buildroot_srpm_maintainer_score_level, buildroot_srpm_maintainer_score_sublevel = buildroot_srpm_maintainer_score
                                    if buildroot_srpm_maintainer_score_level != prev_level:
continue
all_the_previous_sublevels_of_this_buildroot_srpm.add(buildroot_srpm_maintainer_score_sublevel)
if not all_the_previous_sublevels_of_this_buildroot_srpm:
continue
                            # Sublevels are stored as strings, so compare them as integers
                            the_highest_sublevel_of_this_buildroot_srpm = min(all_the_previous_sublevels_of_this_buildroot_srpm, key=int)
the_score_I_care_about = (prev_level, the_highest_sublevel_of_this_buildroot_srpm)
for buildroot_srpm_maintainer, buildroot_srpm_maintainer_scores in buildroot_srpm["maintainer_recommendation"].items():
if the_score_I_care_about in buildroot_srpm_maintainer_scores:
level_change_detection_tuple = (buildroot_srpm_name, pkg_name)
if level_change_detection_tuple not in level_change_detection:
level_changes_made = True
level_change_detection.add(level_change_detection_tuple)
# 1/ maintainer_recommendation
if buildroot_srpm_maintainer not in pkg["maintainer_recommendation"]:
#pkg["maintainer_recommendation"][workload_maintainer] = set()
self.data["views_all_arches"][view_conf_id]["pkgs_by_name"][pkg_name]["maintainer_recommendation"][buildroot_srpm_maintainer] = set()
#pkg["maintainer_recommendation"][buildroot_srpm_maintainer].add(score)
self.data["views_all_arches"][view_conf_id]["pkgs_by_name"][pkg_name]["maintainer_recommendation"][buildroot_srpm_maintainer].add(score)
# 2/ maintainer_recommendation_details
if level not in pkg["maintainer_recommendation_details"]:
#pkg["maintainer_recommendation_details"][level] = {}
self.data["views_all_arches"][view_conf_id]["pkgs_by_name"][pkg_name]["maintainer_recommendation_details"][level] = {}
if sublevel not in pkg["maintainer_recommendation_details"][level]:
#pkg["maintainer_recommendation_details"][level][sublevel] = {}
self.data["views_all_arches"][view_conf_id]["pkgs_by_name"][pkg_name]["maintainer_recommendation_details"][level][sublevel] = {}
if buildroot_srpm_maintainer not in pkg["maintainer_recommendation_details"][level][sublevel]:
#pkg["maintainer_recommendation_details"][level][sublevel][buildroot_srpm_maintainer] = {}
#pkg["maintainer_recommendation_details"][level][sublevel][buildroot_srpm_maintainer]["reasons"] = {}
#pkg["maintainer_recommendation_details"][level][sublevel][buildroot_srpm_maintainer]["locations"] = {}
self.data["views_all_arches"][view_conf_id]["pkgs_by_name"][pkg_name]["maintainer_recommendation_details"][level][sublevel][buildroot_srpm_maintainer] = {}
self.data["views_all_arches"][view_conf_id]["pkgs_by_name"][pkg_name]["maintainer_recommendation_details"][level][sublevel][buildroot_srpm_maintainer]["reasons"] = set()
self.data["views_all_arches"][view_conf_id]["pkgs_by_name"][pkg_name]["maintainer_recommendation_details"][level][sublevel][buildroot_srpm_maintainer]["locations"] = set()
#pkg["maintainer_recommendation_details"][level][sublevel][buildroot_srpm_maintainer]["locations"].add(buildroot_srpm_name)
self.data["views_all_arches"][view_conf_id]["pkgs_by_name"][pkg_name]["maintainer_recommendation_details"][level][sublevel][buildroot_srpm_maintainer]["locations"].add(buildroot_srpm_name)
# Time to look at runtime dependencies!
#
# Take all packages that depend on the previous group and assign them
# to the maintainer of their superior package. Do this in a loop until
# there's nothing to assign.
#
# So this will deal with scores 0.1, 0.2, 0.3, ...
# Lie to the while loop so it runs at least once
sublevel_changes_made = True
sublevel_change_detection = set()
while sublevel_changes_made:
# Reset its memories. Let it make some new real memories!!
sublevel_changes_made = False
# Jump another sub-level down
prev_score = score
prev_sublevel = sublevel
#sublevel += 1
sublevel = str(int(sublevel) + 1)
score = (level, sublevel)
log(" {}".format(score))
for pkg_name, pkg in view_all_arches["pkgs_by_name"].items():
source_name = pkg["source_name"]
# Don't process packages on multiple levels. (more details above)
if source_name in previous_level_srpms:
continue
# Look at all of its superior packages (packages that require it)...
for superior_pkg_name in pkg["hard_dependency_of_pkg_names"]:
superior_pkg = view_all_arches["pkgs_by_name"][superior_pkg_name]
superior_srpm_name = superior_pkg["source_name"]
# ... and if they're in the previous group, assign their maintainer(s)
for superior_pkg_maintainer, superior_pkg_maintainer_scores in superior_pkg["maintainer_recommendation"].items():
if prev_score in superior_pkg_maintainer_scores:
sublevel_change_detection_tuple = (superior_pkg_name, pkg_name, superior_pkg_maintainer)
if sublevel_change_detection_tuple in sublevel_change_detection:
continue
else:
sublevel_changes_made = True
sublevel_change_detection.add(sublevel_change_detection_tuple)
# 1/ maintainer_recommendation
if superior_pkg_maintainer not in pkg["maintainer_recommendation"]:
#pkg["maintainer_recommendation"][workload_maintainer] = set()
self.data["views_all_arches"][view_conf_id]["pkgs_by_name"][pkg_name]["maintainer_recommendation"][superior_pkg_maintainer] = set()
#pkg["maintainer_recommendation"][superior_pkg_maintainer].add(score)
self.data["views_all_arches"][view_conf_id]["pkgs_by_name"][pkg_name]["maintainer_recommendation"][superior_pkg_maintainer].add(score)
# 2/ maintainer_recommendation_details
if level not in pkg["maintainer_recommendation_details"]:
#pkg["maintainer_recommendation_details"][level] = {}
self.data["views_all_arches"][view_conf_id]["pkgs_by_name"][pkg_name]["maintainer_recommendation_details"][level] = {}
if sublevel not in pkg["maintainer_recommendation_details"][level]:
#pkg["maintainer_recommendation_details"][level][sublevel] = {}
self.data["views_all_arches"][view_conf_id]["pkgs_by_name"][pkg_name]["maintainer_recommendation_details"][level][sublevel] = {}
if superior_pkg_maintainer not in pkg["maintainer_recommendation_details"][level][sublevel]:
#pkg["maintainer_recommendation_details"][level][sublevel][superior_pkg_maintainer] = {}
#pkg["maintainer_recommendation_details"][level][sublevel][superior_pkg_maintainer]["reasons"] = {}
#pkg["maintainer_recommendation_details"][level][sublevel][superior_pkg_maintainer]["locations"] = {}
self.data["views_all_arches"][view_conf_id]["pkgs_by_name"][pkg_name]["maintainer_recommendation_details"][level][sublevel][superior_pkg_maintainer] = {}
self.data["views_all_arches"][view_conf_id]["pkgs_by_name"][pkg_name]["maintainer_recommendation_details"][level][sublevel][superior_pkg_maintainer]["reasons"] = set()
self.data["views_all_arches"][view_conf_id]["pkgs_by_name"][pkg_name]["maintainer_recommendation_details"][level][sublevel][superior_pkg_maintainer]["locations"] = set()
# Copy the locations from the superior package one sublevel up
locations = superior_pkg["maintainer_recommendation_details"][level][prev_sublevel][superior_pkg_maintainer]["locations"]
self.data["views_all_arches"][view_conf_id]["pkgs_by_name"][pkg_name]["maintainer_recommendation_details"][level][sublevel][superior_pkg_maintainer]["locations"].update(locations)
reason = (superior_pkg_name, superior_srpm_name, pkg_name)
#pkg["maintainer_recommendation_details"][level][sublevel][superior_pkg_maintainer]["reasons"].add(reason)
self.data["views_all_arches"][view_conf_id]["pkgs_by_name"][pkg_name]["maintainer_recommendation_details"][level][sublevel][superior_pkg_maintainer]["reasons"].add(reason)
# Now add this info to the source packages
for pkg_name, pkg in view_all_arches["pkgs_by_name"].items():
source_name = pkg["source_name"]
# 1/ maintainer_recommendation
for maintainer, maintainer_scores in pkg["maintainer_recommendation"].items():
if maintainer not in self.data["views_all_arches"][view_conf_id]["source_pkgs_by_name"][source_name]["maintainer_recommendation"]:
self.data["views_all_arches"][view_conf_id]["source_pkgs_by_name"][source_name]["maintainer_recommendation"][maintainer] = set()
self.data["views_all_arches"][view_conf_id]["source_pkgs_by_name"][source_name]["maintainer_recommendation"][maintainer].update(maintainer_scores)
                        # Add it here so it's not processed again on another level
this_level_srpms.add(source_name)
# 2/ maintainer_recommendation_details
for loop_level, loop_sublevels in pkg["maintainer_recommendation_details"].items():
if loop_level not in self.data["views_all_arches"][view_conf_id]["source_pkgs_by_name"][source_name]["maintainer_recommendation_details"]:
self.data["views_all_arches"][view_conf_id]["source_pkgs_by_name"][source_name]["maintainer_recommendation_details"][loop_level] = {}
for loop_sublevel, maintainers in loop_sublevels.items():
if loop_sublevel not in self.data["views_all_arches"][view_conf_id]["source_pkgs_by_name"][source_name]["maintainer_recommendation_details"][loop_level]:
self.data["views_all_arches"][view_conf_id]["source_pkgs_by_name"][source_name]["maintainer_recommendation_details"][loop_level][loop_sublevel] = {}
for maintainer, maintainer_details in maintainers.items():
if maintainer not in self.data["views_all_arches"][view_conf_id]["source_pkgs_by_name"][source_name]["maintainer_recommendation_details"][loop_level][loop_sublevel]:
self.data["views_all_arches"][view_conf_id]["source_pkgs_by_name"][source_name]["maintainer_recommendation_details"][loop_level][loop_sublevel][maintainer] = {}
self.data["views_all_arches"][view_conf_id]["source_pkgs_by_name"][source_name]["maintainer_recommendation_details"][loop_level][loop_sublevel][maintainer]["reasons"] = set()
self.data["views_all_arches"][view_conf_id]["source_pkgs_by_name"][source_name]["maintainer_recommendation_details"][loop_level][loop_sublevel][maintainer]["locations"] = set()
reasons = maintainer_details["reasons"]
locations = maintainer_details["locations"]
self.data["views_all_arches"][view_conf_id]["source_pkgs_by_name"][source_name]["maintainer_recommendation_details"][loop_level][loop_sublevel][maintainer]["reasons"].update(reasons)
self.data["views_all_arches"][view_conf_id]["source_pkgs_by_name"][source_name]["maintainer_recommendation_details"][loop_level][loop_sublevel][maintainer]["locations"].update(locations)
# And set stuff for the next level
prev_level = level
level = str(int(level) + 1)
#level += 1
sublevel = str(0)
score = (level, sublevel)
previous_level_srpms.update(this_level_srpms)
this_level_srpms = set()
# And elect the best owners for each srpm
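            # The election: take the lowest level, then the lowest sublevel within it,
            # and out of the maintainers with that best score keep the one(s) with the
            # most dependencies pointing at them (ties keep all of them).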
for source_name, srpm in view_all_arches["source_pkgs_by_name"].items():
if not srpm["maintainer_recommendation_details"]:
continue
level_numbers = set()
for level_string in srpm["maintainer_recommendation_details"].keys():
level_numbers.add(int(level_string))
lowest_level_int = min(level_numbers)
lowest_level = str(min(level_numbers))
if not srpm["maintainer_recommendation_details"][lowest_level]:
continue
sublevel_numbers = set()
for sublevel_string in srpm["maintainer_recommendation_details"][lowest_level].keys():
sublevel_numbers.add(int(sublevel_string))
lowest_sublevel = str(min(sublevel_numbers))
maintainers_with_the_best_score = set(srpm["maintainer_recommendation_details"][lowest_level][lowest_sublevel].keys())
highest_number_of_dependencies = 0
best_maintainers = set()
for maint in maintainers_with_the_best_score:
# If we're looking at a direct build dependency, count the number of locations == SRPMs that directly need this
# And in all other cases count the reasons == the number of packages that runtime require
# (in case of 0,0 len(reasons) is always 1 as it just says "directly required" so that works fine)
if lowest_level_int > 0 and lowest_sublevel == "0":
number_of_dependencies = len(srpm["maintainer_recommendation_details"][lowest_level][lowest_sublevel][maint]["locations"])
else:
number_of_dependencies = len(srpm["maintainer_recommendation_details"][lowest_level][lowest_sublevel][maint]["reasons"])
if number_of_dependencies > highest_number_of_dependencies:
highest_number_of_dependencies = number_of_dependencies
best_maintainers = set()
if number_of_dependencies == highest_number_of_dependencies:
best_maintainers.add(maint)
self.data["views_all_arches"][view_conf_id]["source_pkgs_by_name"][source_name]["best_maintainers"].update(best_maintainers)
log("")
log(" DONE!")
log("")
def analyze_things(self):
log("")
log("###############################################################################")
log("### Analyzing stuff! ##########################################################")
log("###############################################################################")
log("")
self.data["pkgs"] = {}
self.data["envs"] = {}
self.data["workloads"] = {}
self.data["views"] = {}
with tempfile.TemporaryDirectory() as tmp:
if self.settings["dnf_cache_dir_override"]:
self.tmp_dnf_cachedir = self.settings["dnf_cache_dir_override"]
else:
self.tmp_dnf_cachedir = os.path.join(tmp, "dnf_cachedir")
self.tmp_installroots = os.path.join(tmp, "installroots")
# List of supported arches
all_arches = self.settings["allowed_arches"]
# Packages
log("")
log("===== Analyzing Repos & Packages =====")
log("")
self.data["repos"] = {}
for _,repo in self.configs["repos"].items():
repo_id = repo["id"]
self.data["pkgs"][repo_id] = {}
self.data["repos"][repo_id] = {}
for arch in repo["source"]["architectures"]:
self.data["pkgs"][repo_id][arch] = self._analyze_pkgs(repo, arch)
# Reading the optional composeinfo
self.data["repos"][repo_id]["compose_date"] = None
self.data["repos"][repo_id]["compose_days_ago"] = 0
if repo["source"]["composeinfo"]:
# At this point, this is all I can do. Hate me or not, it gets us
                    # what we need and won't break anything in case things go badly.
try:
with urllib.request.urlopen(repo["source"]["composeinfo"]) as response:
composeinfo_raw_response = response.read()
composeinfo_data = json.loads(composeinfo_raw_response)
self.data["repos"][repo_id]["composeinfo"] = composeinfo_data
compose_date = datetime.datetime.strptime(composeinfo_data["payload"]["compose"]["date"], "%Y%m%d").date()
self.data["repos"][repo_id]["compose_date"] = compose_date.strftime("%Y-%m-%d")
date_now = datetime.datetime.now().date()
self.data["repos"][repo_id]["compose_days_ago"] = (date_now - compose_date).days
                    except Exception:
                        # If fetching or parsing the composeinfo fails, just continue without it.
                        pass
# Environments
log("")
log("===== Analyzing Environments =====")
log("")
self._analyze_envs()
# Workloads
log("")
log("===== Analyzing Workloads =====")
log("")
self._analyze_workloads()
# Views
#
# This creates:
# data["views"][view_id]["id"]
# data["views"][view_id]["view_conf_id"]
# data["views"][view_id]["arch"]
# data["views"][view_id]["workload_ids"]
# data["views"][view_id]["pkgs"]
# data["views"][view_id]["source_pkgs"]
# data["views"][view_id]["modules"]
#
log("")
log("===== Analyzing Views =====")
log("")
self._analyze_views()
# Buildroot
# This is partially similar to workloads, because it's resolving
# the full dependency tree of the direct build dependencies of SRPMs
#
# So compared to workloads:
# direct build dependencies are like required packages in workloads
# the dependencies are like dependencies in workloads
# the "build" group is like environments in workloads
#
# This completely creates:
# data["buildroot"]["koji_srpms"][koji_id][arch][srpm_id]...
# data["buildroot"]["srpms"][repo_id][arch][srpm_id]...
#
log("")
log("===== Analyzing Buildroot =====")
log("")
self._analyze_buildroot()
# Add buildroot packages to views
#
# Further extends the following with buildroot packages:
# data["views"][view_id]["pkgs"]
# data["views"][view_id]["source_pkgs"]
#
log("")
log("===== Adding Buildroot to Views =====")
log("")
self._add_buildroot_to_views()
# Unwanted packages
log("")
log("===== Adding Unwanted Packages to Views =====")
log("")
self._add_unwanted_packages_to_views()
# Generate combined views for all arches
log("")
log("===== Generating views_all_arches =====")
log("")
            self._generate_views_all_arches()
# Recommend package maintainers in views
log("")
log("===== Recommending maintainers =====")
log("")
self._recommend_maintainers()
# Finally, save the cache for next time
dump_data(self.settings["root_log_deps_cache_path"], self.cache["root_log_deps"]["next"])
return self.data
###############################################################################
### Query gives an easy access to the data! ###################################
###############################################################################
class Query():
def __init__(self, data, configs, settings):
self.data = data
self.configs = configs
self.settings = settings
self.computed_data = {}
def size(self, num, suffix='B'):
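        # Human-readable size, e.g. size(2048) == "2.0 kB" and size(500) == "500.0 B"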
for unit in ['','k','M','G']:
if abs(num) < 1024.0:
return "%3.1f %s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f %s%s" % (num, 'T', suffix)
@lru_cache(maxsize = None)
def workloads(self, workload_conf_id, env_conf_id, repo_id, arch, list_all=False, output_change=None):
        # Accepts None in any argument, and in those cases, answers for all instances
# It can output just one part of the id.
# That's useful to, for example, list all arches associated with a workload_conf_id
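        # Workload IDs have the form "workload_conf_id:env_conf_id:repo_id:arch".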
if output_change:
list_all = True
if output_change not in ["workload_conf_ids", "env_conf_ids", "repo_ids", "arches"]:
raise ValueError('output_change must be one of: "workload_conf_ids", "env_conf_ids", "repo_ids", "arches"')
matching_ids = set()
# list considered workload_conf_ids
if workload_conf_id:
workload_conf_ids = [workload_conf_id]
else:
workload_conf_ids = self.configs["workloads"].keys()
# list considered env_conf_ids
if env_conf_id:
env_conf_ids = [env_conf_id]
else:
env_conf_ids = self.configs["envs"].keys()
# list considered repo_ids
if repo_id:
repo_ids = [repo_id]
else:
repo_ids = self.configs["repos"].keys()
# list considered arches
if arch:
arches = [arch]
else:
arches = self.settings["allowed_arches"]
        # And now try looping through all of that, and return True on a first occurrence
# This is a terrible amount of loops. But most cases will have just one item
# in most of those, anyway. No one is expected to run this method with
# a "None" for every argument!
for workload_conf_id in workload_conf_ids:
for env_conf_id in env_conf_ids:
for repo_id in repo_ids:
for arch in arches:
workload_id = "{workload_conf_id}:{env_conf_id}:{repo_id}:{arch}".format(
workload_conf_id=workload_conf_id,
env_conf_id=env_conf_id,
repo_id=repo_id,
arch=arch
)
if workload_id in self.data["workloads"].keys():
if not list_all:
return True
if output_change:
if output_change == "workload_conf_ids":
matching_ids.add(workload_conf_id)
if output_change == "env_conf_ids":
matching_ids.add(env_conf_id)
if output_change == "repo_ids":
matching_ids.add(repo_id)
if output_change == "arches":
matching_ids.add(arch)
else:
matching_ids.add(workload_id)
if not list_all:
return False
return sorted(list(matching_ids))
@lru_cache(maxsize = None)
def workloads_id(self, id, list_all=False, output_change=None):
# Accepts both env and workload ID, and returns workloads that match that
id_components = id.split(":")
# It's an env!
if len(id_components) == 3:
env_conf_id = id_components[0]
repo_id = id_components[1]
arch = id_components[2]
return self.workloads(None, env_conf_id, repo_id, arch, list_all, output_change)
# It's a workload! Why would you want that, anyway?!
if len(id_components) == 4:
workload_conf_id = id_components[0]
env_conf_id = id_components[1]
repo_id = id_components[2]
arch = id_components[3]
return self.workloads(workload_conf_id, env_conf_id, repo_id, arch, list_all, output_change)
raise ValueError("That seems to be an invalid ID!")
@lru_cache(maxsize = None)
def envs(self, env_conf_id, repo_id, arch, list_all=False, output_change=None):
        # Accepts None in any argument, and in those cases, answers for all instances
# It can output just one part of the id.
# That's useful to, for example, list all arches associated with a workload_conf_id
if output_change:
list_all = True
if output_change not in ["env_conf_ids", "repo_ids", "arches"]:
raise ValueError('output_change must be one of: "env_conf_ids", "repo_ids", "arches"')
matching_ids = set()
# list considered env_conf_ids
if env_conf_id:
env_conf_ids = [env_conf_id]
else:
env_conf_ids = self.configs["envs"].keys()
# list considered repo_ids
if repo_id:
repo_ids = [repo_id]
else:
repo_ids = self.configs["repos"].keys()
# list considered arches
if arch:
arches = [arch]
else:
arches = self.settings["allowed_arches"]
        # And now try looping through all of that, and return True on a first occurrence
# This is a terrible amount of loops. But most cases will have just one item
# in most of those, anyway. No one is expected to run this method with
# a "None" for every argument!
for env_conf_id in env_conf_ids:
for repo_id in repo_ids:
for arch in arches:
env_id = "{env_conf_id}:{repo_id}:{arch}".format(
env_conf_id=env_conf_id,
repo_id=repo_id,
arch=arch
)
if env_id in self.data["envs"].keys():
if not list_all:
return True
if output_change:
if output_change == "env_conf_ids":
matching_ids.add(env_conf_id)
if output_change == "repo_ids":
matching_ids.add(repo_id)
if output_change == "arches":
matching_ids.add(arch)
else:
matching_ids.add(env_id)
# This means nothing has been found!
if not list_all:
return False
return sorted(list(matching_ids))
@lru_cache(maxsize = None)
def envs_id(self, id, list_all=False, output_change=None):
        # Accepts both env and workload ID, and returns envs that match that
id_components = id.split(":")
# It's an env!
if len(id_components) == 3:
env_conf_id = id_components[0]
repo_id = id_components[1]
arch = id_components[2]
return self.envs(env_conf_id, repo_id, arch, list_all, output_change)
# It's a workload!
if len(id_components) == 4:
workload_conf_id = id_components[0]
env_conf_id = id_components[1]
repo_id = id_components[2]
arch = id_components[3]
return self.envs(env_conf_id, repo_id, arch, list_all, output_change)
raise ValueError("That seems to be an invalid ID!")
@lru_cache(maxsize = None)
def workload_pkgs(self, workload_conf_id, env_conf_id, repo_id, arch, output_change=None):
        # Warning: mixing repos and arches works, but might make a mess of the output
# Default output is just a flat list. Extra fields will be added into each package:
# q_in - set of workload_ids including this pkg
# q_required_in - set of workload_ids where this pkg is required (top-level)
# q_env_in - set of workload_ids where this pkg is in env
# q_arch - architecture
# Other outputs:
# - "ids" — a list ids
# - "binary_names" — a list of RPM names
# - "source_nvr" — a list of SRPM NVRs
# - "source_names" — a list of SRPM names
if output_change:
list_all = True
if output_change not in ["ids", "binary_names", "source_nvr", "source_names"]:
raise ValueError('output_change must be one of: "ids", "binary_names", "source_nvr", "source_names"')
# Step 1: get all the matching workloads!
workload_ids = self.workloads(workload_conf_id, env_conf_id, repo_id, arch, list_all=True)
# I'll need repo_ids and arches to access the packages
repo_ids = self.workloads(workload_conf_id, env_conf_id, repo_id, arch, output_change="repo_ids")
arches = self.workloads(workload_conf_id, env_conf_id, repo_id, arch, output_change="arches")
# Replicating the same structure as in data["pkgs"]
# That is: [repo_id][arch][pkg_id]
pkgs = {}
for repo_id in repo_ids:
pkgs[repo_id] = {}
for arch in arches:
pkgs[repo_id][arch] = {}
# Workloads are already paired with envs, repos, and arches
# (there is one for each combination)
for workload_id in workload_ids:
workload = self.data["workloads"][workload_id]
workload_arch = workload["arch"]
workload_repo_id = workload["repo_id"]
workload_conf_id = workload["workload_conf_id"]
workload_conf = self.configs["workloads"][workload_conf_id]
# First, get all pkgs in the env
for pkg_id in workload["pkg_env_ids"]:
# Add it to the list if it's not there already.
# Create a copy since it's gonna be modified, and include only what's needed
pkg = self.data["pkgs"][workload_repo_id][workload_arch][pkg_id]
if pkg_id not in pkgs[workload_repo_id][workload_arch]:
pkgs[workload_repo_id][workload_arch][pkg_id] = {}
pkgs[workload_repo_id][workload_arch][pkg_id]["id"] = pkg_id
pkgs[workload_repo_id][workload_arch][pkg_id]["name"] = pkg["name"]
pkgs[workload_repo_id][workload_arch][pkg_id]["evr"] = pkg["evr"]
pkgs[workload_repo_id][workload_arch][pkg_id]["arch"] = pkg["arch"]
pkgs[workload_repo_id][workload_arch][pkg_id]["installsize"] = pkg["installsize"]
pkgs[workload_repo_id][workload_arch][pkg_id]["description"] = pkg["description"]
pkgs[workload_repo_id][workload_arch][pkg_id]["summary"] = pkg["summary"]
                    pkgs[workload_repo_id][workload_arch][pkg_id]["source_name"] = pkg["source_name"]
                    pkgs[workload_repo_id][workload_arch][pkg_id]["sourcerpm"] = pkg["sourcerpm"]
pkgs[workload_repo_id][workload_arch][pkg_id]["q_arch"] = workload_arch
pkgs[workload_repo_id][workload_arch][pkg_id]["q_in"] = set()
pkgs[workload_repo_id][workload_arch][pkg_id]["q_required_in"] = set()
pkgs[workload_repo_id][workload_arch][pkg_id]["q_env_in"] = set()
# It's here, so add it
pkgs[workload_repo_id][workload_arch][pkg_id]["q_in"].add(workload_id)
# Browsing env packages, so add it
pkgs[workload_repo_id][workload_arch][pkg_id]["q_env_in"].add(workload_id)
# Is it required?
if pkg["name"] in self.configs["workloads"][workload_conf_id]["packages"]:
pkgs[workload_repo_id][workload_arch][pkg_id]["q_required_in"].add(workload_id)
if pkg["name"] in self.configs["workloads"][workload_conf_id]["arch_packages"][workload_arch]:
pkgs[workload_repo_id][workload_arch][pkg_id]["q_required_in"].add(workload_id)
# Second, add all the other packages
for pkg_id in workload["pkg_added_ids"]:
# Add it to the list if it's not there already
# and initialize extra fields
pkg = self.data["pkgs"][workload_repo_id][workload_arch][pkg_id]
if pkg_id not in pkgs[workload_repo_id][workload_arch]:
pkgs[workload_repo_id][workload_arch][pkg_id] = {}
pkgs[workload_repo_id][workload_arch][pkg_id]["id"] = pkg_id
pkgs[workload_repo_id][workload_arch][pkg_id]["name"] = pkg["name"]
pkgs[workload_repo_id][workload_arch][pkg_id]["evr"] = pkg["evr"]
pkgs[workload_repo_id][workload_arch][pkg_id]["arch"] = pkg["arch"]
pkgs[workload_repo_id][workload_arch][pkg_id]["installsize"] = pkg["installsize"]
pkgs[workload_repo_id][workload_arch][pkg_id]["description"] = pkg["description"]
pkgs[workload_repo_id][workload_arch][pkg_id]["summary"] = pkg["summary"]
                    pkgs[workload_repo_id][workload_arch][pkg_id]["source_name"] = pkg["source_name"]
                    pkgs[workload_repo_id][workload_arch][pkg_id]["sourcerpm"] = pkg["sourcerpm"]
pkgs[workload_repo_id][workload_arch][pkg_id]["q_arch"] = workload_arch
pkgs[workload_repo_id][workload_arch][pkg_id]["q_in"] = set()
pkgs[workload_repo_id][workload_arch][pkg_id]["q_required_in"] = set()
pkgs[workload_repo_id][workload_arch][pkg_id]["q_env_in"] = set()
# It's here, so add it
pkgs[workload_repo_id][workload_arch][pkg_id]["q_in"].add(workload_id)
# Not adding it to q_env_in
# Is it required?
if pkg["name"] in self.configs["workloads"][workload_conf_id]["packages"]:
pkgs[workload_repo_id][workload_arch][pkg_id]["q_required_in"].add(workload_id)
if pkg["name"] in self.configs["workloads"][workload_conf_id]["arch_packages"][workload_arch]:
pkgs[workload_repo_id][workload_arch][pkg_id]["q_required_in"].add(workload_id)
# Third, add package placeholders if any
for placeholder_id in workload["pkg_placeholder_ids"]:
placeholder = workload_conf["package_placeholders"]["pkgs"][pkg_id_to_name(placeholder_id)]
if placeholder_id not in pkgs[workload_repo_id][workload_arch]:
pkgs[workload_repo_id][workload_arch][placeholder_id] = {}
pkgs[workload_repo_id][workload_arch][placeholder_id]["id"] = placeholder_id
pkgs[workload_repo_id][workload_arch][placeholder_id]["name"] = placeholder["name"]
pkgs[workload_repo_id][workload_arch][placeholder_id]["evr"] = "000-placeholder"
pkgs[workload_repo_id][workload_arch][placeholder_id]["arch"] = "placeholder"
pkgs[workload_repo_id][workload_arch][placeholder_id]["installsize"] = 0
pkgs[workload_repo_id][workload_arch][placeholder_id]["description"] = placeholder["description"]
pkgs[workload_repo_id][workload_arch][placeholder_id]["summary"] = placeholder["description"]
                    pkgs[workload_repo_id][workload_arch][placeholder_id]["source_name"] = placeholder["srpm"]
                    pkgs[workload_repo_id][workload_arch][placeholder_id]["sourcerpm"] = "{}-000-placeholder".format(placeholder["srpm"])
pkgs[workload_repo_id][workload_arch][placeholder_id]["q_arch"] = workload_arch
pkgs[workload_repo_id][workload_arch][placeholder_id]["q_in"] = set()
pkgs[workload_repo_id][workload_arch][placeholder_id]["q_required_in"] = set()
pkgs[workload_repo_id][workload_arch][placeholder_id]["q_env_in"] = set()
# It's here, so add it
pkgs[workload_repo_id][workload_arch][placeholder_id]["q_in"].add(workload_id)
# All placeholders are required
pkgs[workload_repo_id][workload_arch][placeholder_id]["q_required_in"].add(workload_id)
# Is it supposed to only output ids?
if output_change:
pkg_names = set()
for repo_id in repo_ids:
for arch in arches:
for pkg_id, pkg in pkgs[repo_id][arch].items():
if output_change == "ids":
pkg_names.add(pkg["id"])
elif output_change == "binary_names":
pkg_names.add(pkg["name"])
elif output_change == "source_nvr":
pkg_names.add(pkg["sourcerpm"])
elif output_change == "source_names":
pkg_names.add(pkg["source_name"])
names_sorted = sorted(list(pkg_names))
return names_sorted
# And now I just need to flatten that dict and return all packages as a list
final_pkg_list = []
for repo_id in repo_ids:
for arch in arches:
for pkg_id, pkg in pkgs[repo_id][arch].items():
final_pkg_list.append(pkg)
# And sort them by nevr which is their ID
final_pkg_list_sorted = sorted(final_pkg_list, key=lambda k: k['id'])
return final_pkg_list_sorted
@lru_cache(maxsize = None)
def workload_pkgs_id(self, id, output_change=None):
# Accepts both env and workload ID, and returns pkgs for workloads that match
id_components = id.split(":")
# It's an env!
if len(id_components) == 3:
env_conf_id = id_components[0]
repo_id = id_components[1]
arch = id_components[2]
return self.workload_pkgs(None, env_conf_id, repo_id, arch, output_change)
# It's a workload!
if len(id_components) == 4:
workload_conf_id = id_components[0]
env_conf_id = id_components[1]
repo_id = id_components[2]
arch = id_components[3]
return self.workload_pkgs(workload_conf_id, env_conf_id, repo_id, arch, output_change)
raise ValueError("That seems to be an invalid ID!")
@lru_cache(maxsize = None)
def env_pkgs(self, env_conf_id, repo_id, arch):
        # Warning: mixing repos and arches works, but might make a mess of the output
# Output is just a flat list. Extra fields will be added into each package:
# q_in - set of env_ids including this pkg
# q_required_in - set of env_ids where this pkg is required (top-level)
# q_arch - architecture
# Step 1: get all the matching envs!
env_ids = self.envs(env_conf_id, repo_id, arch, list_all=True)
# I'll need repo_ids and arches to access the packages
repo_ids = self.envs(env_conf_id, repo_id, arch, output_change="repo_ids")
arches = self.envs(env_conf_id, repo_id, arch, output_change="arches")
# Replicating the same structure as in data["pkgs"]
# That is: [repo_id][arch][pkg_id]
pkgs = {}
for repo_id in repo_ids:
pkgs[repo_id] = {}
for arch in arches:
pkgs[repo_id][arch] = {}
# envs are already paired with repos, and arches
# (there is one for each combination)
for env_id in env_ids:
env = self.data["envs"][env_id]
env_arch = env["arch"]
env_repo_id = env["repo_id"]
env_conf_id = env["env_conf_id"]
for pkg_id in env["pkg_ids"]:
# Add it to the list if it's not there already.
# Create a copy since it's gonna be modified, and include only what's needed
pkg = self.data["pkgs"][env_repo_id][env_arch][pkg_id]
if pkg_id not in pkgs[env_repo_id][env_arch]:
pkgs[env_repo_id][env_arch][pkg_id] = {}
pkgs[env_repo_id][env_arch][pkg_id]["id"] = pkg_id
pkgs[env_repo_id][env_arch][pkg_id]["name"] = pkg["name"]
pkgs[env_repo_id][env_arch][pkg_id]["evr"] = pkg["evr"]
pkgs[env_repo_id][env_arch][pkg_id]["arch"] = pkg["arch"]
pkgs[env_repo_id][env_arch][pkg_id]["installsize"] = pkg["installsize"]
pkgs[env_repo_id][env_arch][pkg_id]["description"] = pkg["description"]
pkgs[env_repo_id][env_arch][pkg_id]["summary"] = pkg["summary"]
pkgs[env_repo_id][env_arch][pkg_id]["source_name"] = pkg["source_name"]
pkgs[env_repo_id][env_arch][pkg_id]["sourcerpm"] = pkg["sourcerpm"]
pkgs[env_repo_id][env_arch][pkg_id]["q_arch"] = env_arch
pkgs[env_repo_id][env_arch][pkg_id]["q_in"] = set()
pkgs[env_repo_id][env_arch][pkg_id]["q_required_in"] = set()
# It's here, so add it
pkgs[env_repo_id][env_arch][pkg_id]["q_in"].add(env_id)
# Is it required?
if pkg["name"] in self.configs["envs"][env_conf_id]["packages"]:
pkgs[env_repo_id][env_arch][pkg_id]["q_required_in"].add(env_id)
if pkg["name"] in self.configs["envs"][env_conf_id]["arch_packages"][env_arch]:
pkgs[env_repo_id][env_arch][pkg_id]["q_required_in"].add(env_id)
# And now I just need to flatten that dict and return all packages as a list
final_pkg_list = []
for repo_id in repo_ids:
for arch in arches:
for pkg_id, pkg in pkgs[repo_id][arch].items():
final_pkg_list.append(pkg)
# And sort them by nevr which is their ID
final_pkg_list_sorted = sorted(final_pkg_list, key=lambda k: k['id'])
return final_pkg_list_sorted
@lru_cache(maxsize = None)
def env_pkgs_id(self, id):
# Accepts both env and workload ID, and returns pkgs for envs that match
id_components = id.split(":")
        # It's an env!
        if len(id_components) == 3:
            env_conf_id = id_components[0]
            repo_id = id_components[1]
            arch = id_components[2]
            return self.env_pkgs(env_conf_id, repo_id, arch)
        # It's a workload!
        if len(id_components) == 4:
            workload_conf_id = id_components[0]
            env_conf_id = id_components[1]
            repo_id = id_components[2]
            arch = id_components[3]
            return self.env_pkgs(env_conf_id, repo_id, arch)
        raise ValueError("That seems to be an invalid ID!")
    @lru_cache(maxsize = None)
def workload_size(self, workload_conf_id, env_conf_id, repo_id, arch):
# A total size of a workload (or multiple combined!)
pkgs = self.workload_pkgs(workload_conf_id, env_conf_id, repo_id, arch)
size = 0
for pkg in pkgs:
size += pkg["installsize"]
return size
@lru_cache(maxsize = None)
def env_size(self, env_conf_id, repo_id, arch):
# A total size of an env (or multiple combined!)
pkgs = self.env_pkgs(env_conf_id, repo_id, arch)
size = 0
for pkg in pkgs:
size += pkg["installsize"]
return size
@lru_cache(maxsize = None)
def workload_size_id(self, id):
        # Accepts both env and workload ID, and returns the total size of workloads that match
id_components = id.split(":")
# It's an env!
if len(id_components) == 3:
env_conf_id = id_components[0]
repo_id = id_components[1]
arch = id_components[2]
return self.workload_size(None, env_conf_id, repo_id, arch)
# It's a workload!
if len(id_components) == 4:
workload_conf_id = id_components[0]
env_conf_id = id_components[1]
repo_id = id_components[2]
arch = id_components[3]
return self.workload_size(workload_conf_id, env_conf_id, repo_id, arch)
raise ValueError("That seems to be an invalid ID!")
@lru_cache(maxsize = None)
def env_size_id(self, id):
        # Accepts both env and workload ID, and returns the total size of envs that match
id_components = id.split(":")
# It's an env!
if len(id_components) == 3:
env_conf_id = id_components[0]
repo_id = id_components[1]
arch = id_components[2]
return self.env_size(env_conf_id, repo_id, arch)
# It's a workload!
if len(id_components) == 4:
workload_conf_id = id_components[0]
env_conf_id = id_components[1]
repo_id = id_components[2]
arch = id_components[3]
return self.env_size(env_conf_id, repo_id, arch)
raise ValueError("That seems to be an invalid ID!")
def workload_url_slug(self, workload_conf_id, env_conf_id, repo_id, arch):
slug = "{workload_conf_id}--{env_conf_id}--{repo_id}--{arch}".format(
workload_conf_id=workload_conf_id,
env_conf_id=env_conf_id,
repo_id=repo_id,
arch=arch
)
return slug
def env_url_slug(self, env_conf_id, repo_id, arch):
slug = "{env_conf_id}--{repo_id}--{arch}".format(
env_conf_id=env_conf_id,
repo_id=repo_id,
arch=arch
)
return slug
def workload_id_string(self, workload_conf_id, env_conf_id, repo_id, arch):
slug = "{workload_conf_id}:{env_conf_id}:{repo_id}:{arch}".format(
workload_conf_id=workload_conf_id,
env_conf_id=env_conf_id,
repo_id=repo_id,
arch=arch
)
return slug
def env_id_string(self, env_conf_id, repo_id, arch):
slug = "{env_conf_id}:{repo_id}:{arch}".format(
env_conf_id=env_conf_id,
repo_id=repo_id,
arch=arch
)
return slug
def url_slug_id(self, any_id):
return any_id.replace(":", "--")
@lru_cache(maxsize = None)
def workloads_in_view(self, view_conf_id, arch, maintainer=None):
view_conf = self.configs["views"][view_conf_id]
repo_id = view_conf["repository"]
labels = view_conf["labels"]
if arch and arch not in self.settings["allowed_arches"]:
raise ValueError("Unsupported arch: {arch}".format(
arch=arch
))
if arch and arch not in self.arches_in_view(view_conf_id):
return []
# First, get a set of workloads matching the repo and the arch
too_many_workload_ids = set()
workload_ids = self.workloads(None,None,repo_id,arch,list_all=True)
too_many_workload_ids.update(workload_ids)
# Second, limit that set further by matching the label
final_workload_ids = set()
for workload_id in too_many_workload_ids:
workload = self.data["workloads"][workload_id]
workload_conf_id = workload["workload_conf_id"]
workload_conf = self.configs["workloads"][workload_conf_id]
if maintainer:
workload_maintainer = workload_conf["maintainer"]
if workload_maintainer != maintainer:
continue
workload_labels = workload["labels"]
for workload_label in workload_labels:
if workload_label in labels:
final_workload_ids.add(workload_id)
return sorted(list(final_workload_ids))
@lru_cache(maxsize = None)
def arches_in_view(self, view_conf_id, maintainer=None):
if len(self.configs["views"][view_conf_id]["architectures"]):
arches = self.configs["views"][view_conf_id]["architectures"]
return sorted(arches)
return self.settings["allowed_arches"]
@lru_cache(maxsize = None)
def pkgs_in_view(self, view_conf_id, arch, output_change=None, maintainer=None):
# Extra fields will be added into each package:
# q_in - set of workload_ids including this pkg
# q_required_in - set of workload_ids where this pkg is required (top-level)
# q_env_in - set of workload_ids where this pkg is in env
# q_dep_in - set of workload_ids where this pkg is a dependency (that means not required)
# q_maintainers - set of workload maintainers
# Other outputs:
# - "ids" — a list of ids (NEVRA)
# - "nevrs" — a list of NEVR
# - "binary_names" — a list of RPM names
# - "source_nvr" — a list of SRPM NVRs
# - "source_names" — a list of SRPM names
if output_change:
list_all = True
if output_change not in ["ids", "nevrs", "binary_names", "source_nvr", "source_names"]:
raise ValueError('output_change must be one of: "ids", "nevrs", "binary_names", "source_nvr", "source_names"')
# -----
# Step 1: get all packages from all workloads in this view
# -----
workload_ids = self.workloads_in_view(view_conf_id, arch)
repo_id = self.configs["views"][view_conf_id]["repository"]
# This has just one repo and one arch, so a flat list of IDs is enough
pkgs = {}
for workload_id in workload_ids:
workload = self.data["workloads"][workload_id]
workload_conf_id = workload["workload_conf_id"]
workload_conf = self.configs["workloads"][workload_conf_id]
# First, get all pkgs in the env
for pkg_id in workload["pkg_env_ids"]:
# Add it to the list if it's not there already.
# Create a copy since it's gonna be modified, and include only what's needed
pkg = self.data["pkgs"][repo_id][arch][pkg_id]
if pkg_id not in pkgs:
pkgs[pkg_id] = {}
pkgs[pkg_id]["id"] = pkg_id
pkgs[pkg_id]["name"] = pkg["name"]
pkgs[pkg_id]["evr"] = pkg["evr"]
pkgs[pkg_id]["arch"] = pkg["arch"]
pkgs[pkg_id]["installsize"] = pkg["installsize"]
pkgs[pkg_id]["description"] = pkg["description"]
pkgs[pkg_id]["summary"] = pkg["summary"]
pkgs[pkg_id]["source_name"] = pkg["source_name"]
pkgs[pkg_id]["sourcerpm"] = pkg["sourcerpm"]
pkgs[pkg_id]["q_arch"] = arch
pkgs[pkg_id]["q_in"] = set()
pkgs[pkg_id]["q_required_in"] = set()
pkgs[pkg_id]["q_dep_in"] = set()
pkgs[pkg_id]["q_env_in"] = set()
pkgs[pkg_id]["q_maintainers"] = set()
# It's here, so add it
pkgs[pkg_id]["q_in"].add(workload_id)
# Browsing env packages, so add it
pkgs[pkg_id]["q_env_in"].add(workload_id)
# Is it required?
if pkg["name"] in self.configs["workloads"][workload_conf_id]["packages"]:
pkgs[pkg_id]["q_required_in"].add(workload_id)
if pkg["name"] in self.configs["workloads"][workload_conf_id]["arch_packages"][arch]:
pkgs[pkg_id]["q_required_in"].add(workload_id)
# Second, add all the other packages
for pkg_id in workload["pkg_added_ids"]:
# Add it to the list if it's not there already
# and initialize extra fields
pkg = self.data["pkgs"][repo_id][arch][pkg_id]
if pkg_id not in pkgs:
pkgs[pkg_id] = {}
pkgs[pkg_id]["id"] = pkg_id
pkgs[pkg_id]["name"] = pkg["name"]
pkgs[pkg_id]["evr"] = pkg["evr"]
pkgs[pkg_id]["arch"] = pkg["arch"]
pkgs[pkg_id]["installsize"] = pkg["installsize"]
pkgs[pkg_id]["description"] = pkg["description"]
pkgs[pkg_id]["summary"] = pkg["summary"]
pkgs[pkg_id]["source_name"] = pkg["source_name"]
pkgs[pkg_id]["sourcerpm"] = pkg["sourcerpm"]
pkgs[pkg_id]["q_arch"] = arch
pkgs[pkg_id]["q_in"] = set()
pkgs[pkg_id]["q_required_in"] = set()
pkgs[pkg_id]["q_dep_in"] = set()
pkgs[pkg_id]["q_env_in"] = set()
pkgs[pkg_id]["q_maintainers"] = set()
# It's here, so add it
pkgs[pkg_id]["q_in"].add(workload_id)
# Not adding it to q_env_in
# Is it required?
if pkg["name"] in self.configs["workloads"][workload_conf_id]["packages"]:
pkgs[pkg_id]["q_required_in"].add(workload_id)
elif pkg["name"] in self.configs["workloads"][workload_conf_id]["arch_packages"][arch]:
pkgs[pkg_id]["q_required_in"].add(workload_id)
else:
pkgs[pkg_id]["q_dep_in"].add(workload_id)
# Maintainer
pkgs[pkg_id]["q_maintainers"].add(workload_conf["maintainer"])
# Third, add package placeholders if any
for placeholder_id in workload["pkg_placeholder_ids"]:
placeholder = workload_conf["package_placeholders"]["pkgs"][pkg_id_to_name(placeholder_id)]
if placeholder_id not in pkgs:
pkgs[placeholder_id] = {}
pkgs[placeholder_id]["id"] = placeholder_id
pkgs[placeholder_id]["name"] = placeholder["name"]
pkgs[placeholder_id]["evr"] = "000-placeholder"
pkgs[placeholder_id]["arch"] = "placeholder"
pkgs[placeholder_id]["installsize"] = 0
pkgs[placeholder_id]["description"] = placeholder["description"]
pkgs[placeholder_id]["summary"] = placeholder["description"]
pkgs[placeholder_id]["source_name"] = placeholder["srpm"]
pkgs[placeholder_id]["sourcerpm"] = "{}-000-placeholder".format(placeholder["srpm"])
pkgs[placeholder_id]["q_arch"] = arch
pkgs[placeholder_id]["q_in"] = set()
pkgs[placeholder_id]["q_required_in"] = set()
pkgs[placeholder_id]["q_dep_in"] = set()
pkgs[placeholder_id]["q_env_in"] = set()
pkgs[placeholder_id]["q_maintainers"] = set()
# It's here, so add it
pkgs[placeholder_id]["q_in"].add(workload_id)
# All placeholders are required
pkgs[placeholder_id]["q_required_in"].add(workload_id)
# Maintainer
pkgs[placeholder_id]["q_maintainers"].add(workload_conf["maintainer"])
# -----
# Step 2: narrow the package list down based on various criteria
# -----
# Is this an addon view?
# Then I need to remove all packages that are already
# in the base view
view_conf = self.configs["views"][view_conf_id]
if view_conf["type"] == "addon":
base_view_id = view_conf["base_view_id"]
# I always need to get all package IDs
base_pkg_ids = self.pkgs_in_view(base_view_id, arch, output_change="ids")
for base_pkg_id in base_pkg_ids:
if base_pkg_id in pkgs:
del pkgs[base_pkg_id]
# Filtering by a maintainer?
# Filter out packages not belonging to the maintainer
        # It's done at this stage so that fields like "q_required_in"
        # still reflect the whole view, not just this maintainer's packages
pkg_ids_to_delete = set()
if maintainer:
for pkg_id, pkg in pkgs.items():
if maintainer not in pkg["q_maintainers"]:
pkg_ids_to_delete.add(pkg_id)
for pkg_id in pkg_ids_to_delete:
del pkgs[pkg_id]
# -----
# Step 3: Make the output to be the right format
# -----
# Is it supposed to only output ids?
if output_change:
pkg_names = set()
for pkg_id, pkg in pkgs.items():
if output_change == "ids":
pkg_names.add(pkg["id"])
elif output_change == "nevrs":
pkg_names.add("{name}-{evr}".format(
name=pkg["name"],
evr=pkg["evr"]
))
elif output_change == "binary_names":
pkg_names.add(pkg["name"])
elif output_change == "source_nvr":
pkg_names.add(pkg["sourcerpm"])
elif output_change == "source_names":
pkg_names.add(pkg["source_name"])
names_sorted = sorted(list(pkg_names))
return names_sorted
# And now I just need to flatten that dict and return all packages as a list
final_pkg_list = []
for pkg_id, pkg in pkgs.items():
final_pkg_list.append(pkg)
# And sort them by nevr which is their ID
final_pkg_list_sorted = sorted(final_pkg_list, key=lambda k: k['id'])
return final_pkg_list_sorted
@lru_cache(maxsize = None)
def view_buildroot_pkgs(self, view_conf_id, arch, output_change=None, maintainer=None):
# Other outputs:
# - "source_names" — a list of SRPM names
if output_change:
if output_change not in ["source_names"]:
raise ValueError('output_change must be one of: "source_names"')
pkgs = {}
buildroot_conf_id = None
for conf_id, conf in self.configs["buildroots"].items():
if conf["view_id"] == view_conf_id:
buildroot_conf_id = conf_id
if not buildroot_conf_id:
if output_change == "source_names":
return []
return {}
# Populate pkgs
base_buildroot = self.configs["buildroots"][buildroot_conf_id]["base_buildroot"][arch]
source_pkgs = self.configs["buildroots"][buildroot_conf_id]["source_packages"][arch]
for pkg_name in base_buildroot:
if pkg_name not in pkgs:
pkgs[pkg_name] = {}
pkgs[pkg_name]["required_by"] = set()
pkgs[pkg_name]["base_buildroot"] = True
pkgs[pkg_name]["srpm_name"] = None
for srpm_name, srpm_data in source_pkgs.items():
for pkg_name in srpm_data["requires"]:
if pkg_name not in pkgs:
pkgs[pkg_name] = {}
pkgs[pkg_name]["required_by"] = set()
pkgs[pkg_name]["base_buildroot"] = False
pkgs[pkg_name]["srpm_name"] = None
pkgs[pkg_name]["required_by"].add(srpm_name)
for buildroot_pkg_relations_conf_id, buildroot_pkg_relations_conf in self.configs["buildroot_pkg_relations"].items():
if view_conf_id != buildroot_pkg_relations_conf["view_id"]:
continue
if arch != buildroot_pkg_relations_conf["arch"]:
continue
buildroot_pkg_relations = buildroot_pkg_relations_conf["pkg_relations"]
for this_pkg_id in buildroot_pkg_relations:
this_pkg_name = pkg_id_to_name(this_pkg_id)
if this_pkg_name in pkgs:
if this_pkg_id in buildroot_pkg_relations and not pkgs[this_pkg_name]["srpm_name"]:
pkgs[this_pkg_name]["srpm_name"] = buildroot_pkg_relations[this_pkg_id]["source_name"]
if output_change == "source_names":
srpms = set()
for pkg_name, pkg in pkgs.items():
if pkg["srpm_name"]:
srpms.add(pkg["srpm_name"])
srpm_names_sorted = sorted(list(srpms))
return srpm_names_sorted
return pkgs
@lru_cache(maxsize = None)
def workload_succeeded(self, workload_conf_id, env_conf_id, repo_id, arch):
workload_ids = self.workloads(workload_conf_id, env_conf_id, repo_id, arch, list_all=True)
for workload_id in workload_ids:
workload = self.data["workloads"][workload_id]
if not workload["succeeded"]:
return False
return True
@lru_cache(maxsize = None)
def workload_warnings(self, workload_conf_id, env_conf_id, repo_id, arch):
workload_ids = self.workloads(workload_conf_id, env_conf_id, repo_id, arch, list_all=True)
for workload_id in workload_ids:
workload = self.data["workloads"][workload_id]
if workload["warnings"]["message"]:
return True
return False
@lru_cache(maxsize = None)
def env_succeeded(self, env_conf_id, repo_id, arch):
env_ids = self.envs(env_conf_id, repo_id, arch, list_all=True)
for env_id in env_ids:
env = self.data["envs"][env_id]
if not env["succeeded"]:
return False
return True
@lru_cache(maxsize = None)
def view_succeeded(self, view_conf_id, arch, maintainer=None):
workload_ids = self.workloads_in_view(view_conf_id, arch)
for workload_id in workload_ids:
workload = self.data["workloads"][workload_id]
workload_conf_id = workload["workload_conf_id"]
workload_conf = self.configs["workloads"][workload_conf_id]
if maintainer:
workload_maintainer = workload_conf["maintainer"]
if workload_maintainer != maintainer:
continue
if not workload["succeeded"]:
return False
return True
def _srpm_name_to_rpm_names(self, srpm_name, repo_id):
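        # Returns the names of all binary RPMs built from the given SRPM, across all arches in the given repo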
all_pkgs_by_arch = self.data["pkgs"][repo_id]
pkg_names = set()
for arch, pkgs in all_pkgs_by_arch.items():
for pkg_id, pkg in pkgs.items():
if pkg["source_name"] == srpm_name:
pkg_names.add(pkg["name"])
return pkg_names
@lru_cache(maxsize = None)
def view_unwanted_pkgs(self, view_conf_id, arch, output_change=None, maintainer=None):
        # Other outputs (these restrict which lists are consulted):
        # - "unwanted_proposals" — only packages proposed by matching exclusion lists
        # - "unwanted_confirmed" — only packages listed directly in the view config
output_lists = ["unwanted_proposals", "unwanted_confirmed"]
if output_change:
if output_change not in output_lists:
                raise ValueError('output_change must be one of: "unwanted_proposals", "unwanted_confirmed"')
            output_lists = [output_change]
view_conf = self.configs["views"][view_conf_id]
repo_id = view_conf["repository"]
        # Find exclusion lists matching this view's label(s)
unwanted_ids = set()
for view_label in view_conf["labels"]:
for unwanted_id, unwanted in self.configs["unwanteds"].items():
if maintainer:
unwanted_maintainer = unwanted["maintainer"]
if unwanted_maintainer != maintainer:
continue
for unwanted_label in unwanted["labels"]:
if view_label == unwanted_label:
unwanted_ids.add(unwanted_id)
# This will be the package list
unwanted_pkg_names = {}
arches = self.settings["allowed_arches"]
if arch:
arches = [arch]
### Step 1: Get packages from this view's config (unwanted confirmed)
if "unwanted_confirmed" in output_lists:
if not maintainer:
for pkg_name in view_conf["unwanted_packages"]:
pkg = {}
pkg["name"] = pkg_name
pkg["unwanted_in_view"] = True
pkg["unwanted_list_ids"] = []
unwanted_pkg_names[pkg_name] = pkg
for arch in arches:
for pkg_name in view_conf["unwanted_arch_packages"][arch]:
if pkg_name in unwanted_pkg_names:
continue
pkg = {}
pkg["name"] = pkg_name
pkg["unwanted_in_view"] = True
pkg["unwanted_list_ids"] = []
unwanted_pkg_names[pkg_name] = pkg
for pkg_source_name in view_conf["unwanted_source_packages"]:
for pkg_name in self._srpm_name_to_rpm_names(pkg_source_name, repo_id):
if pkg_name in unwanted_pkg_names:
continue
pkg = {}
pkg["name"] = pkg_name
pkg["unwanted_in_view"] = True
pkg["unwanted_list_ids"] = []
unwanted_pkg_names[pkg_name] = pkg
### Step 2: Get packages from the various exclusion lists (unwanted proposal)
if "unwanted_proposals" in output_lists:
for unwanted_id in unwanted_ids:
unwanted_conf = self.configs["unwanteds"][unwanted_id]
for pkg_name in unwanted_conf["unwanted_packages"]:
if pkg_name in unwanted_pkg_names:
unwanted_pkg_names[pkg_name]["unwanted_list_ids"].append(unwanted_id)
continue
pkg = {}
pkg["name"] = pkg_name
pkg["unwanted_in_view"] = False
pkg["unwanted_list_ids"] = [unwanted_id]
unwanted_pkg_names[pkg_name] = pkg
for arch in arches:
for pkg_name in unwanted_conf["unwanted_arch_packages"][arch]:
if pkg_name in unwanted_pkg_names:
unwanted_pkg_names[pkg_name]["unwanted_list_ids"].append(unwanted_id)
continue
pkg = {}
pkg["name"] = pkg_name
pkg["unwanted_in_view"] = True
pkg["unwanted_list_ids"] = []
unwanted_pkg_names[pkg_name] = pkg
for pkg_source_name in unwanted_conf["unwanted_source_packages"]:
for pkg_name in self._srpm_name_to_rpm_names(pkg_source_name, repo_id):
if pkg_name in unwanted_pkg_names:
unwanted_pkg_names[pkg_name]["unwanted_list_ids"].append(unwanted_id)
continue
pkg = {}
pkg["name"] = pkg_name
pkg["unwanted_in_view"] = False
pkg["unwanted_list_ids"] = [unwanted_id]
unwanted_pkg_names[pkg_name] = pkg
#self.cache["view_unwanted_pkgs"][view_conf_id][arch] = unwanted_pkg_names
return unwanted_pkg_names
@lru_cache(maxsize = None)
def view_placeholder_srpms(self, view_conf_id, arch):
if not arch:
raise ValueError("arch must be specified, can't be None")
workload_ids = self.workloads_in_view(view_conf_id, arch)
placeholder_srpms = {}
# {
# "SRPM_NAME": {
# "build_requires": set()
# }
# }
for workload_id in workload_ids:
workload = self.data["workloads"][workload_id]
workload_conf_id = workload["workload_conf_id"]
workload_conf = self.configs["workloads"][workload_conf_id]
for pkg_placeholder_name, pkg_placeholder in workload_conf["package_placeholders"]["srpms"].items():
# Placeholders can be limited to specific architectures.
# If that's the case, check if it's available on this arch, otherwise skip it.
if pkg_placeholder["limit_arches"]:
if arch not in pkg_placeholder["limit_arches"]:
continue
srpm_name = pkg_placeholder["name"]
buildrequires = pkg_placeholder["buildrequires"]
if srpm_name not in placeholder_srpms:
placeholder_srpms[srpm_name] = {}
placeholder_srpms[srpm_name]["build_requires"] = set()
placeholder_srpms[srpm_name]["build_requires"].update(buildrequires)
return placeholder_srpms
@lru_cache(maxsize = None)
def view_modules(self, view_conf_id, arch, maintainer=None):
workload_ids = self.workloads_in_view(view_conf_id, arch, maintainer)
modules = {}
for workload_id in workload_ids:
workload = self.data["workloads"][workload_id]
workload_conf_id = workload["workload_conf_id"]
workload_conf = self.configs["workloads"][workload_conf_id]
required_modules = workload_conf["modules_enable"]
for module_id in workload["enabled_modules"]:
if module_id not in modules:
modules[module_id] = {}
modules[module_id]["id"] = module_id
modules[module_id]["q_in"] = set()
modules[module_id]["q_required_in"] = set()
modules[module_id]["q_dep_in"] = set()
modules[module_id]["q_in"].add(workload_id)
if module_id in required_modules:
modules[module_id]["q_required_in"].add(workload_id)
else:
modules[module_id]["q_dep_in"].add(workload_id)
return modules
@lru_cache(maxsize = None)
def view_maintainers(self, view_conf_id, arch):
workload_ids = self.workloads_in_view(view_conf_id, arch)
maintainers = set()
for workload_id in workload_ids:
workload = self.data["workloads"][workload_id]
workload_conf_id = workload["workload_conf_id"]
workload_conf = self.configs["workloads"][workload_conf_id]
maintainers.add(workload_conf["maintainer"])
return maintainers
@lru_cache(maxsize = None)
def maintainers(self):
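        # Returns {maintainer_name: {"name": ..., "all_succeeded": bool}} across all workloads and envs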
maintainers = {}
for workload_id in self.workloads(None, None, None, None, list_all=True):
workload = self.data["workloads"][workload_id]
workload_conf_id = workload["workload_conf_id"]
workload_conf = self.configs["workloads"][workload_conf_id]
maintainer = workload_conf["maintainer"]
if maintainer not in maintainers:
maintainers[maintainer] = {}
maintainers[maintainer]["name"] = maintainer
maintainers[maintainer]["all_succeeded"] = True
if not workload["succeeded"]:
maintainers[maintainer]["all_succeeded"] = False
for env_id in self.envs(None, None, None, list_all=True):
env = self.data["envs"][env_id]
env_conf_id = env["env_conf_id"]
env_conf = self.configs["envs"][env_conf_id]
maintainer = env_conf["maintainer"]
if maintainer not in maintainers:
maintainers[maintainer] = {}
maintainers[maintainer]["name"] = maintainer
maintainers[maintainer]["all_succeeded"] = True
if not env["succeeded"]:
maintainers[maintainer]["all_succeeded"] = False
return maintainers
@lru_cache(maxsize = None)
def view_pkg_name_details(self, pkg_name, view_conf_id):
raise NotImplementedError
@lru_cache(maxsize = None)
def view_srpm_name_details(self, srpm_name, view_conf_id):
raise NotImplementedError
###############################################################################
### Generating html pages! ####################################################
###############################################################################
def _generate_html_page(template_name, template_data, page_name, settings):
log("Generating the '{page_name}' page...".format(
page_name=page_name
))
output = settings["output"]
template_env = settings["jinja2_template_env"]
template = template_env.get_template("{template_name}.html".format(
template_name=template_name
))
if not template_data:
template_data = {}
template_data["global_refresh_time_started"] = settings["global_refresh_time_started"]
page = template.render(**template_data)
filename = ("{page_name}.html".format(
page_name=page_name.replace(":", "--")
))
log(" Writing file... ({filename})".format(
filename=filename
))
with open(os.path.join(output, filename), "w") as file:
file.write(page)
log(" Done!")
log("")
def _generate_json_page(data, page_name, settings):
log("Generating the '{page_name}' JSON page...".format(
page_name=page_name
))
output = settings["output"]
filename = ("{page_name}.json".format(
page_name=page_name.replace(":", "--")
))
log(" Writing file... ({filename})".format(
filename=filename
))
dump_data(os.path.join(output, filename), data)
log(" Done!")
log("")
def _generate_workload_pages(query):
log("Generating workload pages...")
# Workload overview pages
for workload_conf_id in query.workloads(None,None,None,None,output_change="workload_conf_ids"):
for repo_id in query.workloads(workload_conf_id,None,None,None,output_change="repo_ids"):
template_data = {
"query": query,
"workload_conf_id": workload_conf_id,
"repo_id": repo_id
}
page_name = "workload-overview--{workload_conf_id}--{repo_id}".format(
workload_conf_id=workload_conf_id,
repo_id=repo_id
)
_generate_html_page("workload_overview", template_data, page_name, query.settings)
# Workload detail pages
for workload_id in query.workloads(None,None,None,None,list_all=True):
workload = query.data["workloads"][workload_id]
workload_conf_id = workload["workload_conf_id"]
workload_conf = query.configs["workloads"][workload_conf_id]
env_conf_id = workload["env_conf_id"]
env_conf = query.configs["envs"][env_conf_id]
repo_id = workload["repo_id"]
repo = query.configs["repos"][repo_id]
template_data = {
"query": query,
"workload_id": workload_id,
"workload": workload,
"workload_conf": workload_conf,
"env_conf": env_conf,
"repo": repo
}
page_name = "workload--{workload_id}".format(
workload_id=workload_id
)
_generate_html_page("workload", template_data, page_name, query.settings)
page_name = "workload-dependencies--{workload_id}".format(
workload_id=workload_id
)
_generate_html_page("workload_dependencies", template_data, page_name, query.settings)
# Workload compare arches pages
for workload_conf_id in query.workloads(None,None,None,None,output_change="workload_conf_ids"):
for env_conf_id in query.workloads(workload_conf_id,None,None,None,output_change="env_conf_ids"):
for repo_id in query.workloads(workload_conf_id,env_conf_id,None,None,output_change="repo_ids"):
arches = query.workloads(workload_conf_id,env_conf_id,repo_id,None,output_change="arches")
workload_conf = query.configs["workloads"][workload_conf_id]
env_conf = query.configs["envs"][env_conf_id]
repo = query.configs["repos"][repo_id]
columns = {}
rows = set()
for arch in arches:
columns[arch] = {}
pkgs = query.workload_pkgs(workload_conf_id,env_conf_id,repo_id,arch)
for pkg in pkgs:
name = pkg["name"]
rows.add(name)
columns[arch][name] = pkg
template_data = {
"query": query,
"workload_conf_id": workload_conf_id,
"workload_conf": workload_conf,
"env_conf_id": env_conf_id,
"env_conf": env_conf,
"repo_id": repo_id,
"repo": repo,
"columns": columns,
"rows": rows
}
page_name = "workload-cmp-arches--{workload_conf_id}--{env_conf_id}--{repo_id}".format(
workload_conf_id=workload_conf_id,
env_conf_id=env_conf_id,
repo_id=repo_id
)
_generate_html_page("workload_cmp_arches", template_data, page_name, query.settings)
# Workload compare envs pages
for workload_conf_id in query.workloads(None,None,None,None,output_change="workload_conf_ids"):
for repo_id in query.workloads(workload_conf_id,None,None,None,output_change="repo_ids"):
for arch in query.workloads(workload_conf_id,None,repo_id,None,output_change="arches"):
env_conf_ids = query.workloads(workload_conf_id,None,repo_id,arch,output_change="env_conf_ids")
workload_conf = query.configs["workloads"][workload_conf_id]
repo = query.configs["repos"][repo_id]
columns = {}
rows = set()
for env_conf_id in env_conf_ids:
columns[env_conf_id] = {}
pkgs = query.workload_pkgs(workload_conf_id,env_conf_id,repo_id,arch)
for pkg in pkgs:
name = pkg["name"]
rows.add(name)
columns[env_conf_id][name] = pkg
template_data = {
"query": query,
"workload_conf_id": workload_conf_id,
"workload_conf": workload_conf,
"repo_id": repo_id,
"repo": repo,
"arch": arch,
"columns": columns,
"rows": rows
}
page_name = "workload-cmp-envs--{workload_conf_id}--{repo_id}--{arch}".format(
workload_conf_id=workload_conf_id,
repo_id=repo_id,
arch=arch
)
_generate_html_page("workload_cmp_envs", template_data, page_name, query.settings)
log(" Done!")
log("")
def _generate_env_pages(query):
log("Generating env pages...")
for env_conf_id in query.envs(None,None,None,output_change="env_conf_ids"):
for repo_id in query.envs(env_conf_id,None,None,output_change="repo_ids"):
template_data = {
"query": query,
"env_conf_id": env_conf_id,
"repo_id": repo_id
}
page_name = "env-overview--{env_conf_id}--{repo_id}".format(
env_conf_id=env_conf_id,
repo_id=repo_id
)
_generate_html_page("env_overview", template_data, page_name, query.settings)
# env detail pages
for env_id in query.envs(None,None,None,list_all=True):
env = query.data["envs"][env_id]
env_conf_id = env["env_conf_id"]
env_conf = query.configs["envs"][env_conf_id]
repo_id = env["repo_id"]
repo = query.configs["repos"][repo_id]
template_data = {
"query": query,
"env_id": env_id,
"env": env,
"env_conf": env_conf,
"repo": repo
}
page_name = "env--{env_id}".format(
env_id=env_id
)
_generate_html_page("env", template_data, page_name, query.settings)
page_name = "env-dependencies--{env_id}".format(
env_id=env_id
)
_generate_html_page("env_dependencies", template_data, page_name, query.settings)
# env compare arches pages
for env_conf_id in query.envs(None,None,None,output_change="env_conf_ids"):
for repo_id in query.envs(env_conf_id,None,None,output_change="repo_ids"):
arches = query.envs(env_conf_id,repo_id,None,output_change="arches")
env_conf = query.configs["envs"][env_conf_id]
repo = query.configs["repos"][repo_id]
columns = {}
rows = set()
for arch in arches:
columns[arch] = {}
pkgs = query.env_pkgs(env_conf_id,repo_id,arch)
for pkg in pkgs:
name = pkg["name"]
rows.add(name)
columns[arch][name] = pkg
template_data = {
"query": query,
"env_conf_id": env_conf_id,
"env_conf": env_conf,
"repo_id": repo_id,
"repo": repo,
"columns": columns,
"rows": rows
}
page_name = "env-cmp-arches--{env_conf_id}--{repo_id}".format(
env_conf_id=env_conf_id,
repo_id=repo_id
)
_generate_html_page("env_cmp_arches", template_data, page_name, query.settings)
log(" Done!")
log("")
def _generate_maintainer_pages(query):
log("Generating maintainer pages...")
for maintainer in query.maintainers():
template_data = {
"query": query,
"maintainer": maintainer
}
page_name = "maintainer--{maintainer}".format(
maintainer=maintainer
)
_generate_html_page("maintainer", template_data, page_name, query.settings)
log(" Done!")
log("")
def _generate_config_pages(query):
log("Generating config pages...")
for conf_type in ["repos", "envs", "workloads", "labels", "views", "unwanteds"]:
template_data = {
"query": query,
"conf_type": conf_type
}
page_name = "configs_{conf_type}".format(
conf_type=conf_type
)
_generate_html_page("configs", template_data, page_name, query.settings)
# Config repo pages
for repo_id,repo_conf in query.configs["repos"].items():
template_data = {
"query": query,
"repo_conf": repo_conf
}
page_name = "config-repo--{repo_id}".format(
repo_id=repo_id
)
_generate_html_page("config_repo", template_data, page_name, query.settings)
# Config env pages
for env_conf_id,env_conf in query.configs["envs"].items():
template_data = {
"query": query,
"env_conf": env_conf
}
page_name = "config-env--{env_conf_id}".format(
env_conf_id=env_conf_id
)
_generate_html_page("config_env", template_data, page_name, query.settings)
# Config workload pages
for workload_conf_id,workload_conf in query.configs["workloads"].items():
template_data = {
"query": query,
"workload_conf": workload_conf
}
page_name = "config-workload--{workload_conf_id}".format(
workload_conf_id=workload_conf_id
)
_generate_html_page("config_workload", template_data, page_name, query.settings)
# Config label pages
for label_conf_id,label_conf in query.configs["labels"].items():
template_data = {
"query": query,
"label_conf": label_conf
}
page_name = "config-label--{label_conf_id}".format(
label_conf_id=label_conf_id
)
_generate_html_page("config_label", template_data, page_name, query.settings)
# Config view pages
for view_conf_id,view_conf in query.configs["views"].items():
template_data = {
"query": query,
"view_conf": view_conf
}
page_name = "config-view--{view_conf_id}".format(
view_conf_id=view_conf_id
)
_generate_html_page("config_view", template_data, page_name, query.settings)
# Config unwanted pages
for unwanted_conf_id,unwanted_conf in query.configs["unwanteds"].items():
template_data = {
"query": query,
"unwanted_conf": unwanted_conf
}
page_name = "config-unwanted--{unwanted_conf_id}".format(
unwanted_conf_id=unwanted_conf_id
)
_generate_html_page("config_unwanted", template_data, page_name, query.settings)
log(" Done!")
log("")
def _generate_repo_pages(query):
log("Generating repo pages...")
for repo_id, repo in query.configs["repos"].items():
for arch in repo["source"]["architectures"]:
template_data = {
"query": query,
"repo": repo,
"arch": arch
}
page_name = "repo--{repo_id}--{arch}".format(
repo_id=repo_id,
arch=arch
)
_generate_html_page("repo", template_data, page_name, query.settings)
log(" Done!")
log("")
def _generate_view_pages_new(query):
log("Generating view pages... (the new function)")
for view_conf_id, view_conf in query.configs["views"].items():
# Skip the obsolete dep_tracker build strategy; that's handled by _generate_view_pages_old
if view_conf["type"] == "compose" and view_conf["buildroot_strategy"] == "dep_tracker":
continue
# Skip all the old views
#if view_conf["type"] not in ["compose"]:
# continue
#
#if view_conf["buildroot_strategy"] not in ["root_logs"]:
# continue
# Common data
view_all_arches = query.data["views_all_arches"][view_conf_id]
template_data = {
"query": query,
"view_conf": view_conf,
"view_all_arches": view_all_arches
}
# Generate the overview page
page_name = "view--{view_conf_id}".format(
view_conf_id=view_conf_id
)
_generate_html_page("view_overview", template_data, page_name, query.settings)
# Generate the packages page
page_name = "view-packages--{view_conf_id}".format(
view_conf_id=view_conf_id
)
_generate_html_page("view_packages", template_data, page_name, query.settings)
# Generate the source packages page
page_name = "view-sources--{view_conf_id}".format(
view_conf_id=view_conf_id
)
_generate_html_page("view_sources", template_data, page_name, query.settings)
# Generate the modules page
page_name = "view-modules--{view_conf_id}".format(
view_conf_id=view_conf_id
)
_generate_html_page("view_modules", template_data, page_name, query.settings)
# Generate the unwanted packages page
page_name = "view-unwanted--{view_conf_id}".format(
view_conf_id=view_conf_id
)
_generate_html_page("view_unwanted", template_data, page_name, query.settings)
# Generate the workloads page
page_name = "view-workloads--{view_conf_id}".format(
view_conf_id=view_conf_id
)
_generate_html_page("view_workloads", template_data, page_name, query.settings)
# Generate the errors page
page_name = "view-errors--{view_conf_id}".format(
view_conf_id=view_conf_id
)
_generate_html_page("view_errors", template_data, page_name, query.settings)
# Generate the arch lists
for arch in view_conf["architectures"]:
view_id = "{view_conf_id}:{arch}".format(
view_conf_id=view_conf_id,
arch=arch
)
view = query.data["views"][view_id]
template_data = {
"query": query,
"view_conf": view_conf,
"view": view,
"arch": arch,
}
page_name = "view--{view_conf_id}--{arch}".format(
view_conf_id=view_conf_id,
arch=arch
)
#_generate_html_page("view_packages", template_data, page_name, query.settings)
# ...
# Generate the RPM pages
for pkg_name, pkg in view_all_arches["pkgs_by_name"].items():
template_data = {
"query": query,
"view_conf": view_conf,
"view_all_arches": view_all_arches,
"pkg": pkg,
}
page_name = "view-rpm--{view_conf_id}--{pkg_name}".format(
view_conf_id=view_conf_id,
pkg_name=pkg_name
)
_generate_html_page("view_rpm", template_data, page_name, query.settings)
# Generate the SRPM pages
for srpm_name, srpm in view_all_arches["source_pkgs_by_name"].items():
template_data = {
"query": query,
"view_conf": view_conf,
"view_all_arches": view_all_arches,
"srpm": srpm,
}
page_name = "view-srpm--{view_conf_id}--{srpm_name}".format(
view_conf_id=view_conf_id,
srpm_name=srpm_name
)
_generate_html_page("view_srpm", template_data, page_name, query.settings)
def _generate_view_pages_old(query):
log("Generating view pages... (the old function)")
for view_conf_id, view_conf in query.configs["views"].items():
# This function is now only used for the obsolete dep_tracker build strategy
if view_conf["type"] == "compose" and view_conf["buildroot_strategy"] == "dep_tracker":
# ==================
# === Part 1 ===
# ==================
#
# First, generate the overview page comparing all architectures
log(" Generating 'compose' view overview {view_conf_id}".format(
view_conf_id=view_conf_id
))
repo_id = view_conf["repository"]
# That page needs the number of binary and source packages for each architecture
arch_pkg_counts = {}
all_arches_nevrs = set()
all_arches_unwanteds = set()
all_arches_source_nvrs = set()
for arch in query.settings["allowed_arches"]:
arch_pkg_counts[arch] = {}
workload_ids = query.workloads_in_view(view_conf_id, arch=arch)
pkg_ids = query.pkgs_in_view(view_conf_id, arch, output_change="ids")
pkg_nevrs = query.pkgs_in_view(view_conf_id, arch, output_change="nevrs")
pkg_binary_names = query.pkgs_in_view(view_conf_id, arch, output_change="binary_names")
pkg_source_nvr = query.pkgs_in_view(view_conf_id, arch, output_change="source_nvr")
pkg_source_names = query.pkgs_in_view(view_conf_id, arch, output_change="source_names")
unwanted_pkgs = query.view_unwanted_pkgs(view_conf_id, arch)
unwanted_packages_count = 0
for pkg_name in unwanted_pkgs:
if pkg_name in pkg_binary_names:
unwanted_packages_count += 1
all_arches_unwanteds.add(pkg_name)
arch_pkg_counts[arch]["pkg_ids"] = len(pkg_ids)
arch_pkg_counts[arch]["pkg_binary_names"] = len(pkg_binary_names)
arch_pkg_counts[arch]["source_pkg_nvr"] = len(pkg_source_nvr)
arch_pkg_counts[arch]["source_pkg_names"] = len(pkg_source_names)
arch_pkg_counts[arch]["unwanted_packages"] = unwanted_packages_count
all_arches_nevrs.update(pkg_nevrs)
all_arches_source_nvrs.update(pkg_source_nvr)
template_data = {
"query": query,
"view_conf": view_conf,
"arch_pkg_counts": arch_pkg_counts,
"all_pkg_count": len(all_arches_nevrs),
"all_unwanted_count": len(all_arches_unwanteds),
"all_source_nvr_count": len(all_arches_source_nvrs)
}
page_name = "view--{view_conf_id}".format(
view_conf_id=view_conf_id
)
_generate_html_page("view_compose_overview", template_data, page_name, query.settings)
log(" Done!")
log("")
# ==================
# === Part 2 ===
# ==================
#
# Second, generate detail pages for each architecture
for arch in query.arches_in_view(view_conf_id):
# Generate the detail pages (packages, modules, unwanted, buildroot, workloads) for this architecture
log(" Generating 'compose' view {view_conf_id} for {arch}".format(
view_conf_id=view_conf_id,
arch=arch
))
template_data = {
"query": query,
"view_conf": view_conf,
"arch": arch,
}
page_name = "view--{view_conf_id}--{arch}".format(
view_conf_id=view_conf_id,
arch=arch
)
_generate_html_page("view_compose_packages", template_data, page_name, query.settings)
page_name = "view-modules--{view_conf_id}--{arch}".format(
view_conf_id=view_conf_id,
arch=arch
)
_generate_html_page("view_compose_modules", template_data, page_name, query.settings)
page_name = "view-unwanted--{view_conf_id}--{arch}".format(
view_conf_id=view_conf_id,
arch=arch
)
_generate_html_page("view_compose_unwanted", template_data, page_name, query.settings)
page_name = "view-buildroot--{view_conf_id}--{arch}".format(
view_conf_id=view_conf_id,
arch=arch
)
_generate_html_page("view_compose_buildroot", template_data, page_name, query.settings)
page_name = "view-workloads--{view_conf_id}--{arch}".format(
view_conf_id=view_conf_id,
arch=arch
)
_generate_html_page("view_compose_workloads", template_data, page_name, query.settings)
# ==================
# === Part 3 ===
# ==================
#
# Third, generate one page per RPM name
pkg_names = set()
buildroot_pkg_names = set()
all_pkg_names = set()
# Save some useful data for the SRPM pages below
pkg_name_data = {}
all_arches = query.arches_in_view(view_conf_id)
for arch in all_arches:
pkg_names.update(query.pkgs_in_view(view_conf_id, arch, output_change="binary_names"))
buildroot_pkg_srpm_requires = {}
for arch in all_arches:
buildroot_pkg_srpm_requires[arch] = query.view_buildroot_pkgs(view_conf_id, arch)
for arch in all_arches:
for buildroot_pkg_name in buildroot_pkg_srpm_requires[arch]:
buildroot_pkg_names.add(buildroot_pkg_name)
all_pkg_names.update(pkg_names)
all_pkg_names.update(buildroot_pkg_names)
for pkg_name in all_pkg_names:
pkg_ids = {}
workload_conf_ids_required = {}
workload_conf_ids_dependency = {}
workload_conf_ids_env = {}
required_to_build_srpms = set()
# pkgs_required_by[this_pkg_id][required_by_name] = set of required_by_ids
pkgs_required_by = {}
exclusion_list_ids = {}
unwanted_in_view = False
build_dependency = False
pkg_srpm_name = None
# 1: Runtime package stuff
if pkg_name in pkg_names:
for arch in all_arches:
for pkg in query.pkgs_in_view(view_conf_id, arch):
pkg_nevra = "{name}-{evr}.{arch}".format(
name=pkg["name"],
evr=pkg["evr"],
arch=pkg["arch"]
)
if pkg["name"] == pkg_name:
if pkg_nevra not in pkg_ids:
pkg_ids[pkg_nevra] = set()
pkg_ids[pkg_nevra].add(arch)
pkg_srpm_name = pkg["source_name"]
for workload_id in pkg["q_required_in"]:
workload = query.data["workloads"][workload_id]
workload_conf_id = workload["workload_conf_id"]
if workload_conf_id not in workload_conf_ids_required:
workload_conf_ids_required[workload_conf_id] = set()
workload_conf_ids_required[workload_conf_id].add(arch)
for workload_id in pkg["q_dep_in"]:
workload = query.data["workloads"][workload_id]
workload_conf_id = workload["workload_conf_id"]
if workload_conf_id not in workload_conf_ids_dependency:
workload_conf_ids_dependency[workload_conf_id] = set()
workload_conf_ids_dependency[workload_conf_id].add(arch)
for workload_id in pkg["q_env_in"]:
workload = query.data["workloads"][workload_id]
workload_conf_id = workload["workload_conf_id"]
if workload_conf_id not in workload_conf_ids_env:
workload_conf_ids_env[workload_conf_id] = set()
workload_conf_ids_env[workload_conf_id].add(arch)
for pkg_unwanted_name, pkg_unwanted_data in query.view_unwanted_pkgs(view_conf_id, arch).items():
if pkg_name == pkg_unwanted_name:
if pkg_unwanted_data["unwanted_in_view"]:
unwanted_in_view = True
for exclusion_list_id in pkg_unwanted_data["unwanted_list_ids"]:
if exclusion_list_id not in exclusion_list_ids:
exclusion_list_ids[exclusion_list_id] = set()
exclusion_list_ids[exclusion_list_id].add(arch)
for arch in all_arches:
for workload_id in query.workloads_in_view(view_conf_id, arch):
workload = query.data["workloads"][workload_id]
workload_pkgs = query.workload_pkgs_id(workload_id)
workload_pkg_relations = workload["pkg_relations"]
workload_conf_id = workload["workload_conf_id"]
for this_pkg_id in pkg_ids:
if this_pkg_id not in workload_pkg_relations:
continue
if this_pkg_id not in pkgs_required_by:
pkgs_required_by[this_pkg_id] = {}
for required_by_id in workload_pkg_relations[this_pkg_id]["required_by"]:
required_by_name = pkg_id_to_name(required_by_id)
if required_by_name not in pkgs_required_by[this_pkg_id]:
pkgs_required_by[this_pkg_id][required_by_name] = set()
pkgs_required_by[this_pkg_id][required_by_name].add(required_by_id)
# Not every package that is required by something is marked as a "dependency"
# in the list; some are marked "required", for example. So add them to the
# list of dependencies here as well.
if workload_conf_id not in workload_conf_ids_dependency:
workload_conf_ids_dependency[workload_conf_id] = set()
workload_conf_ids_dependency[workload_conf_id].add(arch)
# 2: Buildroot package stuff
if pkg_name in buildroot_pkg_names:
build_dependency = True
for buildroot_pkg_relations_conf_id, buildroot_pkg_relations_conf in query.configs["buildroot_pkg_relations"].items():
if view_conf_id == buildroot_pkg_relations_conf["view_id"]:
arch = buildroot_pkg_relations_conf["arch"]
buildroot_pkg_relations = buildroot_pkg_relations_conf["pkg_relations"]
for this_pkg_id in buildroot_pkg_relations:
this_pkg_name = pkg_id_to_name(this_pkg_id)
if this_pkg_name == pkg_name:
if this_pkg_id not in pkg_ids:
pkg_ids[this_pkg_id] = set()
pkg_ids[this_pkg_id].add(arch)
if this_pkg_id in buildroot_pkg_relations and not pkg_srpm_name:
pkg_srpm_name = buildroot_pkg_relations[this_pkg_id]["source_name"]
for this_pkg_id in pkg_ids:
if this_pkg_id not in buildroot_pkg_relations:
continue
if this_pkg_id not in pkgs_required_by:
pkgs_required_by[this_pkg_id] = {}
for required_by_id in buildroot_pkg_relations[this_pkg_id]["required_by"]:
required_by_name = pkg_id_to_name(required_by_id)
if required_by_name not in pkgs_required_by[this_pkg_id]:
pkgs_required_by[this_pkg_id][required_by_name] = set()
pkgs_required_by[this_pkg_id][required_by_name].add(required_by_id + " (buildroot only)")
# required to build XX SRPMs
for arch in all_arches:
if pkg_name in buildroot_pkg_srpm_requires[arch]:
required_to_build_srpms.update(set(buildroot_pkg_srpm_requires[arch][pkg_name]["required_by"]))
template_data = {
"query": query,
"view_conf": view_conf,
"pkg_name": pkg_name,
"srpm_name": pkg_srpm_name,
"pkg_ids": pkg_ids,
"view_all_pkg_names": all_pkg_names,
"workload_conf_ids_required": workload_conf_ids_required,
"workload_conf_ids_dependency": workload_conf_ids_dependency,
"workload_conf_ids_env": workload_conf_ids_env,
"exclusion_list_ids": exclusion_list_ids,
"unwanted_in_view": unwanted_in_view,
"pkgs_required_by": pkgs_required_by,
"build_dependency": build_dependency,
"required_to_build_srpms": required_to_build_srpms
}
pkg_name_data[pkg_name] = template_data
page_name = "view-rpm--{view_conf_id}--{pkg_name}".format(
view_conf_id=view_conf_id,
pkg_name=pkg_name
)
_generate_html_page("view_compose_rpm", template_data, page_name, query.settings)
# ==================
# === Part 4 ===
# ==================
#
# Fourth, generate one page per SRPM name
srpm_names = set()
buildroot_srpm_names = set()
all_srpm_names = set()
for arch in all_arches:
srpm_names.update(query.pkgs_in_view(view_conf_id, arch, output_change="source_names"))
for arch in all_arches:
buildroot_srpm_names.update(query.view_buildroot_pkgs(view_conf_id, arch, output_change="source_names"))
srpm_maintainers = {}
if "srpm_maintainers" in query.computed_data["views"][view_conf_id]:
srpm_maintainers = query.computed_data["views"][view_conf_id]["srpm_maintainers"]
all_srpm_names.update(srpm_names)
all_srpm_names.update(buildroot_srpm_names)
for srpm_name in all_srpm_names:
# srpm_maintainers doesn't include buildroot packages yet, so recreate an empty structure for those manually for now
if srpm_name in srpm_maintainers:
recommended_maintainers = srpm_maintainers[srpm_name]
else:
recommended_maintainers = {}
recommended_maintainers["top"] = None
recommended_maintainers["all"] = {}
srpm_pkg_names = set()
for arch in all_arches:
for pkg in query.pkgs_in_view(view_conf_id, arch):
if pkg["source_name"] == srpm_name:
srpm_pkg_names.add(pkg["name"])
for buildroot_pkg_relations_conf_id, buildroot_pkg_relations_conf in query.configs["buildroot_pkg_relations"].items():
if view_conf_id == buildroot_pkg_relations_conf["view_id"]:
buildroot_pkg_relations = buildroot_pkg_relations_conf["pkg_relations"]
for buildroot_pkg_id, buildroot_pkg in buildroot_pkg_relations.items():
if srpm_name == buildroot_pkg["source_name"]:
buildroot_pkg_name = pkg_id_to_name(buildroot_pkg_id)
srpm_pkg_names.add(buildroot_pkg_name)
ownership_recommendations = None
if "ownership_recommendations" in query.computed_data["views"][view_conf_id]:
if srpm_name in query.computed_data["views"][view_conf_id]["ownership_recommendations"]:
ownership_recommendations = query.computed_data["views"][view_conf_id]["ownership_recommendations"][srpm_name]
template_data = {
"query": query,
"view_conf": view_conf,
"ownership_recommendations": ownership_recommendations,
"recommended_maintainers": recommended_maintainers,
"srpm_name": srpm_name,
"pkg_names": srpm_pkg_names,
"pkg_name_data": pkg_name_data
}
page_name = "view-srpm--{view_conf_id}--{srpm_name}".format(
view_conf_id=view_conf_id,
srpm_name=srpm_name
)
_generate_html_page("view_compose_srpm", template_data, page_name, query.settings)
log(" Done!")
log("")
def _generate_a_flat_list_file(data_list, file_name, settings):
file_contents = "\n".join(data_list)
filename = ("{file_name}.txt".format(
file_name=file_name.replace(":", "--")
))
output = settings["output"]
log(" Writing file... ({filename})".format(
filename=filename
))
with open(os.path.join(output, filename), "w") as file:
file.write(file_contents)
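# Generate the flat package list files (per arch, plus merged all-arch lists)
# for views using the current build strategies.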
def _generate_view_lists_new(query):
log("Generating view lists...")
for view_conf_id, view_conf in query.configs["views"].items():
# Skip the obsolete dep_tracker build strategy; that's handled by _generate_view_lists_old
if view_conf["type"] == "compose" and view_conf["buildroot_strategy"] == "dep_tracker":
continue
# all RPM NEVRAs view-all-binary-package-list
# all RPM NEVRs view-all-binary-package-nevr-list
# all RPM Names view-all-binary-package-name-list
#
# all SRPM NEVRs view-all-source-package-list
# all SRPM Names view-all-source-package-name-list
#
#
# runtime RPM NEVRAs view-binary-package-list
# runtime RPM NEVRs view-binary-package-nevr-list
# runtime RPM Names view-binary-package-name-list
#
# runtime SRPM NEVRs view-source-package-list
# runtime SRPM Names view-source-package-name-list
#
#
# build RPM NEVRAs view-buildroot-package-list
# build RPM NEVRs view-buildroot-package-nevr-list
# build RPM Names view-buildroot-package-name-list
#
# build SRPM NEVRs          view-buildroot-source-package-list
# build SRPM Names view-buildroot-source-package-name-list
all_arches_lists = {}
for arch in view_conf["architectures"]:
lists = {}
view_id = "{view_conf_id}:{arch}".format(
view_conf_id=view_conf_id,
arch=arch
)
view = query.data["views"][view_id]
# all RPM NEVRAs view-all-binary-package-list
# all RPM NEVRs view-all-binary-package-nevr-list
# all RPM Names view-all-binary-package-name-list
lists["view-all-binary-package-list"] = set()
lists["view-all-binary-package-nevr-list"] = set()
lists["view-all-binary-package-name-list"] = set()
# all SRPM NEVRs view-all-source-package-list
# all SRPM Names view-all-source-package-name-list
lists["view-all-source-package-list"] = set()
lists["view-all-source-package-name-list"] = set()
# runtime RPM NEVRAs view-binary-package-list
# runtime RPM NEVRs view-binary-package-nevr-list
# runtime RPM Names view-binary-package-name-list
lists["view-binary-package-list"] = set()
lists["view-binary-package-nevr-list"] = set()
lists["view-binary-package-name-list"] = set()
# runtime SRPM NEVRs view-source-package-list
# runtime SRPM Names view-source-package-name-list
lists["view-source-package-list"] = set()
lists["view-source-package-name-list"] = set()
# build RPM NEVRAs view-buildroot-package-list
# build RPM NEVRs view-buildroot-package-nevr-list
# build RPM Names view-buildroot-package-name-list
lists["view-buildroot-package-list"] = set()
lists["view-buildroot-package-nevr-list"] = set()
lists["view-buildroot-package-name-list"] = set()
# build SRPM NEVRs view-buildroot-source-package-list
# build SRPM Names view-buildroot-source-package-name-list
lists["view-buildroot-source-package-list"] = set()
lists["view-buildroot-source-package-name-list"] = set()
for pkg_id, pkg in view["pkgs"].items():
lists["view-all-binary-package-list"].add(pkg_id)
lists["view-all-binary-package-nevr-list"].add(pkg["nevr"])
lists["view-all-binary-package-name-list"].add(pkg["name"])
srpm_id = pkg["sourcerpm"].rsplit(".src.rpm")[0]
lists["view-all-source-package-list"].add(srpm_id)
lists["view-all-source-package-name-list"].add(pkg["source_name"])
if pkg["in_workload_ids_all"]:
lists["view-binary-package-list"].add(pkg_id)
lists["view-binary-package-nevr-list"].add(pkg["nevr"])
lists["view-binary-package-name-list"].add(pkg["name"])
lists["view-source-package-list"].add(srpm_id)
lists["view-source-package-name-list"].add(pkg["source_name"])
else:
lists["view-buildroot-package-list"].add(pkg_id)
lists["view-buildroot-package-nevr-list"].add(pkg["nevr"])
lists["view-buildroot-package-name-list"].add(pkg["name"])
lists["view-buildroot-source-package-list"].add(srpm_id)
lists["view-buildroot-source-package-name-list"].add(pkg["source_name"])
for list_name, list_content in lists.items():
# Generate the arch-specific lists
file_name = "{list_name}--{view_conf_id}--{arch}".format(
list_name=list_name,
view_conf_id=view_conf_id,
arch=arch
)
_generate_a_flat_list_file(sorted(list(list_content)), file_name, query.settings)
# Populate the all-arch lists
if list_name not in all_arches_lists:
all_arches_lists[list_name] = set()
all_arches_lists[list_name].update(list_content)
for list_name, list_content in all_arches_lists.items():
# Generate the all-arch lists
file_name = "{list_name}--{view_conf_id}".format(
list_name=list_name,
view_conf_id=view_conf_id
)
_generate_a_flat_list_file(sorted(list(list_content)), file_name, query.settings)
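# Generate the flat package list files for views still using the obsolete
# dep_tracker buildroot strategy.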
def _generate_view_lists_old(query):
log("Generating view lists...")
for view_conf_id,view_conf in query.configs["views"].items():
# This function is now only used for the obsolete dep_tracker build strategy
if view_conf["type"] == "compose" and view_conf["buildroot_strategy"] == "dep_tracker":
repo_id = view_conf["repository"]
for arch in query.arches_in_view(view_conf_id):
# Generate the flat package lists for this architecture
log(" Generating 'compose' package list {view_conf_id} for {arch}".format(
view_conf_id=view_conf_id,
arch=arch
))
pkg_ids = query.pkgs_in_view(view_conf_id, arch, output_change="ids")
pkg_binary_names = query.pkgs_in_view(view_conf_id, arch, output_change="binary_names")
pkg_source_nvrs = query.pkgs_in_view(view_conf_id, arch, output_change="source_nvr")
pkg_source_names = query.pkgs_in_view(view_conf_id, arch, output_change="source_names")
# If it's the addon view, there are two SRPM lists:
# 1/ all SRPMs in the addon, with some potentially also being in the base view
# because, one SRPM can have RPMs in both
# 2/ added SRPMs - only SRPMs not in the base view, potentially missing
# some SRPMs that are in this addon
if view_conf["type"] == "addon":
base_view_id = view_conf["base_view_id"]
base_pkg_source_nvrs = query.pkgs_in_view(base_view_id, arch, output_change="source_nvr")
base_pkg_source_names = query.pkgs_in_view(base_view_id, arch, output_change="source_names")
added_pkg_source_nvrs = sorted(list(set(pkg_source_nvrs) - set(base_pkg_source_nvrs)))
added_pkg_source_names = sorted(list(set(pkg_source_names) - set(base_pkg_source_names)))
buildroot_data = query.view_buildroot_pkgs(view_conf_id, arch)
pkg_buildroot_source_names = query.view_buildroot_pkgs(view_conf_id, arch, output_change="source_names")
if buildroot_data:
pkg_buildroot_names = buildroot_data.keys()
else:
pkg_buildroot_names = []
modules = query.view_modules(view_conf_id, arch)
file_name = "view-binary-package-list--{view_conf_id}--{arch}".format(
view_conf_id=view_conf_id,
arch=arch
)
_generate_a_flat_list_file(pkg_ids, file_name, query.settings)
file_name = "view-binary-package-name-list--{view_conf_id}--{arch}".format(
view_conf_id=view_conf_id,
arch=arch
)
_generate_a_flat_list_file(pkg_binary_names, file_name, query.settings)
if view_conf["type"] == "compose":
file_name = "view-source-package-list--{view_conf_id}--{arch}".format(
view_conf_id=view_conf_id,
arch=arch
)
_generate_a_flat_list_file(pkg_source_nvrs, file_name, query.settings)
file_name = "view-source-package-name-list--{view_conf_id}--{arch}".format(
view_conf_id=view_conf_id,
arch=arch
)
_generate_a_flat_list_file(pkg_source_names, file_name, query.settings)
elif view_conf["type"] == "addon":
file_name = "view-all-source-package-list--{view_conf_id}--{arch}".format(
view_conf_id=view_conf_id,
arch=arch
)
_generate_a_flat_list_file(pkg_source_nvrs, file_name, query.settings)
file_name = "view-all-source-package-name-list--{view_conf_id}--{arch}".format(
view_conf_id=view_conf_id,
arch=arch
)
_generate_a_flat_list_file(pkg_source_names, file_name, query.settings)
file_name = "view-added-source-package-list--{view_conf_id}--{arch}".format(
view_conf_id=view_conf_id,
arch=arch
)
_generate_a_flat_list_file(added_pkg_source_nvrs, file_name, query.settings)
file_name = "view-added-source-package-name-list--{view_conf_id}--{arch}".format(
view_conf_id=view_conf_id,
arch=arch
)
_generate_a_flat_list_file(added_pkg_source_names, file_name, query.settings)
file_name = "view-buildroot-package-name-list--{view_conf_id}--{arch}".format(
view_conf_id=view_conf_id,
arch=arch
)
_generate_a_flat_list_file(pkg_buildroot_names, file_name, query.settings)
file_name = "view-buildroot-source-package-name-list--{view_conf_id}--{arch}".format(
view_conf_id=view_conf_id,
arch=arch
)
_generate_a_flat_list_file(pkg_buildroot_source_names, file_name, query.settings)
file_name = "view-module-list--{view_conf_id}--{arch}".format(
view_conf_id=view_conf_id,
arch=arch
)
_generate_a_flat_list_file(modules, file_name, query.settings)
file_name = "view-placeholder-srpm-details--{view_conf_id}--{arch}.json".format(
view_conf_id=view_conf_id,
arch=arch
)
file_path = os.path.join(query.settings["output"], file_name)
view_placeholder_srpm_details = query.view_placeholder_srpms(view_conf_id, arch)
dump_data(file_path, view_placeholder_srpm_details)
log(" Done!")
log("")
def _dump_all_data(query):
log("Dumping all data...")
data = {}
data["data"] = query.data
data["configs"] = query.configs
data["settings"] = query.settings
data["computed_data"] = query.computed_data
file_name = "data.json"
file_path = os.path.join(query.settings["output"], file_name)
dump_data(file_path, data)
log(" Done!")
log("")
def generate_pages(query):
log("")
log("###############################################################################")
log("### Generating html pages! ####################################################")
log("###############################################################################")
log("")
# Create the jinja2 thingy
template_loader = jinja2.FileSystemLoader(searchpath="./templates/")
template_env = jinja2.Environment(
loader=template_loader,
trim_blocks=True,
lstrip_blocks=True
)
query.settings["jinja2_template_env"] = template_env
# Copy static files
log("Copying static files...")
src_static_dir = os.path.join("templates", "_static")
output_static_dir = os.path.join(query.settings["output"])
subprocess.run(["cp", "-R", src_static_dir, output_static_dir])
log(" Done!")
log("")
# Generate the landing page
_generate_html_page("homepage", None, "index", query.settings)
# Generate the main menu page
_generate_html_page("results", None, "results", query.settings)
# Generate config pages
_generate_config_pages(query)
# Generate the top-level results pages
template_data = {
"query": query
}
_generate_html_page("repos", template_data, "repos", query.settings)
_generate_html_page("envs", template_data, "envs", query.settings)
_generate_html_page("workloads", template_data, "workloads", query.settings)
_generate_html_page("labels", template_data, "labels", query.settings)
_generate_html_page("views", template_data, "views", query.settings)
_generate_html_page("maintainers", template_data, "maintainers", query.settings)
# Generate repo pages
_generate_repo_pages(query)
# Generate maintainer pages
_generate_maintainer_pages(query)
# Generate env_overview pages
_generate_env_pages(query)
# Generate workload_overview pages
_generate_workload_pages(query)
# Generate view pages
_generate_view_pages_new(query)
_generate_view_pages_old(query)
# Generate flat lists for views
_generate_view_lists_new(query)
_generate_view_lists_old(query)
# Dump all data
# The data is now pretty huge and not really needed anyway
#if not query.settings["use_cache"]:
# _dump_all_data(query)
# Generate the errors page
template_data = {
"query": query
}
_generate_html_page("errors", template_data, "errors", query.settings)
log("")
log("###############################################################################")
log("### Generating JSON pages! ####################################################")
log("###############################################################################")
log("")
# Generate data for the top-level results pages
maintainer_data = query.maintainers()
_generate_json_page(maintainer_data, "maintainers", query.settings)
###############################################################################
### Historic Data #############################################################
###############################################################################
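# Save the current package lists both into a per-week history subdirectory and
# into the main output directory.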
def _save_package_history(query):
# This is generating historic (and present) package lists
# Data for the historic charts is the function below
log("Generating current package history lists...")
# /history/
# /history/2020-week_28/
# /history/2020-week_28/workload--WORKLOAD_ID.json
# /history/2020-week_28/workload-conf--WORKLOAD_CONF_ID.json
# /history/2020-week_28/env--ENV_ID.json
# /history/2020-week_28/env-conf--ENV_CONF_ID.json
# /history/2020-week_28/view--VIEW_CONF_ID.json
# Where to save it
year = datetime.datetime.now().strftime("%Y")
week = datetime.datetime.now().strftime("%W")
date = str(datetime.datetime.now().strftime("%Y-%m-%d"))
output_dir = os.path.join(query.settings["output"], "history")
output_subdir = "{year}-week_{week}".format(
year=year,
week=week
)
subprocess.run(["mkdir", "-p", os.path.join(output_dir, output_subdir)])
# Also save the current data to the standard output dir
current_version_output_dir = query.settings["output"]
# == Workloads
log("")
log("Workloads:")
for workload_conf_id, workload_conf in query.configs["workloads"].items():
# === Config
log("")
log(" Config for: {}".format(workload_conf_id))
# Where to save
filename = "workload-conf--{workload_conf_id_slug}.json".format(
workload_conf_id_slug = query.url_slug_id(workload_conf_id)
)
file_path = os.path.join(output_dir, output_subdir, filename)
current_version_file_path = os.path.join(current_version_output_dir, filename)
# What to save
output_data = {}
output_data["date"] = date
output_data["id"] = workload_conf_id
output_data["type"] = "workload_conf"
output_data["data"] = query.configs["workloads"][workload_conf_id]
# And save it
log(" Saving in: {file_path}".format(
file_path=file_path
))
dump_data(file_path, output_data)
# Also save the current data to the standard output dir
log(" Saving in: {current_version_file_path}".format(
current_version_file_path=current_version_file_path
))
dump_data(current_version_file_path, output_data)
# === Results
for workload_id in query.workloads(workload_conf_id, None, None, None, list_all=True):
workload = query.data["workloads"][workload_id]
log(" Results: {}".format(workload_id))
# Where to save
filename = "workload--{workload_id_slug}.json".format(
workload_id_slug = query.url_slug_id(workload_id)
)
file_path = os.path.join(output_dir, output_subdir, filename)
current_version_file_path = os.path.join(current_version_output_dir, filename)
# What to save
output_data = {}
output_data["date"] = date
output_data["id"] = workload_id
output_data["type"] = "workload"
output_data["data"] = query.data["workloads"][workload_id]
output_data["pkg_query"] = query.workload_pkgs_id(workload_id)
# And save it
log(" Saving in: {file_path}".format(
file_path=file_path
))
dump_data(file_path, output_data)
# Also save the current data to the standard output dir
log(" Saving in: {current_version_file_path}".format(
current_version_file_path=current_version_file_path
))
dump_data(current_version_file_path, output_data)
# == envs
log("")
log("Envs:")
for env_conf_id, env_conf in query.configs["envs"].items():
# === Config
log("")
log(" Config for: {}".format(env_conf_id))
# Where to save
filename = "env-conf--{env_conf_id_slug}.json".format(
env_conf_id_slug = query.url_slug_id(env_conf_id)
)
file_path = os.path.join(output_dir, output_subdir, filename)
current_version_file_path = os.path.join(current_version_output_dir, filename)
# What to save
output_data = {}
output_data["date"] = date
output_data["id"] = env_conf_id
output_data["type"] = "env_conf"
output_data["data"] = query.configs["envs"][env_conf_id]
# And save it
log(" Saving in: {file_path}".format(
file_path=file_path
))
dump_data(file_path, output_data)
# Also save the current data to the standard output dir
log(" Saving in: {current_version_file_path}".format(
current_version_file_path=current_version_file_path
))
dump_data(current_version_file_path, output_data)
# === Results
for env_id in query.envs(env_conf_id, None, None, list_all=True):
env = query.data["envs"][env_id]
log(" Results: {}".format(env_id))
# Where to save
filename = "env--{env_id_slug}.json".format(
env_id_slug = query.url_slug_id(env_id)
)
file_path = os.path.join(output_dir, output_subdir, filename)
current_version_file_path = os.path.join(current_version_output_dir, filename)
# What to save
output_data = {}
output_data["date"] = date
output_data["id"] = env_id
output_data["type"] = "env"
output_data["data"] = query.data["envs"][env_id]
output_data["pkg_query"] = query.env_pkgs_id(env_id)
# And save it
log(" Saving in: {file_path}".format(
file_path=file_path
))
dump_data(file_path, output_data)
# Also save the current data to the standard output dir
log(" Saving in: {current_version_file_path}".format(
current_version_file_path=current_version_file_path
))
dump_data(current_version_file_path, output_data)
# == views
log("")
log("views:")
for view_conf_id, view_conf in query.configs["views"].items():
# === Config
log("")
log(" Config for: {}".format(view_conf_id))
# Where to save
filename = "view-conf--{view_conf_id_slug}.json".format(
view_conf_id_slug = query.url_slug_id(view_conf_id)
)
file_path = os.path.join(output_dir, output_subdir, filename)
current_version_file_path = os.path.join(current_version_output_dir, filename)
# What to save
output_data = {}
output_data["date"] = date
output_data["id"] = view_conf_id
output_data["type"] = "view_conf"
output_data["data"] = query.configs["views"][view_conf_id]
# And save it
log(" Saving in: {file_path}".format(
file_path=file_path
))
dump_data(file_path, output_data)
# Also save the current data to the standard output dir
log(" Saving in: {current_version_file_path}".format(
current_version_file_path=current_version_file_path
))
dump_data(current_version_file_path, output_data)
# === Results
for arch in query.arches_in_view(view_conf_id):
log(" Results: {}".format(env_id))
view_id = "{view_conf_id}:{arch}".format(
view_conf_id=view_conf_id,
arch=arch
)
# Where to save
filename = "view--{view_id_slug}.json".format(
view_id_slug = query.url_slug_id(view_id)
)
file_path = os.path.join(output_dir, output_subdir, filename)
current_version_file_path = os.path.join(current_version_output_dir, filename)
# What to save
output_data = {}
output_data["date"] = date
output_data["id"] = view_id
output_data["type"] = "view"
output_data["workload_ids"] = query.workloads_in_view(view_conf_id, arch)
output_data["pkg_query"] = query.pkgs_in_view(view_conf_id, arch)
output_data["unwanted_pkg"] = query.view_unwanted_pkgs(view_conf_id, arch)
# And save it
log(" Saving in: {file_path}".format(
file_path=file_path
))
dump_data(file_path, output_data)
# Also save the current data to the standard output dir
log(" Saving in: {current_version_file_path}".format(
current_version_file_path=current_version_file_path
))
dump_data(current_version_file_path, output_data)
# == Also, save the buildroot data
# Where to save
filename = "view-buildroot--{view_id_slug}.json".format(
view_id_slug = query.url_slug_id(view_id)
)
file_path = os.path.join(output_dir, output_subdir, filename)
current_version_file_path = os.path.join(current_version_output_dir, filename)
# What to save
output_data = {}
output_data["date"] = date
output_data["id"] = view_id
output_data["type"] = "view-buildroot"
output_data["pkgs"] = query.view_buildroot_pkgs(view_conf_id, arch)
# And save it
log(" Saving in: {file_path}".format(
file_path=file_path
))
dump_data(file_path, output_data)
# Also save the current data to the standard output dir
log(" Saving in: {current_version_file_path}".format(
current_version_file_path=current_version_file_path
))
dump_data(current_version_file_path, output_data)
log(" Done!")
log("")
def _save_current_historic_data(query):
# This is the historic data for charts
# Package lists are above
log("Generating current historic data...")
# Where to save it
year = datetime.datetime.now().strftime("%Y")
week = datetime.datetime.now().strftime("%W")
filename = "historic_data-{year}-week_{week}.json".format(
year=year,
week=week
)
output_dir = os.path.join(query.settings["output"], "history")
file_path = os.path.join(output_dir, filename)
# What to save there
history_data = {}
history_data["date"] = str(datetime.datetime.now().strftime("%Y-%m-%d"))
history_data["workloads"] = {}
history_data["envs"] = {}
history_data["repos"] = {}
history_data["views"] = {}
for workload_id in query.workloads(None,None,None,None,list_all=True):
workload = query.data["workloads"][workload_id]
if not workload["succeeded"]:
continue
workload_history = {}
workload_history["size"] = query.workload_size_id(workload_id)
workload_history["pkg_count"] = len(query.workload_pkgs_id(workload_id))
history_data["workloads"][workload_id] = workload_history
for env_id in query.envs(None,None,None,list_all=True):
env = query.data["envs"][env_id]
if not env["succeeded"]:
continue
env_history = {}
env_history["size"] = query.env_size_id(env_id)
env_history["pkg_count"] = len(query.env_pkgs_id(env_id))
history_data["envs"][env_id] = env_history
for repo_id in query.configs["repos"].keys():
history_data["repos"][repo_id] = {}
for arch, pkgs in query.data["pkgs"][repo_id].items():
repo_history = {}
repo_history["pkg_count"] = len(pkgs)
history_data["repos"][repo_id][arch] = repo_history
for view_conf_id in query.configs["views"].keys():
history_data["views"][view_conf_id] = {}
for arch in query.arches_in_view(view_conf_id):
pkg_ids = query.pkgs_in_view(view_conf_id, arch)
view_history = {}
view_history["pkg_count"] = len(pkg_ids)
history_data["views"][view_conf_id][arch] = view_history
# And save it
log(" Saving in: {file_path}".format(
file_path=file_path
))
dump_data(file_path, history_data)
log(" Done!")
log("")
def _read_historic_data(query):
log("Reading historic data...")
directory = os.path.join(query.settings["output"], "history")
# Do some basic validation of the filename
all_filenames = os.listdir(directory)
valid_filenames = []
for filename in all_filenames:
if re.match(r"historic_data-\d{4}-week_\d{2}\.json$", filename):
valid_filenames.append(filename)
valid_filenames.sort()
# Get the data
historic_data = {}
for filename in valid_filenames:
with open(os.path.join(directory, filename), "r") as file:
try:
document = json.load(file)
date = datetime.datetime.strptime(document["date"],"%Y-%m-%d")
year = date.strftime("%Y")
week = date.strftime("%W")
key = "{year}-week_{week}".format(
year=year,
week=week
)
except (KeyError, ValueError):
err_log("Invalid file in historic data: {filename}. Ignoring.".format(
filename=filename
))
continue
historic_data[key] = document
log("  Done!")
log("")
return historic_data
def _save_json_data_entry(entry_name, entry_data, settings):
log("Generating data entry for {entry_name}".format(
entry_name=entry_name
))
output = settings["output"]
filename = ("{entry_name}.json".format(
entry_name=entry_name.replace(":", "--")
))
log(" Writing file... ({filename})".format(
filename=filename
))
with open(os.path.join(output, filename), "w") as file:
json.dump(entry_data, file)
log(" Done!")
log("")
def _generate_chartjs_data(historic_data, query):
# Data for workload pages
for workload_id in query.workloads(None, None, None, None, list_all=True):
entry_data = {}
# First, get the dates as chart labels
entry_data["labels"] = []
for _,entry in historic_data.items():
date = entry["date"]
entry_data["labels"].append(date)
# Second, get the actual data for everything that's needed
entry_data["datasets"] = []
workload = query.data["workloads"][workload_id]
workload_conf_id = workload["workload_conf_id"]
workload_conf = query.configs["workloads"][workload_conf_id]
dataset = {}
dataset["data"] = []
dataset["label"] = workload_conf["name"]
dataset["fill"] = "false"
for _,entry in historic_data.items():
try:
size = entry["workloads"][workload_id]["size"]
# The chart needs the size in MB, but just as a number
size_mb = "{0:.1f}".format(size/1024/1024)
dataset["data"].append(size_mb)
except KeyError:
dataset["data"].append("null")
entry_data["datasets"].append(dataset)
entry_name = "chartjs-data--workload--{workload_id}".format(
workload_id=workload_id
)
_save_json_data_entry(entry_name, entry_data, query.settings)
# Data for workload overview pages
for workload_conf_id in query.workloads(None,None,None,None,output_change="workload_conf_ids"):
for repo_id in query.workloads(workload_conf_id,None,None,None,output_change="repo_ids"):
entry_data = {}
# First, get the dates as chart labels
entry_data["labels"] = []
for _,entry in historic_data.items():
date = entry["date"]
entry_data["labels"].append(date)
# Second, get the actual data for everything that's needed
entry_data["datasets"] = []
for workload_id in query.workloads(workload_conf_id, None, repo_id, None, list_all=True):
workload = query.data["workloads"][workload_id]
env_conf_id = workload["env_conf_id"]
env_conf = query.configs["envs"][env_conf_id]
dataset = {}
dataset["data"] = []
dataset["label"] = "in {name} {arch}".format(
name=env_conf["name"],
arch=workload["arch"]
)
dataset["fill"] = "false"
for _,entry in historic_data.items():
try:
size = entry["workloads"][workload_id]["size"]
# The chart needs the size in MB, but just as a number
size_mb = "{0:.1f}".format(size/1024/1024)
dataset["data"].append(size_mb)
except KeyError:
dataset["data"].append("null")
entry_data["datasets"].append(dataset)
entry_name = "chartjs-data--workload-overview--{workload_conf_id}--{repo_id}".format(
workload_conf_id=workload_conf_id,
repo_id=repo_id
)
_save_json_data_entry(entry_name, entry_data, query.settings)
# Data for workload cmp arches pages
for workload_conf_id in query.workloads(None,None,None,None,output_change="workload_conf_ids"):
for env_conf_id in query.workloads(workload_conf_id,None,None,None,output_change="env_conf_ids"):
for repo_id in query.workloads(workload_conf_id,env_conf_id,None,None,output_change="repo_ids"):
workload_conf = query.configs["workloads"][workload_conf_id]
env_conf = query.configs["envs"][env_conf_id]
repo = query.configs["repos"][repo_id]
entry_data = {}
# First, get the dates as chart labels
entry_data["labels"] = []
for _,entry in historic_data.items():
date = entry["date"]
entry_data["labels"].append(date)
# Second, get the actual data for everything that's needed
entry_data["datasets"] = []
for workload_id in query.workloads(workload_conf_id,env_conf_id,repo_id,None,list_all=True):
workload = query.data["workloads"][workload_id]
env_conf_id = workload["env_conf_id"]
env_conf = query.configs["envs"][env_conf_id]
dataset = {}
dataset["data"] = []
dataset["label"] = "{arch}".format(
arch=workload["arch"]
)
dataset["fill"] = "false"
for _,entry in historic_data.items():
try:
size = entry["workloads"][workload_id]["size"]
# The chart needs the size in MB, but just as a number
size_mb = "{0:.1f}".format(size/1024/1024)
dataset["data"].append(size_mb)
except KeyError:
dataset["data"].append("null")
entry_data["datasets"].append(dataset)
entry_name = "chartjs-data--workload-cmp-arches--{workload_conf_id}--{env_conf_id}--{repo_id}".format(
workload_conf_id=workload_conf_id,
env_conf_id=env_conf_id,
repo_id=repo_id
)
_save_json_data_entry(entry_name, entry_data, query.settings)
# Data for workload cmp envs pages
for workload_conf_id in query.workloads(None,None,None,None,output_change="workload_conf_ids"):
for repo_id in query.workloads(workload_conf_id,None,None,None,output_change="repo_ids"):
for arch in query.workloads(workload_conf_id,None,repo_id,None,output_change="arches"):
workload_conf = query.configs["workloads"][workload_conf_id]
repo = query.configs["repos"][repo_id]
entry_data = {}
# First, get the dates as chart labels
entry_data["labels"] = []
for _,entry in historic_data.items():
date = entry["date"]
entry_data["labels"].append(date)
# Second, get the actual data for everything that's needed
entry_data["datasets"] = []
for workload_id in query.workloads(workload_conf_id,None,repo_id,arch,list_all=True):
workload = query.data["workloads"][workload_id]
repo = query.configs["repos"][repo_id]
dataset = {}
dataset["data"] = []
dataset["label"] = "{repo} {arch}".format(
repo=repo["name"],
arch=workload["arch"]
)
dataset["fill"] = "false"
for _,entry in historic_data.items():
try:
size = entry["workloads"][workload_id]["size"]
# The chart needs the size in MB, but just as a number
size_mb = "{0:.1f}".format(size/1024/1024)
dataset["data"].append(size_mb)
except KeyError:
dataset["data"].append("null")
entry_data["datasets"].append(dataset)
entry_name = "chartjs-data--workload-cmp-envs--{workload_conf_id}--{repo_id}--{arch}".format(
workload_conf_id=workload_conf_id,
repo_id=repo_id,
arch=arch
)
_save_json_data_entry(entry_name, entry_data, query.settings)
# Data for env pages
for env_id in query.envs(None, None, None, list_all=True):
entry_data = {}
# First, get the dates as chart labels
entry_data["labels"] = []
for _,entry in historic_data.items():
date = entry["date"]
entry_data["labels"].append(date)
# Second, get the actual data for everything that's needed
entry_data["datasets"] = []
env = query.data["envs"][env_id]
env_conf_id = env["env_conf_id"]
env_conf = query.configs["envs"][env_conf_id]
dataset = {}
dataset["data"] = []
dataset["label"] = env_conf["name"]
dataset["fill"] = "false"
for _,entry in historic_data.items():
try:
size = entry["envs"][env_id]["size"]
# The chart needs the size in MB, but just as a number
size_mb = "{0:.1f}".format(size/1024/1024)
dataset["data"].append(size_mb)
except KeyError:
dataset["data"].append("null")
entry_data["datasets"].append(dataset)
entry_name = "chartjs-data--env--{env_id}".format(
env_id=env_id
)
_save_json_data_entry(entry_name, entry_data, query.settings)
# Data for env overview pages
for env_conf_id in query.envs(None,None,None,output_change="env_conf_ids"):
for repo_id in query.envs(env_conf_id,None,None,output_change="repo_ids"):
entry_data = {}
# First, get the dates as chart labels
entry_data["labels"] = []
for _,entry in historic_data.items():
date = entry["date"]
entry_data["labels"].append(date)
# Second, get the actual data for everything that's needed
entry_data["datasets"] = []
for env_id in query.envs(env_conf_id, repo_id, None, list_all=True):
env = query.data["envs"][env_id]
env_conf_id = env["env_conf_id"]
env_conf = query.configs["envs"][env_conf_id]
dataset = {}
dataset["data"] = []
dataset["label"] = "in {name} {arch}".format(
name=env_conf["name"],
arch=env["arch"]
)
dataset["fill"] = "false"
for _,entry in historic_data.items():
try:
size = entry["envs"][env_id]["size"]
# The chart needs the size in MB, but just as a number
size_mb = "{0:.1f}".format(size/1024/1024)
dataset["data"].append(size_mb)
except KeyError:
dataset["data"].append("null")
entry_data["datasets"].append(dataset)
entry_name = "chartjs-data--env-overview--{env_conf_id}--{repo_id}".format(
env_conf_id=env_conf_id,
repo_id=repo_id
)
_save_json_data_entry(entry_name, entry_data, query.settings)
# Data for env cmp arches pages
for env_conf_id in query.envs(None,None,None,output_change="env_conf_ids"):
for repo_id in query.envs(env_conf_id,None,None,output_change="repo_ids"):
env_conf = query.configs["envs"][env_conf_id]
repo = query.configs["repos"][repo_id]
entry_data = {}
# First, get the dates as chart labels
entry_data["labels"] = []
for _,entry in historic_data.items():
date = entry["date"]
entry_data["labels"].append(date)
# Second, get the actual data for everything that's needed
entry_data["datasets"] = []
for env_id in query.envs(env_conf_id,repo_id,None,list_all=True):
env = query.data["envs"][env_id]
dataset = {}
dataset["data"] = []
dataset["label"] = "{arch}".format(
arch=env["arch"]
)
dataset["fill"] = "false"
for _,entry in historic_data.items():
try:
size = entry["envs"][env_id]["size"]
# The chart needs the size in MB, but just as a number
size_mb = "{0:.1f}".format(size/1024/1024)
dataset["data"].append(size_mb)
except KeyError:
dataset["data"].append("null")
entry_data["datasets"].append(dataset)
entry_name = "chartjs-data--env-cmp-arches--{env_conf_id}--{repo_id}".format(
env_conf_id=env_conf_id,
repo_id=repo_id
)
_save_json_data_entry(entry_name, entry_data, query.settings)
# Data for compose view pages
for view_conf_id in query.configs["views"].keys():
for arch in query.arches_in_view(view_conf_id):
entry_data = {}
# First, get the dates as chart labels
entry_data["labels"] = []
for _,entry in historic_data.items():
date = entry["date"]
entry_data["labels"].append(date)
# Second, get the actual data for everything that's needed
entry_data["datasets"] = []
dataset = {}
dataset["data"] = []
dataset["label"] = "Number of packages"
dataset["fill"] = "false"
for _,entry in historic_data.items():
try:
count = entry["views"][view_conf_id][arch]["pkg_count"]
dataset["data"].append(count)
except KeyError:
dataset["data"].append("null")
entry_data["datasets"].append(dataset)
entry_name = "chartjs-data--view--{view_conf_id}--{arch}".format(
view_conf_id=view_conf_id,
arch=arch
)
_save_json_data_entry(entry_name, entry_data, query.settings)
def generate_historic_data(query):
log("")
log("###############################################################################")
log("### Historic Data #############################################################")
log("###############################################################################")
log("")
# Save historic package lists
_save_package_history(query)
# Step 1: Save current data
_save_current_historic_data(query)
# Step 2: Read historic data
historic_data = _read_historic_data(query)
# Step 3: Generate Chart.js data
_generate_chartjs_data(historic_data, query)
log("Done!")
log("")
###############################################################################
### Maintainer Recommendation #################################################
###############################################################################
class OwnershipEngine:
# Levels:
#
#
# level0 == required
# ---
# level1 == 1st level runtime dep
# ...
# level9 == 9th level runtime dep
#
#
# level10 == build dep of something in the previous group
# ---
# level11 == 1st level runtime dep
# ...
# level19 == 9th level runtime dep
#
#
# level20 == build dep of something in the previous group
# ---
# level21 == 1st level runtime dep
# ...
# level29 == 9th level runtime dep
#
# etc. up to level99
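#
# A quick illustrative note on the naming used throughout this class (the
# concrete values below are examples only): the level name is the layer digit
# followed by the level digit, with layer 0 omitting its digit, e.g.
#
#   "level{}".format(3)       -> "level3"    (layer 0, level 3)
#   "level{}{}".format(2, 3)  -> "level23"   (layer 2, level 3)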
def __init__(self, query):
self.query = query
self.MAX_LEVEL = 9
self.MAX_LAYER = 9
self.skipped_maintainers = ["bakery", "jwboyer", "asamalik"]
def process_view(self, view_conf_id):
self._initiate_view(view_conf_id)
log("Processing ownership recommendations for {} view...".format(view_conf_id))
# Layer 0
log(" Processing Layer 0...")
self._process_layer_zero_entries()
self._process_layer_component_maintainers()
# Layers 1-9
# This operates on all SRPMs from the previous level.
# Resolves all their build dependencies.
previous_layer_srpms = self.runtime_srpm_names
for layer in range(1, self.MAX_LAYER + 1):
log(" Processing Layer {}...".format(layer))
log(" {} components".format(len(previous_layer_srpms)))
# Process all the "pkg_entries" for this layer, and collect this layer srpm packages
# which will be used in the next layer.
this_layer_srpm_packages = self._process_layer_pkg_entries(layer, previous_layer_srpms)
self._process_layer_srpm_entries(layer)
self._process_layer_component_maintainers()
previous_layer_srpms = this_layer_srpm_packages
log("Done!")
log("")
return self.component_maintainers
def _process_layer_pkg_entries(self, layer, build_srpm_names):
if layer not in range(1,10):
raise ValueError
level_srpm_packages = set()
for build_srpm_name in build_srpm_names:
# Packages on level 0 == direct build dependencies of this SRPM
level = 0
level_name = "level{}{}".format(layer, level)
level0_pkg_names = set()
# This will initially hold all packages.
# When figuring out levels, I'll process each package just once.
# And for that I'll be removing them from this set as I go.
remaining_pkg_names = self.buildroot_only_rpm_names.copy()
#for pkg_name, pkg in self.buildroot_pkgs.items():
for pkg_name in remaining_pkg_names.copy():
pkg = self.buildroot_pkgs[pkg_name]
if build_srpm_name in pkg["required_by_srpms"]:
if "source_name" not in self.pkg_entries[pkg_name]:
self.pkg_entries[pkg_name]["source_name"] = pkg["source_name"]
self.pkg_entries[pkg_name][level_name]["build_source_names"].add(build_srpm_name)
level0_pkg_names.add(pkg_name)
remaining_pkg_names.discard(pkg_name)
level_srpm_packages.add(pkg["source_name"])
pkg_names_level = []
pkg_names_level.append(level0_pkg_names)
# Starting at level 1, because level 0 is already done (that's required packages)
for level in range(1, self.MAX_LEVEL + 1):
level_name = "level{}{}".format(layer, level)
#1..
pkg_names_level.append(set())
#for pkg_name, pkg in self.buildroot_pkgs.items():
for pkg_name in remaining_pkg_names.copy():
pkg = self.buildroot_pkgs[pkg_name]
for higher_pkg_name in pkg["required_by"]:
if higher_pkg_name in pkg_names_level[level - 1]:
if "source_name" not in self.pkg_entries[pkg_name]:
self.pkg_entries[pkg_name]["source_name"] = pkg["source_name"]
self.pkg_entries[pkg_name][level_name]["build_source_names"].add(build_srpm_name)
pkg_names_level[level].add(pkg_name)
remaining_pkg_names.discard(pkg_name)
level_srpm_packages.add(pkg["source_name"])
return level_srpm_packages
def _process_layer_srpm_entries(self, layer):
if layer not in range(1,10):
raise ValueError
for pkg_name, pkg in self.pkg_entries.items():
if "source_name" not in pkg:
continue
source_name = pkg["source_name"]
for level in range(0, self.MAX_LEVEL + 1):
level_name = "level{}{}".format(layer, level)
for build_srpm_name in pkg[level_name]["build_source_names"]:
top_maintainers = self.component_maintainers[build_srpm_name]["top_multiple"]
for maintainer in top_maintainers:
if maintainer not in self.srpm_entries[source_name]["ownership"][level_name]:
self.srpm_entries[source_name]["ownership"][level_name][maintainer] = {}
self.srpm_entries[source_name]["ownership"][level_name][maintainer]["build_source_names"] = {}
self.srpm_entries[source_name]["ownership"][level_name][maintainer]["pkg_count"] = 0
if build_srpm_name not in self.srpm_entries[source_name]["ownership"][level_name][maintainer]["build_source_names"]:
self.srpm_entries[source_name]["ownership"][level_name][maintainer]["build_source_names"][build_srpm_name] = set()
self.srpm_entries[source_name]["ownership"][level_name][maintainer]["pkg_count"] += 1
self.srpm_entries[source_name]["ownership"][level_name][maintainer]["build_source_names"][build_srpm_name].add(pkg_name)
def _process_layer_component_maintainers(self):
clear_components = set()
unclear_components = set()
for component_name, owner_data in self.srpm_entries.items():
if self.component_maintainers[component_name]["top"]:
continue
found = False
maintainers = {}
top_maintainer = None
top_maintainers = set()
for level_name, level_data in owner_data["ownership"].items():
if found:
break
if not level_data:
continue
for maintainer, maintainer_data in level_data.items():
if maintainer in self.skipped_maintainers:
continue
found = True
maintainers[maintainer] = maintainer_data["pkg_count"]
# Sort out maintainers based on their score
maintainer_scores = {}
for maintainer, score in maintainers.items():
if score not in maintainer_scores:
maintainer_scores[score] = set()
maintainer_scores[score].add(maintainer)
# Going through the scores, starting with the highest
for score in sorted(maintainer_scores, reverse=True):
# If there are multiple with the same score, it's unclear
if len(maintainer_scores[score]) > 1:
for chosen_maintainer in maintainer_scores[score]:
top_maintainers.add(chosen_maintainer)
break
# If there's just one maintainer with this score, it's the owner!
if len(maintainer_scores[score]) == 1:
for chosen_maintainer in maintainer_scores[score]:
top_maintainer = chosen_maintainer
top_maintainers.add(chosen_maintainer)
break
self.component_maintainers[component_name]["all"] = maintainers
self.component_maintainers[component_name]["top_multiple"] = top_maintainers
self.component_maintainers[component_name]["top"] = top_maintainer
def _initiate_view(self, view_conf_id):
self.view_conf_id = view_conf_id
self.all_arches = self.query.arches_in_view(view_conf_id)
self.workload_ids = self.query.workloads_in_view(view_conf_id, None)
self.pkg_entries = {}
self.srpm_entries = {}
self.component_maintainers = {}
self.runtime_rpm_names = set()
self.runtime_srpm_names = set()
self.buildroot_rpm_names = set()
self.buildroot_srpm_names = set()
self.buildroot_only_rpm_names = set()
self.buildroot_only_srpm_names = set()
self.all_rpm_names = set()
self.all_srpm_names = set()
self.buildroot_pkgs = {}
# {
# "RPM_NAME": {
# "source_name": "SRPM_NAME",
# "required_by": set(
# "RPM_NAME",
# "RPM_NAME",
# ),
# "required_by_srpms": set(
# "SRPM_NAME",
# "SRPM_NAME",
# ),
# }
# }
### Initiate: self.runtime_rpm_names
for arch in self.all_arches:
self.runtime_rpm_names.update(self.query.pkgs_in_view(view_conf_id, arch, output_change="binary_names"))
### Initiate: self.runtime_srpm_names
for arch in self.all_arches:
self.runtime_srpm_names.update(self.query.pkgs_in_view(view_conf_id, arch, output_change="source_names"))
### Initiate: self.buildroot_pkgs
build_dependencies = {}
for arch in self.all_arches:
for pkg_name, pkg_data in self.query.view_buildroot_pkgs(view_conf_id, arch).items():
if pkg_name not in build_dependencies:
build_dependencies[pkg_name] = {}
build_dependencies[pkg_name]["required_by"] = set()
build_dependencies[pkg_name]["required_by"] = build_dependencies[pkg_name]["required_by"].union(pkg_data["required_by"])
buildroot_pkg_relations = {}
for buildroot_pkg_relations_conf_id, buildroot_pkg_relations_conf in self.query.configs["buildroot_pkg_relations"].items():
if view_conf_id == buildroot_pkg_relations_conf["view_id"]:
arch = buildroot_pkg_relations_conf["arch"]
arch_buildroot_pkg_relations = buildroot_pkg_relations_conf["pkg_relations"]
for pkg_id, pkg_data in arch_buildroot_pkg_relations.items():
pkg_name = pkg_id_to_name(pkg_id)
if pkg_name not in buildroot_pkg_relations:
buildroot_pkg_relations[pkg_name] = {}
buildroot_pkg_relations[pkg_name]["source_name"] = pkg_data["source_name"]
buildroot_pkg_relations[pkg_name]["required_by"] = set()
for required_by_pkg_id in pkg_data["required_by"]:
required_by_pkg_name = pkg_id_to_name(required_by_pkg_id)
buildroot_pkg_relations[pkg_name]["required_by"].add(required_by_pkg_name)
for pkg_name, pkg in buildroot_pkg_relations.items():
if pkg_name not in build_dependencies:
continue
self.buildroot_pkgs[pkg_name] = {}
self.buildroot_pkgs[pkg_name]["source_name"] = pkg["source_name"]
self.buildroot_pkgs[pkg_name]["required_by"] = pkg["required_by"]
self.buildroot_pkgs[pkg_name]["required_by_srpms"] = build_dependencies[pkg_name]["required_by"]
### Initiate: self.buildroot_srpm_names
for pkg_name, pkg in self.buildroot_pkgs.items():
self.buildroot_srpm_names.add(pkg["source_name"])
### Initiate: self.buildroot_rpm_names
self.buildroot_rpm_names = set(self.buildroot_pkgs.keys())
### Initiate: Other lists
self.all_rpm_names = self.runtime_rpm_names.union(self.buildroot_rpm_names)
self.all_srpm_names = self.runtime_srpm_names.union(self.buildroot_srpm_names)
self.buildroot_only_rpm_names = self.buildroot_rpm_names.difference(self.runtime_rpm_names)
self.buildroot_only_srpm_names = self.buildroot_srpm_names.difference(self.runtime_srpm_names)
### Initiate: self.pkg_entries
for pkg_name in self.all_rpm_names:
self.pkg_entries[pkg_name] = {}
self.pkg_entries[pkg_name]["name"] = pkg_name
for layer in range(0, self.MAX_LAYER + 1):
for level in range(0, self.MAX_LEVEL + 1):
if layer == 0:
level_name = "level{}".format(level)
self.pkg_entries[pkg_name][level_name] = {}
self.pkg_entries[pkg_name][level_name]["workload_requirements"] = {}
else:
level_name = "level{}{}".format(layer, level)
self.pkg_entries[pkg_name][level_name] = {}
self.pkg_entries[pkg_name][level_name]["build_source_names"] = set()
### Initiate: self.srpm_entries
for srpm_name in self.all_srpm_names:
self.srpm_entries[srpm_name] = {}
self.srpm_entries[srpm_name]["ownership"] = {}
for layer in range(0, self.MAX_LAYER + 1):
for level in range(0, self.MAX_LEVEL + 1):
if layer == 0:
level_name = "level{}".format(level)
self.srpm_entries[srpm_name]["ownership"][level_name] = {}
else:
level_name = "level{}{}".format(layer, level)
self.srpm_entries[srpm_name]["ownership"][level_name] = {}
### Initiate: self.component_maintainers
for srpm_name in self.all_srpm_names:
self.component_maintainers[srpm_name] = {}
self.component_maintainers[srpm_name]["all"] = {}
self.component_maintainers[srpm_name]["top_multiple"] = set()
self.component_maintainers[srpm_name]["top"] = None
def _pkg_relations_ids_to_names(self, pkg_relations):
if not pkg_relations:
return pkg_relations
pkg_relations_names = {}
for pkg_id, pkg in pkg_relations.items():
pkg_name = pkg_id_to_name(pkg_id)
pkg_relations_names[pkg_name] = {}
pkg_relations_names[pkg_name]["required_by"] = set()
for required_by_pkg_id in pkg["required_by"]:
required_by_pkg_name = pkg_id_to_name(required_by_pkg_id)
pkg_relations_names[pkg_name]["required_by"].add(required_by_pkg_name)
return pkg_relations_names
def _process_layer_zero_entries(self):
# This is first done on an RPM level. Starts with level 0 == required,
# assigns them based on who required them. Then moves on to level 1 == 1st
# level dependencies, and assigns them based on who pulled them in on
# the level above. And it goes deeper and deeper until MAX_LEVEL.
#
# The second part of the function then takes this data from RPMs and
# copies them over to their SRPMs. When multiple RPMs belong to a single
# SRPM, it merges it.
#
#
# Part 1: RPMs
#
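# A tiny illustrative example (package names are made up): if a workload
# directly requires "foo", then "foo" lands on level0; a package "libfoo"
# pulled in only because "foo" requires it lands on level1; whatever "libfoo"
# pulls in lands on level2, and so on down to MAX_LEVEL.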
workload_ids = self.query.workloads_in_view(self.view_conf_id, None)
for workload_id in workload_ids:
workload = self.query.data["workloads"][workload_id]
workload_conf_id = workload["workload_conf_id"]
workload_conf = self.query.configs["workloads"][workload_conf_id]
workload_maintainer = workload_conf["maintainer"]
pkgs = self.query.workload_pkgs_id(workload_id)
pkg_relations_ids = workload["pkg_relations"]
pkg_relations = self._pkg_relations_ids_to_names(pkg_relations_ids)
# Packages on level 0 == required
level0_pkg_names = {}
# This will initially hold all packages.
# When figuring out levels, I'll process each package just once.
# And for that I'll be removing them from this set as I go.
remaining_pkg_names = set()
for pkg in pkgs:
pkg_name = pkg["name"]
remaining_pkg_names.add(pkg_name)
if "source_name" not in self.pkg_entries[pkg_name]:
self.pkg_entries[pkg_name]["source_name"] = pkg["source_name"]
# Is this package level 0 (directly required)?
if workload_id in pkg["q_required_in"]:
if workload_conf_id not in self.pkg_entries[pkg_name]["level0"]["workload_requirements"]:
self.pkg_entries[pkg_name]["level0"]["workload_requirements"][workload_conf_id] = set()
#level0_pkg_names.add(pkg_name)
if pkg_name not in level0_pkg_names:
level0_pkg_names[pkg_name] = set()
level0_pkg_names[pkg_name].add((None, pkg_name))
remaining_pkg_names.remove(pkg_name)
# Initialize sets for all levels
pkg_names_level = []
pkg_names_level.append(level0_pkg_names)
# Starting at level 1, because level 0 is already done (that's required packages)
for current_level in range(1, self.MAX_LEVEL + 1):
#1..
#pkg_names_level.append(set())
pkg_names_level.append({})
for pkg_name in remaining_pkg_names.copy():
pkg = self.pkg_entries[pkg_name]
# is pkg required by higher_pkg_name (which is on the previous level)?
# (== is higher_pkg_name in the list of packages that pkg is required by?)
# then pkg belongs to the current level
for higher_pkg_name in pkg_names_level[current_level - 1]:
if higher_pkg_name in pkg_relations[pkg_name]["required_by"]:
#pkg_names_level[current_level].add(pkg_name)
if pkg_name not in pkg_names_level[current_level]:
pkg_names_level[current_level][pkg_name] = set()
pkg_names_level[current_level][pkg_name].add((higher_pkg_name, pkg_name))
try:
remaining_pkg_names.remove(pkg_name)
except KeyError:
pass
# Some might remain unassigned for weird reasons; put them on the deepest level
for pkg_name in remaining_pkg_names:
#pkg_names_level[self.MAX_LEVEL].add(pkg_name)
if pkg_name not in pkg_names_level[self.MAX_LEVEL]:
pkg_names_level[self.MAX_LEVEL][pkg_name] = set()
for current_level in range(0, self.MAX_LEVEL + 1):
level_name = "level{num}".format(num=str(current_level))
for pkg_name in self.pkg_entries:
pkg = self.pkg_entries[pkg_name]
if pkg_name in pkg_names_level[current_level]:
if workload_conf_id not in self.pkg_entries[pkg_name][level_name]["workload_requirements"]:
self.pkg_entries[pkg_name][level_name]["workload_requirements"][workload_conf_id] = set()
self.pkg_entries[pkg_name][level_name]["workload_requirements"][workload_conf_id].update(pkg_names_level[current_level][pkg_name])
#
# Part 2: SRPMs
#
for pkg_name, pkg in self.pkg_entries.items():
if "source_name" not in pkg:
continue
source_name = pkg["source_name"]
for current_level in range(0, self.MAX_LEVEL + 1):
level_name = "level{num}".format(num=str(current_level))
for workload_conf_id, pkg_names_requiring_this in pkg[level_name]["workload_requirements"].items():
maintainer = self.query.configs["workloads"][workload_conf_id]["maintainer"]
if maintainer not in self.srpm_entries[source_name]["ownership"][level_name]:
self.srpm_entries[source_name]["ownership"][level_name][maintainer] = {}
self.srpm_entries[source_name]["ownership"][level_name][maintainer]["workloads"] = {}
self.srpm_entries[source_name]["ownership"][level_name][maintainer]["pkg_names"] = set()
self.srpm_entries[source_name]["ownership"][level_name][maintainer]["pkg_count"] = 0
if workload_conf_id not in self.srpm_entries[source_name]["ownership"][level_name][maintainer]["workloads"]:
self.srpm_entries[source_name]["ownership"][level_name][maintainer]["workloads"][workload_conf_id] = set()
self.srpm_entries[source_name]["ownership"][level_name][maintainer]["workloads"][workload_conf_id].add(pkg_name)
self.srpm_entries[source_name]["ownership"][level_name][maintainer]["pkg_names"].update(pkg_names_requiring_this)
self.srpm_entries[source_name]["ownership"][level_name][maintainer]["pkg_count"] = len(self.srpm_entries[source_name]["ownership"][level_name][maintainer]["pkg_names"])
def perform_additional_analyses(query):
for view_conf_id in query.configs["views"]:
view_conf = query.configs["views"][view_conf_id]
# This function is now only used for the obsolete dep_tracker build strategy
if view_conf["type"] == "compose" and view_conf["buildroot_strategy"] == "dep_tracker":
if "views" not in query.computed_data:
query.computed_data["views"] = {}
if not view_conf_id in query.computed_data["views"]:
query.computed_data["views"][view_conf_id] = {}
# Resolve ownership recommendations
# This is currently only supported for compose views, not addon views
if view_conf["type"] == "compose":
ownership_engine = OwnershipEngine(query)
component_maintainers = ownership_engine.process_view(view_conf_id)
query.computed_data["views"][view_conf_id]["srpm_maintainers"] = component_maintainers
query.computed_data["views"][view_conf_id]["ownership_recommendations"] = ownership_engine.srpm_entries
###############################################################################
### Main ######################################################################
###############################################################################
def main():
# -------------------------------------------------
# Stage 1: Data collection and analysis using DNF
# -------------------------------------------------
# measuring time of execution
time_started = datetime_now_string()
settings = load_settings()
if settings["use_cache"]:
configs = load_data("cache_configs.json")
data = load_data("cache_data.json")
else:
configs = get_configs(settings)
analyzer = Analyzer(configs, settings)
data = analyzer.analyze_things()
if settings["dev_buildroot"]:
dump_data("cache_configs.json", configs)
dump_data("cache_data.json", data)
settings["global_refresh_time_started"] = datetime.datetime.now().strftime("%-d %B %Y %H:%M UTC")
# -------------------------------------------------
# Stage 2: Additional analysis
# -------------------------------------------------
query = Query(data, configs, settings)
perform_additional_analyses(query)
# measuring time of execution
time_analysis_time = datetime_now_string()
# -------------------------------------------------
# Stage 3: Generating pages and data outputs
# -------------------------------------------------
generate_pages(query)
generate_historic_data(query)
# -------------------------------------------------
# Done! Printing final summary
# -------------------------------------------------
# measuring time of execution
time_ended = datetime_now_string()
log("")
log("=============================")
log("Feedback Pipeline build done!")
log("=============================")
log("")
log(" Started: {}".format(time_started))
log(" Analysis done: {}".format(time_analysis_time))
log(" Finished: {}".format(time_ended))
log("")
if __name__ == "__main__":
main()
|
# It's an env!
if len(id_components) == 3:
env_conf_id = id_components[0]
repo_id = id_components[1]
arch = id_components[2]
return self.env_pkgs(env_conf_id, repo_id, arch)
# It's a workload!
if len(id_components) == 4:
workload_conf_id = id_components[0]
env_conf_id = id_components[1]
repo_id = id_components[2]
arch = id_components[3]
return self.env_pkgs(env_conf_id, repo_id, arch)
raise ValueError("That seems to be an invalid ID!")
@lru_cache(maxsize = Non |
platform.rs | //! This module provides helpers for platform specific logic.
use std::convert::TryFrom;
// ================
// === Platform ===
// ================
/// This enumeration lists all the supported platforms.
#[derive(Debug,Clone,Copy,PartialEq,Eq)]
pub enum Platform {
Android,
FreeBSD,
IOS,
Linux,
MacOS,
OpenBSD,
Windows,
}
pub use Platform::*;
#[allow(missing_docs)]
impl Platform {
pub fn is_android (self) -> bool { self == Android }
pub fn is_freebsd (self) -> bool { self == FreeBSD }
pub fn is_ios (self) -> bool { self == IOS }
pub fn is_linux (self) -> bool { self == Linux }
pub fn is_macos (self) -> bool { self == MacOS }
pub fn is_openbsd (self) -> bool { self == OpenBSD }
pub fn is_windows (self) -> bool { self == Windows }
}
#[derive(Clone,Copy,Debug)]
pub struct UnknownPlatform;
impl TryFrom<&str> for Platform {
type Error = UnknownPlatform;
#[allow(clippy::if_same_then_else)]
fn try_from(s:&str) -> Result<Self,Self::Error> {
let name = s.to_lowercase();
if name.contains("darwin") { Ok(MacOS) }
else if name.contains("mac") { Ok(MacOS) }
else if name.contains("linux") { Ok(Linux) }
// CAREFUL: "win" also matches "darwin", which is why this check comes after the darwin/mac checks above:
else if name.contains("win") { Ok(Windows) }
else if name.contains("ios") { Ok(IOS) }
else if name.contains("iphone") { Ok(IOS) }
else if name.contains("ipad") { Ok(IOS) }
else if name.contains("android") { Ok(Android) }
else if name.contains("freebsd") { Ok(FreeBSD) }
else if name.contains("openbsd") { Ok(OpenBSD) }
else if name.contains("bsd") { Ok(FreeBSD) }
else { Err(UnknownPlatform) }
}
}
impl TryFrom<String> for Platform {
type Error = UnknownPlatform;
fn try_from(s:String) -> Result<Self,Self::Error> {
Platform::try_from(s.as_str())
}
}
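// Illustrative behaviour of the parsing above (the inputs are examples, not an
// exhaustive list):
//
//   Platform::try_from("Linux x86_64") -> Ok(Linux)
//   Platform::try_from("MacIntel")     -> Ok(MacOS)
//   Platform::try_from("iPhone")       -> Ok(IOS)
//   Platform::try_from("amiga")        -> Err(UnknownPlatform)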
// ================================
// === Compile Time Redirection ===
// ================================
/// Queries which platform we are on.
#[cfg(target_arch="wasm32")]
pub fn current() -> Option<Platform> {
current_wasm()
}
/// Queries which platform we are on.
#[cfg(not(target_arch="wasm32"))]
pub fn current() -> Option<Platform> {
current_native()
}
// ====================
// === Current WASM ===
// ====================
/// Queries which platform we are on.
#[allow(clippy::if_same_then_else)]
pub fn current_wasm() -> Option<Platform> {
use super::window;
let window = window();
let navigator = window.navigator();
let platform = navigator.platform().unwrap_or_default().to_lowercase();
let agent = navigator.user_agent().unwrap_or_default().to_lowercase();
Platform::try_from(platform).or_else(|_|Platform::try_from(agent)).ok()
}
// ======================
// === Current Native ===
// ======================
#[cfg(target_os="android")] fn current_native() -> Option<Platform> { Some(Android) }
#[cfg(target_os="ios")] fn current_native() -> Option<Platform> |
#[cfg(target_os="linux")] fn current_native() -> Option<Platform> { Some(Linux) }
#[cfg(target_os="macos")] fn current_native() -> Option<Platform> { Some(MacOS) }
#[cfg(target_os="windows")] fn current_native() -> Option<Platform> { Some(Windows) }
#[cfg(not(any(
target_arch = "wasm32",
target_os = "android",
target_os = "ios",
target_os = "linux",
target_os = "macos",
target_os = "windows"
)))] fn current_native() -> Option<Platform> { None }
// =============
// === Tests ===
// =============
#[cfg(all(test,any(target_os="linux",target_os="windows",target_os="macos")))]
mod test {
use super::*;
use wasm_bindgen_test::wasm_bindgen_test;
use wasm_bindgen_test::wasm_bindgen_test_configure;
wasm_bindgen_test_configure!(run_in_browser);
#[wasm_bindgen_test]
fn platform() {
assert_eq!(current(),current_native())
}
}
| { Some(IOS) } |
seelog.go | package seelog
import (
"bufio"
"io"
"log"
"net/http"
"os"
"path/filepath"
"strconv"
)
func pwd() string {
dir, err := filepath.Abs(filepath.Dir(os.Args[0]))
if err != nil {
log.Fatal(err)
}
return dir
}
// FileDownload serves the latest log file of the given name for download.
func FileDownload(w http.ResponseWriter, r *http.Request) {
filename := r.FormValue("file")
dir := pwd()
file, err := os.Open(dir + "/logs/" + filename + ".log")
if err != nil {
log.Println(err)
return
}
defer file.Close()
fileHeader := make([]byte, 512)
file.Read(fileHeader)
fileStat, err1 := file.Stat()
if err1 != nil {
log.Println(err1)
return
}
w.Header().Set("Content-Disposition", "attachment; filename="+filename)
w.Header().Set("Content-Type", http.DetectContentType(fileHeader))
w.Header().Set("Content-Length", strconv.FormatInt(fileStat.Size(), 10))
file.Seek(0, 0)
io.Copy(w, file)
return
}
// FileShow writes the last requested number of lines of the given log file to the response.
func FileShow(w http.ResponseWriter, r *http.Request) {
var cu = []int{}
filename := r.FormValue("fi | le")
lines, err := strconv.Atoi(r.FormValue("lines"))
if err != nil {
lines = 200
}
dir := pwd()
file, err := os.Open(dir + "/logs/" + filename + ".log")
if err != nil {
log.Println(err)
return
}
defer file.Close()
fd := bufio.NewReader(file)
count := 0
l2 := 0
for {
result, err := fd.ReadBytes('\n')
l1 := len(result)
l2 = l2 + l1
cu = append(cu, l2)
if err != nil {
break
}
count++
}
if count > lines {
ofs := int64(cu[int(count-lines-1)])
file.Seek(ofs, 0)
io.Copy(w, file)
} else {
file.Seek(0, 0)
io.Copy(w, file)
}
return
}
|
|
partially-mixtyped.spec.ts | import { EventIdentifier, ObservedValue, EventIdentifierStrict } from "../package";
import { AssertTrue, AreEqual, Method } from "./utils";
class SomeType { private _ = "" }
const someSymbol = Symbol("foo");
class PartiallyMixtypedNodeEmitter {
addListener(
eventName: string | symbol,
listener: ((a0: number) => void) // it's number instead of ...args: any[]
): void;
addListener(
eventName: "event-a",
listener: (a0: number, a1: string) => void
): void;
addListener(
eventName: typeof someSymbol,
listener: (a0: SomeType) => void
): void;
addListener(
eventName:
| "event-a"
| typeof someSymbol,
listener:
| ((a0: number) => void)
| ((a0: number, a1: string) => void)
| ((a0: SomeType) => void)
) {}
removeListener(
eventName: string | symbol,
listener: ((a0: number) => void)
): void;
removeListener(
eventName: "event-a",
listener: (a0: number, a1: string) => void
): void;
removeListener(
eventName: typeof someSymbol,
listener: (a0: SomeType) => void
): void;
removeListener(
eventName:
| "event-a"
| typeof someSymbol,
listener:
| ((a0: number) => void)
| ((a0: number, a1: string) => void)
| ((a0: SomeType) => void)
) {}
}
type ExpectedEventName =
| "event-a"
| typeof someSymbol
| symbol
| string;
type ExpectedStrictEventName =
| "event-a"
| typeof someSymbol;
type ExpectedObservedValue<I> =
I extends "event-a" ? [number, string] :
I extends typeof someSymbol ? SomeType :
I extends string | symbol ? number :
never;
type E = PartiallyMixtypedNodeEmitter;
type M = Method<E>;
type ActualEventIdentifier = EventIdentifier<E, M>;
type ActualEventIdentifierStrict = EventIdentifierStrict<E, M>;
type ActualObservedValue<I extends EventIdentifier<E, M>> = ObservedValue<E, M, I>;
| AreEqual<
ExpectedEventName,
ActualEventIdentifier
>,
AreEqual<
ExpectedStrictEventName,
ActualEventIdentifierStrict
>,
AreEqual<
ExpectedObservedValue<"event-a">,
ActualObservedValue<"event-a">
>,
AreEqual<
ExpectedObservedValue<typeof someSymbol>,
ActualObservedValue<typeof someSymbol>
>,
AreEqual<
ExpectedObservedValue<string>,
ActualObservedValue<string>
>,
AreEqual<
ExpectedObservedValue<"non-existent-event">,
ActualObservedValue<"non-existent-event">
>
];
type Works = AssertTrue<Tests[number]> | type Tests = [ |
crypto.go | package indexer
import (
"fmt"
l2common "github.com/ethereum-optimism/optimism/l2geth/common"
"github.com/ethereum/go-ethereum/common" |
// ParseL1Address parses an L1 ETH address from a hex string. This method will
// fail if the address is not a valid hexadecimal address.
func ParseL1Address(address string) (common.Address, error) {
if common.IsHexAddress(address) {
return common.HexToAddress(address), nil
}
return common.Address{}, fmt.Errorf("invalid address: %v", address)
}
// ParseL2Address parses an L2 ETH address from a hex string. This method will
// fail if the address is not a valid hexadecimal address.
func ParseL2Address(address string) (l2common.Address, error) {
if l2common.IsHexAddress(address) {
return l2common.HexToAddress(address), nil
}
return l2common.Address{}, fmt.Errorf("invalid address: %v", address)
} | ) |
operations.spec.ts | import {
IPane,
IPaneContent,
PaneSplitAxis,
} from './types'
import {
setCurrentPane,
addContentToCurrentPane,
setPaneCurrentContent,
removePaneContent,
splitPane,
} from './operations'
interface IPanesContents {
[paneId: string]: string[]
}
const getPaneFixtures = (
seeds: string[],
contentsSeeds: IPanesContents = {}
): Array<IPane<any>> => seeds.map(id => {
let contents: Array<IPaneContent<any>> = []
if (contentsSeeds[id] !== undefined) {
contents = contentsSeeds[id].map(contentId => ({
id: contentId,
type: 'test',
isUnique: false,
isCurrent: false,
}))
}
return {
id,
isCurrent: false,
split: false,
splitAxis: PaneSplitAxis.Horizontal,
contents,
children: [],
}
})
describe('setCurrentPane', () => {
it('should set pane matching provided paneId current pane', () => {
const panes = getPaneFixtures(['a', 'b', 'c'])
const res = setCurrentPane(panes, 'a')
expect(res).not.toBe(panes)
expect(res[0].isCurrent).toBeTruthy()
expect(res[1].isCurrent).toBeFalsy()
expect(res[2].isCurrent).toBeFalsy()
})
it('should leave panes untouched if pane is already the current one', () => {
const panes = getPaneFixtures(['a', 'b', 'c'])
panes[0].isCurrent = true
const res = setCurrentPane(panes, 'a')
expect(res).toBe(panes)
})
it('should leave panes already in desired state untouched', () => {
const panes = getPaneFixtures(['a', 'b', 'c'])
const res = setCurrentPane(panes, 'a')
expect(res).not.toBe(panes)
expect(res[0].isCurrent).toBeTruthy()
expect(res[1]).toBe(panes[1])
expect(res[2]).toBe(panes[2])
})
it('should throw if panes is empty', () => {
expect(() => {
setCurrentPane([], 'invalid')
}).toThrow(`there's no available pane`)
})
it('should throw if paneId does not exist', () => {
expect(() => {
setCurrentPane(getPaneFixtures(['a']), 'invalid')
}).toThrow('no pane found for id: invalid, available panes: a')
})
})
describe('addContentToCurrentPane', () => {
it(`should append provided content to current pane`, () => {
const panes = getPaneFixtures(['a'])
panes[0].isCurrent = true
const content = {
id: 'test',
type: 'test',
isCurrent: true,
isUnique: false,
}
const res = addContentToCurrentPane(panes, content)
expect(res[0].contents).toHaveLength(1)
expect(res[0].contents[0]).toEqual(content)
})
it(`should throw if there's no current pane`, () => {
expect(() => {
addContentToCurrentPane(getPaneFixtures(['a']), {
id: 'test',
type: 'test',
isCurrent: false,
isUnique: false,
})
}).toThrow('unable to find a current pane')
})
})
describe('setPaneCurrentContent', () => {
it('should set pane matching provided paneId current pane, and content matching provided contentId current content', () => {
const panes = getPaneFixtures(['a', 'b'], {
a: ['a.0', 'a.1'],
b: ['b.0', 'b.1'],
})
panes[0].contents[0].isCurrent = true
panes[1].isCurrent = true
const res = setPaneCurrentContent(panes, 'a', 'a.1')
expect(res).not.toBe(panes)
expect(res[0].isCurrent).toBeTruthy()
expect(res[0].contents[0].isCurrent).toBeFalsy()
expect(res[0].contents[1].isCurrent).toBeTruthy()
expect(res[1].isCurrent).toBeFalsy()
expect(res[1].contents[0].isCurrent).toBeFalsy()
expect(res[1].contents[1].isCurrent).toBeFalsy()
})
it('should leave panes and contents untouched if already in desired state', () => {
const panes = getPaneFixtures(['a', 'b'], {
a: ['a.0', 'a.1'],
b: ['b.0', 'b.1'],
})
panes[0].isCurrent = true
panes[0].contents[0].isCurrent = true
const res = setPaneCurrentContent(panes, 'a', 'a.0')
expect(res).toBe(panes)
})
it('should leave panes and contents already in desired state untouched', () => {
const panes = getPaneFixtures(['a', 'b'], {
a: ['a.0', 'a.1'],
b: ['b.0', 'b.1'],
})
const res = setPaneCurrentContent(panes, 'a', 'a.0')
expect(res).not.toBe(panes)
expect(res[0]).not.toBe(panes[0])
expect(res[0].isCurrent).toBeTruthy()
expect(res[0].contents[0].isCurrent).toBeTruthy()
expect(res[0].contents[1]).toBe(panes[0].contents[1])
expect(res[1]).toBe(panes[1])
expect(res[1].contents[0]).toBe(panes[1].contents[0])
expect(res[1].contents[1]).toBe(panes[1].contents[1])
})
it('should throw if panes is empty', () => {
expect(() => {
setPaneCurrentContent([], 'invalid', 'contentId')
}).toThrow(`there's no available pane`)
})
it('should throw if paneId does not exist', () => {
expect(() => {
setPaneCurrentContent(getPaneFixtures(['a']), 'invalid', 'contentId')
}).toThrow('no pane found for id: invalid, available panes: a')
})
it('should throw if pane does not have any content', () => {
expect(() => {
setPaneCurrentContent(getPaneFixtures(['a']), 'a', 'invalid')
}).toThrow(`pane a doesn't have any content`) | expect(() => {
setPaneCurrentContent(getPaneFixtures(['a'], { a: ['a.0'] }), 'a', 'invalid')
}).toThrow('no content found in pane: a for id: invalid, available contents: a.0')
})
})
describe('removePaneContent', () => {
it('should remove content matching contentId from pane matching paneId', () => {
const panes = getPaneFixtures(['a'], { a: ['a.0', 'a.1'] })
panes[0].contents[0].isCurrent = true
const res = removePaneContent(panes, 'a', 'a.1')
expect(res).not.toBe(panes)
expect(res[0].contents).toHaveLength(1)
expect(res[0].contents[0]).toBe(panes[0].contents[0])
expect(res[0].contents[0]).toEqual(panes[0].contents[0])
})
it(`should set latest remaining content as current if there's none`, () => {
const panes = getPaneFixtures(['a'], { a: ['a.0', 'a.1'] })
const res = removePaneContent(panes, 'a', 'a.0')
expect(res).not.toBe(panes)
expect(res[0].contents).toHaveLength(1)
expect(res[0].contents[0]).not.toBe(panes[0].contents[0])
expect(res[0].contents[0].isCurrent).toBeTruthy()
})
it(`should replace parent pane with sibling pane if there's no remaining content and pane isn't root`, () => {
const panes = getPaneFixtures(['a', 'a__0', 'a__1'], {
a__0: ['a__0.0'],
a__1: ['a__1.0'],
})
panes[0].split = true
panes[0].children = ['a__0', 'a__1']
panes[1].childOf = 'a'
panes[2].childOf = 'a'
const res = removePaneContent(panes, 'a__0', 'a__0.0')
global.console.warn(res)
})
it('should throw if panes is empty', () => {
expect(() => {
removePaneContent([], 'invalid', 'contentId')
}).toThrow(`there's no available pane`)
})
it('should throw if paneId does not exist', () => {
expect(() => {
removePaneContent(getPaneFixtures(['a']), 'invalid', 'contentId')
}).toThrow('no pane found for id: invalid, available panes: a')
})
it('should throw if pane does not have any content', () => {
expect(() => {
removePaneContent(getPaneFixtures(['a']), 'a', 'invalid')
}).toThrow(`pane a doesn't have any content`)
})
it('should throw if contentId does not exist', () => {
expect(() => {
removePaneContent(getPaneFixtures(['a'], { a: ['a.0'] }), 'a', 'invalid')
}).toThrow('no content found in pane: a for id: invalid, available contents: a.0')
})
})
describe('splitPane', () => {
it('should split pane matching provided paneId into two sub panes using given axis', () => {
const panes = getPaneFixtures(['a'], { a: ['a.0'] })
panes[0].contents[0].isCurrent = true
const res = splitPane(panes, 'a', PaneSplitAxis.Horizontal)
expect(res).not.toBe(panes)
expect(res).toHaveLength(3)
expect(res[0].isCurrent).toBeFalsy()
expect(res[0].contents).toHaveLength(1)
expect(res[0].contents[0]).toEqual(panes[0].contents[0])
expect(res[1].isCurrent).toBeTruthy()
expect(res[1].contents).toHaveLength(1)
expect(res[1].contents[0]).toEqual(panes[0].contents[0])
expect(res[2].isCurrent).toBeFalsy()
expect(res[2].contents).toHaveLength(0)
expect(res[2].children).toHaveLength(2)
expect(res[2].children).toEqual([
res[0].id,
res[1].id,
])
})
it('should do nothing if target pane does not have a current content', () => {
const panes = getPaneFixtures(['a'], { a: ['a.0'] })
const res = splitPane(panes, 'a', PaneSplitAxis.Horizontal)
expect(res).toBe(panes)
expect(res).toEqual(panes)
})
it('should throw if panes is empty', () => {
expect(() => {
splitPane([], 'invalid', PaneSplitAxis.Horizontal)
}).toThrow(`there's no available pane`)
})
it('should throw if paneId does not exist', () => {
expect(() => {
splitPane(getPaneFixtures(['a']), 'invalid', PaneSplitAxis.Horizontal)
}).toThrow('no pane found for id: invalid, available panes: a')
})
}) | })
it('should throw if contentId does not exist', () => { |
order-quantity-validator.directive.ts | import { Validator, NG_VALIDATORS, AbstractControl } from '@angular/forms';
import { Directive, Input } from '@angular/core';
@Directive({
selector: '[appOrderQuantityValidator]',
providers: [{
provide: NG_VALIDATORS,
useExisting: OrderQuantityValidatorDirective,
multi: true
}]
})
export class OrderQuantityValidatorDirective implements Validator { | validate(control: AbstractControl): { [key: string]: any } | null {
const number = Number(control.value);
return number < 0 || number > 10 ? { 'invalidOrderQuantity': true } : null;
}
} | |
test_driver_gaussian_extra.py | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
""" Test Driver Gaussian internals - does not require Gaussian installed """
import unittest
from test.chemistry import QiskitChemistryTestCase
from qiskit.chemistry.drivers import GaussianDriver
# We need to have an instance so we can test its functions, but the constructor
# calls an internal method to check that G16 is installed. We need to replace
# that check with the following dummy for things to work, and we do it for each
# test so the class ends up as it was.
def _check_valid():
pass
class TestDriverGaussianExtra(QiskitChemistryTestCase): |
def setUp(self):
super().setUp()
self.good_check = GaussianDriver._check_valid
GaussianDriver._check_valid = _check_valid
# We can now create a driver without the installed (check valid) test failing
def tearDown(self):
GaussianDriver._check_valid = self.good_check
def test_cfg_augment(self):
""" test input configuration augmentation """
cfg = '# rhf/sto-3g scf(conventional)\n\n' \
'h2 molecule\n\n0 1\nH 0.0 0.0 0.0\nH 0.0 0.0 0.735\n\n'
g16 = GaussianDriver(cfg)
aug_cfg = g16._augment_config("mymatfile.mat", cfg)
expected = '# rhf/sto-3g scf(conventional)\n' \
'# Window=Full Int=NoRaff Symm=(NoInt,None)' \
' output=(matrix,i4labels,mo2el) tran=full\n\n' \
'h2 molecule\n\n0 1\nH 0.0 0.0 0.0\nH 0.0 0.0 0.735' \
'\n\nmymatfile.mat\n\n'
self.assertEqual(aug_cfg, expected)
if __name__ == '__main__':
unittest.main() | """Gaussian Driver extra tests for driver specifics, errors etc """ |
verifybench.rs | // This program does benchmarking of the functions in verify.rs,
// that do certificate chain validation and signature verification.
//
// Note: we don't use any of the standard 'cargo bench', 'test::Bencher',
// etc. because it's unstable at the time of writing.
use std::convert::TryInto;
use std::time::{Duration, Instant, SystemTime};
use crate::anchors;
use crate::key;
use crate::verify;
use crate::verify::ServerCertVerifier;
use webpki_roots;
fn duration_nanos(d: Duration) -> u64 {
((d.as_secs() as f64) * 1e9 + (d.subsec_nanos() as f64)) as u64
}
#[test]
fn test_reddit_cert() {
Context::new(
"reddit",
"reddit.com",
&[
include_bytes!("testdata/cert-reddit.0.der"),
include_bytes!("testdata/cert-reddit.1.der"),
],
)
.bench(100)
}
#[test]
fn test_github_cert() {
Context::new(
"github",
"github.com",
&[
include_bytes!("testdata/cert-github.0.der"),
include_bytes!("testdata/cert-github.1.der"),
],
)
.bench(100)
}
#[test]
fn test_arstechnica_cert() {
Context::new(
"arstechnica",
"arstechnica.com",
&[
include_bytes!("testdata/cert-arstechnica.0.der"),
include_bytes!("testdata/cert-arstechnica.1.der"),
include_bytes!("testdata/cert-arstechnica.2.der"),
include_bytes!("testdata/cert-arstechnica.3.der"),
],
)
.bench(100)
}
#[test]
fn test_servo_cert() {
Context::new(
"servo",
"servo.org",
&[
include_bytes!("testdata/cert-servo.0.der"),
include_bytes!("testdata/cert-servo.1.der"),
],
)
.bench(100)
}
#[test]
fn test_twitter_cert() {
Context::new(
"twitter",
"twitter.com",
&[
include_bytes!("testdata/cert-twitter.0.der"),
include_bytes!("testdata/cert-twitter.1.der"),
],
)
.bench(100)
}
#[test]
fn test_wikipedia_cert() {
Context::new(
"wikipedia",
"wikipedia.org",
&[
include_bytes!("testdata/cert-wikipedia.0.der"),
include_bytes!("testdata/cert-wikipedia.1.der"),
],
)
.bench(100)
}
#[test]
fn test_google_cert() {
Context::new(
"google",
"www.google.com",
&[
include_bytes!("testdata/cert-google.0.der"),
include_bytes!("testdata/cert-google.1.der"),
],
)
.bench(100)
}
#[test]
fn test_hn_cert() {
Context::new(
"hn",
"news.ycombinator.com",
&[
include_bytes!("testdata/cert-hn.0.der"),
include_bytes!("testdata/cert-hn.1.der"),
include_bytes!("testdata/cert-hn.2.der"),
],
)
.bench(100)
}
#[test]
fn test_stackoverflow_cert() {
Context::new(
"stackoverflow",
"stackoverflow.com",
&[
include_bytes!("testdata/cert-stackoverflow.0.der"),
include_bytes!("testdata/cert-stackoverflow.1.der"),
],
)
.bench(100)
}
#[test]
fn test_duckduckgo_cert() {
Context::new(
"duckduckgo",
"duckduckgo.com",
&[
include_bytes!("testdata/cert-duckduckgo.0.der"),
include_bytes!("testdata/cert-duckduckgo.1.der"),
],
)
.bench(100)
}
#[test]
fn test_rustlang_cert() {
Context::new(
"rustlang",
"www.rust-lang.org",
&[
include_bytes!("testdata/cert-rustlang.0.der"),
include_bytes!("testdata/cert-rustlang.1.der"),
include_bytes!("testdata/cert-rustlang.2.der"),
],
)
.bench(100)
}
#[test]
fn test_wapo_cert() {
Context::new(
"wapo",
"www.washingtonpost.com", | &[
include_bytes!("testdata/cert-wapo.0.der"),
include_bytes!("testdata/cert-wapo.1.der"),
],
)
.bench(100)
}
struct Context {
name: &'static str,
domain: &'static str,
roots: anchors::RootCertStore,
chain: Vec<key::Certificate>,
now: SystemTime,
}
impl Context {
fn new(name: &'static str, domain: &'static str, certs: &[&'static [u8]]) -> Self {
let mut roots = anchors::RootCertStore::empty();
roots.add_server_trust_anchors(webpki_roots::TLS_SERVER_ROOTS.0);
Self {
name,
domain,
roots,
chain: certs
.iter()
.copied()
.map(|bytes| key::Certificate(bytes.to_vec()))
.collect(),
now: SystemTime::UNIX_EPOCH + Duration::from_secs(1617300000),
}
}
fn bench(&self, count: usize) {
let verifier = verify::WebPkiVerifier::new(self.roots.clone(), &[]);
const SCTS: &[&[u8]] = &[];
const OCSP_RESPONSE: &[u8] = &[];
let mut times = Vec::new();
let (end_entity, intermediates) = self.chain.split_first().unwrap();
for _ in 0..count {
let start = Instant::now();
let server_name = self.domain.try_into().unwrap();
verifier
.verify_server_cert(
end_entity,
intermediates,
&server_name,
&mut SCTS.iter().copied(),
OCSP_RESPONSE,
self.now,
)
.unwrap();
times.push(duration_nanos(Instant::now().duration_since(start)));
}
println!(
"verify_server_cert({}): min {:?}us",
self.name,
times.iter().min().unwrap() / 1000
);
}
} | |
join.go | // Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"fmt"
"io/ioutil"
"os"
"path"
"strings"
"time"
"github.com/pingcap/failpoint"
"github.com/pingcap/log"
"github.com/pingcap/pd/pkg/etcdutil"
"github.com/pkg/errors"
"go.etcd.io/etcd/clientv3"
"go.etcd.io/etcd/embed"
"go.uber.org/zap"
)
const (
// privateFileMode grants owner to read/write a file.
privateFileMode = 0600
// privateDirMode grants owner to make/remove files inside the directory.
privateDirMode = 0700
)
// listMemberRetryTimes is the number of times to retry listing etcd members.
var listMemberRetryTimes = 20
// PrepareJoinCluster sends MemberAdd command to PD cluster,
// and returns the initial configuration of the PD cluster.
//
// TL;DR: The join functionality is safe. With data, join does nothing; without
// data and when it is not a member of the cluster, join does MemberAdd. It
// returns an error if PD tries to join itself, data is missing, or it joins a
// duplicated PD.
//
// Etcd automatically re-joins the cluster if there is a data directory. So
// first it checks if there is a data directory or not. If there is, it returns
// an empty string (etcd will get the correct configurations from the data
// directory.)
//
// If there is no data directory, there are following cases:
//
// - A new PD joins an existing cluster.
// What join does: MemberAdd, MemberList, then generate initial-cluster.
//
// - A failed PD re-joins the previous cluster.
// What join does: return an error. (etcd reports: raft log corrupted,
// truncated, or lost?)
//
// - A deleted PD joins to previous cluster.
// What join does: MemberAdd, MemberList, then generate initial-cluster.
// (it is not in the member list and there is no data, so
// we can treat it as a new PD.)
//
// If there is a data directory, there are following special cases:
//
// - A failed PD tries to join the previous cluster but it has been deleted
// during its downtime.
// What join does: return "" (etcd will connect to other peers and find
// that the PD itself has been removed.)
//
// - A deleted PD joins the previous cluster.
// What join does: return "" (as etcd will read data directory and find
// that the PD itself has been removed, so an empty string
// is fine.)
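//
// Illustrative only: the initial-cluster string assembled below follows the
// usual etcd "name=peer-url" form, joined by commas, e.g.
//
//   pd1=http://192.0.2.10:2380,pd2=http://192.0.2.11:2380
//
// (names and addresses are placeholders).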
func PrepareJoinCluster(cfg *Config) error {
// - A PD tries to join itself.
if cfg.Join == "" {
return nil
}
if cfg.Join == cfg.AdvertiseClientUrls {
return errors.New("join self is forbidden")
}
filePath := path.Join(cfg.DataDir, "join")
// Read the persist join config
if _, err := os.Stat(filePath); !os.IsNotExist(err) {
s, err := ioutil.ReadFile(filePath)
if err != nil {
log.Fatal("read the join config meet error", zap.Error(err))
}
cfg.InitialCluster = strings.TrimSpace(string(s))
cfg.InitialClusterState = embed.ClusterStateFlagExisting
return nil
}
initialCluster := ""
// Cases with data directory.
if isDataExist(path.Join(cfg.DataDir, "member")) {
cfg.InitialCluster = initialCluster
cfg.InitialClusterState = embed.ClusterStateFlagExisting
return nil
}
// Below are cases without data directory.
tlsConfig, err := cfg.Security.ToTLSConfig()
if err != nil {
return err
}
client, err := clientv3.New(clientv3.Config{
Endpoints: strings.Split(cfg.Join, ","),
DialTimeout: etcdutil.DefaultDialTimeout,
TLS: tlsConfig,
})
if err != nil {
return errors.WithStack(err)
}
defer client.Close()
listResp, err := etcdutil.ListEtcdMembers(client)
if err != nil {
return err
}
existed := false
for _, m := range listResp.Members {
if len(m.Name) == 0 {
return errors.New("there is a member that has not joined successfully")
}
if m.Name == cfg.Name {
existed = true
}
}
// - A failed PD re-joins the previous cluster.
if existed {
return errors.New("missing data or join a duplicated pd")
}
var addResp *clientv3.MemberAddResponse
failpoint.Inject("add-member-failed", func() {
listMemberRetryTimes = 2
failpoint.Goto("LabelSkipAddMember")
})
// - A new PD joins an existing cluster.
// - A deleted PD joins to previous cluster.
{
// First adds member through the API
addResp, err = etcdutil.AddEtcdMember(client, []string{cfg.AdvertisePeerUrls})
if err != nil {
return err
}
}
failpoint.Label("LabelSkipAddMember")
var (
pds []string
listSucc bool
)
for i := 0; i < listMemberRetryTimes; i++ {
listResp, err = etcdutil.ListEtcdMembers(client)
if err != nil {
return err
}
pds = []string{}
for _, memb := range listResp.Members {
n := memb.Name
if addResp != nil && memb.ID == addResp.Member.ID {
n = cfg.Name
listSucc = true
}
if len(n) == 0 {
return errors.New("there is a member that has not joined successfully")
}
for _, m := range memb.PeerURLs {
pds = append(pds, fmt.Sprintf("%s=%s", n, m))
}
}
if listSucc {
break
}
time.Sleep(500 * time.Millisecond)
}
if !listSucc {
return errors.Errorf("join failed, adds the new member %s may failed", cfg.Name)
}
initialCluster = strings.Join(pds, ",")
cfg.InitialCluster = initialCluster
cfg.InitialClusterState = embed.ClusterStateFlagExisting
err = os.MkdirAll(cfg.DataDir, privateDirMode)
if err != nil && !os.IsExist(err) {
return errors.WithStack(err)
}
err = ioutil.WriteFile(filePath, []byte(cfg.InitialCluster), privateFileMode)
return errors.WithStack(err)
}
func isDataExist(d string) bool | {
dir, err := os.Open(d)
if err != nil {
log.Error("failed to open directory", zap.Error(err))
return false
}
defer dir.Close()
names, err := dir.Readdirnames(-1)
if err != nil {
log.Error("failed to list directory", zap.Error(err))
return false
}
return len(names) != 0
} |
|
requests.ts | import * as request from "request-promise";
const http = {
generalOptions: {
resolveWithFullResponse: true,
json: true
},
async get(uri: string, token: string = ""): Promise<{ [key: string]: any }> {
const { body, statusCode } = await request({
...this.generalOptions,
uri,
...(token && { headers: { "Session-Token": token } })
});
return { statusCode, ...body };
},
async post(
uri: string,
options: { [key: string]: any } = {
body: null,
token: "",
fileUpload: false
}
): Promise<{ [key: string]: any }> {
const { body, token, fileUpload } = options;
const createHeaders = () => {
const headers: { [key: string]: string } = {};
if (body) {
headers["Content-Type"] = fileUpload
? "application/json;charset=utf-8"
: "application/json";
}
if (token) {
headers["Session-Token"] = token;
} | return headers;
};
const { body: responseBody, statusCode } = await request({
...this.generalOptions,
method: "POST",
uri,
...(body && { body }),
headers: createHeaders()
});
return { statusCode, ...responseBody };
},
async put(
uri: string,
options: { [key: string]: any } = {
body: null,
token: ""
}
): Promise<{ [key: string]: any }> {
const { body, token } = options;
const { body: responseBody, statusCode } = await request({
...this.generalOptions,
method: "PUT",
uri,
...(body && { body }),
headers: {
"Content-Type": "application/json",
"Session-Token": token
}
});
return { statusCode, ...responseBody };
}
};
export default http; | |
e2e_scenarios.py | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the Apache 2.0 License.
import os
import json
import http
import random
import infra.network
import infra.proc
import infra.e2e_args
import infra.checker
from loguru import logger as LOG
def run(args):
# SNIPPET_START: parsing
with open(args.scenario) as f:
scenario = json.load(f)
hosts = scenario.get("hosts", infra.e2e_args.max_nodes(args, f=0))
args.package = scenario["package"]
# SNIPPET_END: parsing
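# Illustrative scenario shape (the keys mirror the reads below; the values are
# placeholders, not a real scenario):
#
# {
#   "package": "some_app",
#   "hosts": ["node-host"],
#   "connections": [
#     {"transactions": [
#       {"method": "app/endpoint", "body": {"k": "v"}, "verb": "POST",
#        "expected_result": true}
#     ]}
#   ]
# }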
scenario_dir = os.path.dirname(args.scenario)
# SNIPPET_START: create_network
with infra.network.network(
hosts, args.binary_dir, args.debug_nodes, args.perf_nodes
) as network:
network.start_and_join(args)
# SNIPPET_END: create_network
primary, backups = network.find_nodes()
with primary.client() as mc:
check = infra.checker.Checker()
check_commit = infra.checker.Checker(mc)
for connection in scenario["connections"]:
with (
primary.client("user0")
if not connection.get("on_backup")
else random.choice(backups).client("user0")
) as client:
txs = connection.get("transactions", [])
for include_file in connection.get("include", []):
with open(os.path.join(scenario_dir, include_file)) as f:
txs += json.load(f)
for tx in txs:
r = client.call(
tx["method"],
body=tx["body"],
http_verb=tx.get("verb", "POST"), |
if tx.get("expected_error") is not None:
check(
r,
error=lambda status, msg, transaction=tx: status
# pylint: disable=no-member
== http.HTTPStatus(
transaction.get("expected_error")
).value,
)
elif tx.get("expected_result") is not None:
check_commit(r, result=tx.get("expected_result"))
else:
check_commit(r, result=lambda res: res is not None)
network.wait_for_node_commit_sync()
if args.network_only:
LOG.info("Keeping network alive with the following nodes:")
LOG.info(" Primary = {}:{}".format(primary.pubhost, primary.pubport))
for i, f in enumerate(backups):
LOG.info(" Backup[{}] = {}:{}".format(i, f.pubhost, f.pubport))
input("Press Enter to shutdown...")
if __name__ == "__main__":
def add(parser):
parser.add_argument(
"--scenario",
help="Path to JSON file listing transactions to execute",
type=str,
required=True,
)
args = infra.e2e_args.cli_args(add=add)
run(args) | ) |
test_22_sporifica_virus.py | import pytest
from aoc_wim.aoc2017.q22 import mutate
test_data = """\
..#
#..
...
"""
@pytest.mark.parametrize("n,expected,part", [
(7, 5, "a"),
(70, 41, "a"),
(10000, 5587, "a"),
(100, 26, "b"),
(10000000, 2511944, "b")
], ids=["a_short", "a_medium", "a_long", "b_medium", "b_long_slow"])
def test_virus_mutation(n, expected, part):
| assert mutate(test_data, n_iterations=n, part=part) == expected |
|
engineer_tests.py | from __future__ import print_function, absolute_import
import unittest, math
import pandas as pd
import numpy as np
from . import *
class T(base_pandas_extensions_tester.BasePandasExtensionsTester):
def test_concat(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'c_2': ['d', 'e', 'f']})
df.engineer('concat(c_1, c_2)')
self.assertTrue(np.array_equal(df['c_concat(c_1,c_2)'].values,
np.array(['ad', 'be', 'cf'], 'object')))
def test_concat_3_cols(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'c_2': ['d', 'e', 'f'], 'c_3': ['h', 'i', 'j']})
df.engineer('concat(c_3, c_1, c_2)')
self.assertTrue(np.array_equal(df['c_concat(c_3,c_1,c_2)'].values,
np.array(['had', 'ibe', 'jcf'], 'object')))
def test_concat_with_numerical_col(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3]})
df.engineer('concat(c_1,n_2)')
self.assertTrue(np.array_equal(df['c_concat(c_1,n_2)'].values,
np.array(['a1', 'b2', 'c3'], 'object')))
def | (self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6]})
df.engineer('concat(n_3,c_1,n_2)')
self.assertTrue(np.array_equal(df['c_concat(n_3,c_1,n_2)'].values,
np.array(['4a1', '5b2', '6c3'], 'object')))
def test_multiplication(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.engineer('mult(n_2, n_3)')
self.assertTrue(np.array_equal(df['n_mult(n_2,n_3)'].values,
np.array([4, 10, 18], long)))
def test_multiplication_3_cols(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.engineer('mult(n_2, n_3, n_4)')
self.assertTrue(np.array_equal(df['n_mult(n_2,n_3,n_4)'].values,
np.array([4*7, 80, 18*9], long)))
def test_square_on_whole_data_frame(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.engineer('pow(2)')
np.testing.assert_array_equal(df.values,
np.array([
['a', 1, 4, 7, 1*1, 4*4, 7*7],
['b', 2, 5, 8, 2*2, 5*5, 8*8],
['c', 3, 6, 9, 3*3, 6*6, 9*9],
], 'object'))
def test_square_on_cols(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.engineer('pow(n_3, 2)')
np.testing.assert_array_equal(df.values,
np.array([
['a', 1, 4, 7, 4*4],
['b', 2, 5, 8, 5*5],
['c', 3, 6, 9, 6*6],
], 'object'))
def test_log_on_whole_data_frame(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.engineer('lg()')
self.assertTrue(np.array_equal(df.values,
np.array([
['a', 1, 4, 7, math.log(1), math.log(4), math.log(7)],
['b', 2, 5, 8, math.log(2), math.log(5), math.log(8)],
['c', 3, 6, 9, math.log(3), math.log(6), math.log(9)],
], 'object')))
def test_log_on_cols(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.engineer('lg(n_3)')
self.assertTrue(np.array_equal(df.values,
np.array([
['a', 1, 4, 7, math.log(4)],
['b', 2, 5, 8, math.log(5)],
['c', 3, 6, 9, math.log(6)],
], 'object')))
def test_sqrt_on_whole_data_frame(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.engineer('sqrt()')
self.assertTrue(np.array_equal(df.values,
np.array([
['a', 1, 4, 7, math.sqrt(1), math.sqrt(4), math.sqrt(7)],
['b', 2, 5, 8, math.sqrt(2), math.sqrt(5), math.sqrt(8)],
['c', 3, 6, 9, math.sqrt(3), math.sqrt(6), math.sqrt(9)],
], 'object')))
def test_sqrt_on_cols(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.engineer('sqrt(n_3)')
self.assertTrue(np.array_equal(df.values,
np.array([
['a', 1, 4, 7, math.sqrt(4)],
['b', 2, 5, 8, math.sqrt(5)],
['c', 3, 6, 9, math.sqrt(6)],
], 'object')))
def test_rolling_sum_on_single_col(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34]})
col = 'rolling_sum(n_1,3)'
df.engineer(col)
np.testing.assert_array_equal([np.nan, np.nan, 35, 40, 30, 29, 48], df['n_' + col])
def test_rolling_mean_on_single_col(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34]})
col = 'rolling_mean(n_1,3)'
df.engineer(col)
np.testing.assert_allclose([np.nan, np.nan, 11.66, 13.33, 10, 9.66, 16], df['n_' + col], rtol=1e-3)
def test_rolling_median_on_single_col(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34]})
col = 'rolling_median(n_1,3)'
df.engineer(col)
np.testing.assert_array_equal([np.nan, np.nan, 12, 13, 13, 12, 12], df['n_' + col])
def test_rolling_min_on_single_col(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34]})
col = 'rolling_min(n_1,3)'
df.engineer(col)
np.testing.assert_array_equal([np.nan, np.nan, 10, 12, 2, 2, 2], df['n_' + col])
def test_rolling_max_on_single_col(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34]})
col = 'rolling_max(n_1,3)'
df.engineer(col)
np.testing.assert_array_equal([np.nan, np.nan, 13, 15, 15, 15, 34], df['n_' + col])
def test_rolling_std_on_single_col(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34]})
col = 'rolling_std(n_1,3)'
df.engineer(col)
np.testing.assert_allclose([np.nan, np.nan, 1.528, 1.528, 7, 6.807, 16.371], df['n_' + col], rtol=1e-3)
def test_rolling_var_on_single_col(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34]})
col = 'rolling_var(n_1,3)'
df.engineer(col)
np.testing.assert_allclose([np.nan, np.nan, 2.333, 2.333, 49, 46.333, 268], df['n_' + col], rtol=1e-3)
# Multiple Columns
def test_rolling_sum_on_multi_cols(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]})
col = 'rolling_sum(3)'
df.engineer(col)
np.testing.assert_array_equal([np.nan, np.nan, 35, 40, 30, 29, 48], df['n_rolling_sum(n_1,3)'])
np.testing.assert_array_equal([np.nan, np.nan, 6, 10, 10, 9, 8], df['n_rolling_sum(n_2,3)'])
def test_rolling_mean_on_multi_cols(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]})
col = 'rolling_mean(3)'
df.engineer(col)
np.testing.assert_allclose([np.nan, np.nan, 11.66, 13.33, 10, 9.66, 16], df['n_rolling_mean(n_1,3)'], rtol=1e-3)
np.testing.assert_allclose([np.nan, np.nan, 2, 3.333, 3.333, 3, 2.666], df['n_rolling_mean(n_2,3)'], rtol=1e-3)
def test_rolling_median_on_multi_cols(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]})
col = 'rolling_median(3)'
df.engineer(col)
np.testing.assert_array_equal([np.nan, np.nan, 12, 13, 13, 12, 12], df['n_rolling_median(n_1,3)'])
np.testing.assert_array_equal([np.nan, np.nan, 2, 3, 3, 2, 2], df['n_rolling_median(n_2,3)'])
def test_rolling_min_on_multi_cols(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]})
col = 'rolling_min(3)'
df.engineer(col)
np.testing.assert_array_equal([np.nan, np.nan, 10, 12, 2, 2, 2], df['n_rolling_min(n_1,3)'])
np.testing.assert_array_equal([np.nan, np.nan, 1, 2, 2, 2, 2], df['n_rolling_min(n_2,3)'])
def test_rolling_max_on_multi_cols(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]})
col = 'rolling_max(3)'
df.engineer(col)
np.testing.assert_array_equal([np.nan, np.nan, 13, 15, 15, 15, 34], df['n_rolling_max(n_1,3)'])
np.testing.assert_array_equal([np.nan, np.nan, 3, 5, 5, 5, 4], df['n_rolling_max(n_2,3)'])
def test_rolling_std_on_multi_cols(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]})
col = 'rolling_std(3)'
df.engineer(col)
np.testing.assert_allclose([np.nan, np.nan, 1.528, 1.528, 7, 6.807, 16.371], df['n_rolling_std(n_1,3)'], rtol=1e-3)
np.testing.assert_allclose([np.nan, np.nan, 1, 1.528, 1.528, 1.732, 1.1547], df['n_rolling_std(n_2,3)'], rtol=1e-3)
def test_rolling_var_on_multi_cols(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]})
col = 'rolling_var(3)'
df.engineer(col)
np.testing.assert_allclose([np.nan, np.nan, 2.333, 2.333, 49, 46.333, 268], df['n_rolling_var(n_1,3)'], rtol=1e-3)
np.testing.assert_allclose([np.nan, np.nan, 1, 2.333, 2.333, 3, 1.333], df['n_rolling_var(n_2,3)'], rtol=1e-3)
def test_method_chaining(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'c_2':['d', 'e', 'f'],
'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.\
engineer('concat(c_1, c_2)').\
engineer('concat(c_1, n_2)').\
engineer('mult(n_2, n_3)').\
engineer('lg(n_2)').\
engineer('pow(n_3, 2)')
self.assertTrue(np.array_equal(df.values,
np.array([
['a', 'd', 1, 4, 7, 'ad', 'a1', 4, math.log(1), 4*4],
['b', 'e', 2, 5, 8, 'be', 'b2', 10, math.log(2), 5*5],
['c', 'f', 3, 6, 9, 'cf', 'c3', 18, math.log(3), 6*6]
], 'object')))
def test_chaining_single_call_semi_col_sep(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'c_2':['d', 'e', 'f'],
'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.engineer('concat(c_1, c_2);concat(c_1, n_2);mult(n_2, n_3);lg(n_2);pow(n_3, 2)')
self.assertTrue(np.array_equal(df.values,
np.array([
['a', 'd', 1, 4, 7, 'ad', 'a1', 4, math.log(1), 4*4],
['b', 'e', 2, 5, 8, 'be', 'b2', 10, math.log(2), 5*5],
['c', 'f', 3, 6, 9, 'cf', 'c3', 18, math.log(3), 6*6]
], 'object')))
def test_chaining_single_with_arr_arg(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'c_2':['d', 'e', 'f'],
'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.engineer('concat(c_1, c_2);concat(c_1, n_2);mult(n_2, n_3);lg(n_2);pow(n_3, 2)'.split(';'))
self.assertTrue(np.array_equal(df.values,
np.array([
['a', 'd', 1, 4, 7, 'ad', 'a1', 4, math.log(1), 4*4],
['b', 'e', 2, 5, 8, 'be', 'b2', 10, math.log(2), 5*5],
['c', 'f', 3, 6, 9, 'cf', 'c3', 18, math.log(3), 6*6]
], 'object')))
def test_long_method_chains(self):
df1 = pd.DataFrame({'n_1': [1, 2, 3], 'n_2': [4, 5, 6]})
df2 = pd.DataFrame({'n_1': [1, 2, 3], 'n_2': [4, 5, 6]})
df1.engineer('mult(lg(mult(n_1, n_2)), lg(pow(n_1, 3)))')
df2.engineer('mult(n_1,n_2);pow(n_1,3)')
df2.engineer('lg(pow(n_1,3));lg(mult(n_1, n_2))')
df2.engineer('mult(lg(mult(n_1,n_2)),lg(pow(n_1, 3)))')
    np.testing.assert_array_equal(np.sort(df1.columns.values), np.sort(df2.columns.values));
np.testing.assert_array_equal(df1['n_mult(n_1,n_2)'].values, df2['n_mult(n_1,n_2)'].values);
np.testing.assert_array_equal(df1['n_pow(n_1,3)'], df2['n_pow(n_1,3)']);
np.testing.assert_array_equal(df1['n_lg(pow(n_1,3))'], df2['n_lg(pow(n_1,3))']);
np.testing.assert_array_equal(df1['n_lg(mult(n_1,n_2))'], df2['n_lg(mult(n_1,n_2))']);
np.testing.assert_array_equal(df1['n_mult(lg(mult(n_1,n_2)),lg(pow(n_1,3)))'], df2['n_mult(lg(mult(n_1,n_2)),lg(pow(n_1,3)))']);
| test_concat_with_numerical_col_3_cols |
prepare.py | import numpy as np
import cv2
def load_batch(ls,od,data_path,batch_size,which_batch):
|
def normalize(image_list):
ma=max(image_list.flatten())
mi=min(image_list.flatten())
mean = float((ma+mi)/2.0)
output = (image_list-mean)/(ma-mean)
return output
| image_list=[]
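    # Each sample id is loaded as four grayscale channels: red, green, blue and yellow.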
for i in range(which_batch*batch_size,(which_batch+1)*batch_size):
image=[]
image.append(cv2.imread(data_path+ls[od[i]]+'_red.png',0))
image.append(cv2.imread(data_path+ls[od[i]]+'_green.png',0))
image.append(cv2.imread(data_path+ls[od[i]]+'_blue.png',0))
image.append(cv2.imread(data_path+ls[od[i]]+'_yellow.png',0))
image=np.asarray(image).T
image_list.append(image)
image_list=np.asarray(image_list)
return image_list |
lib.rs | mod utils;
use ndarray::Array2;
use std::iter::Iterator;
use std::str::FromStr;
use tera::{Context, Tera};
use wasm_bindgen::prelude::*;
// When the `wee_alloc` feature is enabled, use `wee_alloc` as the global
// allocator.
#[cfg(feature = "wee_alloc")]
#[global_allocator]
static ALLOC: wee_alloc::WeeAlloc = wee_alloc::WeeAlloc::INIT;
#[derive(Clone, Debug)]
pub struct Graph {
pub name: String,
pub size: usize,
pub points: Vec<Point>,
pub color: String,
pub x_range: f64,
pub y_range: f64,
pub x_min: f64,
pub y_min: f64,
}
#[derive(Clone, Debug, Copy)]
pub struct Point {
pub x: f64,
pub y: f64,
}
impl Graph {
pub fn new(name: String, color: String) -> Self {
Graph {
name,
size: 0,
points: Vec::new(),
color,
x_range: 0.,
y_range: 0.,
x_min: 0.,
y_min: 0.,
}
}
pub fn add_point(&mut self, x: f64, y: f64) {
self.points.push(Point { x, y });
}
pub fn draw_svg(
&self,
width: usize,
height: usize,
padding: usize,
path: Vec<Point>,
centers: Vec<(f64, f64)>,
) -> String {
let mut context = Context::new();
let mut p: Vec<(f64, f64)> = Vec::new();
for point in path {
p.push((point.x, point.y));
}
context.insert("name", &self.name);
context.insert("width", &width);
context.insert("height", &height);
context.insert("padding", &padding);
context.insert("path", &p);
context.insert("centers", ¢ers);
context.insert("x_range", &self.x_range);
context.insert("y_range", &self.y_range);
context.insert("x_min", &self.x_min);
context.insert("y_min", &self.y_min);
context.insert("color", &self.color);
context.insert("lines", &5);
Tera::one_off(include_str!("graph.svg"), &context, true).expect("Could not draw graph")
}
}
#[wasm_bindgen]
pub fn fit_draw(
csv_content: &[u8],
num_clusters: usize,
width: usize,
height: usize,
padding: usize,
title: &str,
) -> String {
let data: Vec<f64> = read_data(csv_content);
let mut xs: Vec<f64> = Vec::new();
let mut ys: Vec<f64> = Vec::new();
let mut tuples: Vec<(f64, f64)> = Vec::new();
let mut centers: Vec<(f64, f64)> = Vec::new();
let center_arr: Vec<f64> = fit(csv_content, num_clusters);
for i in 0..center_arr.len() {
if (i % 2) == 1 {
centers.push((center_arr[i - 1], center_arr[i]));
}
}
for i in 0..data.len() {
if (i % 2) == 1 {
tuples.push((data[i - 1], data[i]));
}
}
for i in 0..tuples.len() {
xs.push(tuples[i].0);
ys.push(tuples[i].1);
}
let mut graph = generate_graph(xs, ys, title);
let width = width - padding * 2;
let height = height - padding * 2;
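    // Find the bounding box of the data. The folds are seeded with NaN (0. / 0.);
    // f64::max and f64::min ignore NaN, so the first real coordinate wins.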
let x_max = graph
.points
.iter()
.map(|point| point.x)
.fold(0. / 0., f64::max);
let x_min = graph
.points
.iter()
.map(|point| point.x)
.fold(0. / 0., f64::min);
let y_max = graph
.points
.iter()
.map(|point| point.y)
.fold(0. / 0., f64::max);
let y_min = graph
.points
.iter()
.map(|point| point.y)
.fold(0. / 0., f64::min);
graph.x_min = (x_min - 1.0).round();
graph.y_min = (y_min - 1.0).round();
graph.x_range = (x_max + 1.0).round() - graph.x_min;
graph.y_range = (y_max + 1.0).round() - graph.y_min;
let centers = centers
.iter()
.map(|val| {
(
(val.0 - graph.x_min) / graph.x_range * width as f64 + padding as f64,
(val.1 - graph.y_min) / graph.y_range * (height as f64 * -1.0)
+ (padding + height) as f64,
)
})
.collect();
let path = graph
.points
.iter()
.map(|val| Point {
//x: (val.x / graph.max_x * width as f64) + padding as f64,
//y: (val.y / graph.max_y * (height as f64 * -1.0)) + (padding + height) as f64,
x: ((val.x - graph.x_min) / graph.x_range * width as f64) + padding as f64,
y: ((val.y - graph.y_min) / graph.y_range * (height as f64 * -1.0))
+ (padding + height) as f64,
})
.collect();
let out = graph.draw_svg(width, height, padding, path, centers);
out
}
pub fn generate_graph(xs: Vec<f64>, ys: Vec<f64>, title: &str) -> Graph |
pub fn fit(csv_content: &[u8], num_clusters: usize) -> Vec<f64> {
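    // Parse the CSV into (x, y) rows, run Lloyd's k-means via `rkm`, and flatten
    // the resulting cluster centers into [x0, y0, x1, y1, ...].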
let data: Vec<f64> = read_data(csv_content);
let arr = Array2::from_shape_vec((data.len() / 2, 2), data).unwrap();
let (means, _clusters) = rkm::kmeans_lloyd(&arr.view(), num_clusters as usize);
let mut serialized_vec: Vec<f64> = Vec::new();
for row in means.genrows() {
serialized_vec.push(row[0]);
serialized_vec.push(row[1]);
}
serialized_vec
}
fn read_data(csv_content: &[u8]) -> Vec<f64> {
let mut data_reader = csv::Reader::from_reader(csv_content);
let mut data: Vec<f64> = Vec::new();
for record in data_reader.records() {
for field in record.unwrap().iter() {
let value = f64::from_str(field);
data.push(value.unwrap());
}
}
return data;
} | {
let mut graph = Graph::new(title.into(), "#8ff0a4".into());
graph.size = xs.len();
for i in 0..graph.size {
graph.add_point(xs[i], ys[i]);
}
graph
} |
tools.py | import rows
import os
from timeit import default_timer
import json
output_path = '../package/data/'
class Brasilio(object):
def __init__(self, output_path='../package/data/', verbose=False):
self.verbose = verbose
self.output_path = output_path
self.timer = default_timer
def __enter__(self):
        # Create the package directory
if not os.path.exists(self.output_path):
os.makedirs(self.output_path)
        # Create an empty resources.json
json.dump([], open("resources.json", "w"), indent=2)
# Start Timer
self.start = self.timer()
return self
def __exit__(self, *args):
        # Create the datapackage
create_datapackage(self.output_path, verbose=False)
# End Timer
end = self.timer()
self.elapsed_secs = end - self.start
        self.elapsed = self.elapsed_secs  # seconds
if self.verbose:
print('Sucesso!\n Sua captura demorou: {0:.2f} s'.format(self.elapsed))
def generate_resources(filename, verbose=False):
data_path = os.path.join(output_path, filename)
if verbose:
print('Reading Data')
data = rows.import_from_csv(data_path)
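    # Map the Python types detected by `rows` to datapackage field type names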
translate = {int: 'integer',
str: 'string'}
resource = {'format': "csv",
"url": "http://brasil.io/dataset/{}?format=csv".format(filename.split('.')[0]),
"path": data_path,
"profile": "tabular-data-resource",
'schema': {
'fields': []}
}
for i, field in enumerate(data.field_names):
resource['schema']['fields'].append({'name': field,
'type': translate[data.field_types[i].TYPE[0]]})
if verbose:
print('Writing resources.json')
# print(type(resources))
# print(json.dumps(resources))
resources = json.load(open("resources.json", "r"))
resources.append(resource)
json.dump(resources, open("resources.json", "w"), indent=2)
def c | output_path, verbose=False):
    # Create datapackage.json
if verbose:
print("Criando datapackage.json")
with open("metadata.json", "r") as mfd:
output = json.load(mfd)
with open("resources.json", "r") as rfd:
output['resources'] = json.load(rfd)
with open("../package/datapackage.json", "w") as datapackage:
json.dump(output, datapackage, indent=2)
if __name__ == '__main__':
pass | reate_datapackage( |
method_call_chain.ts | import {seq, opt, tok, star, alt, Expression, IStatementRunnable} from "../combi";
import {InstanceArrow, StaticArrow} from "../tokens/";
import {NewObject, ArrowOrDash, ComponentName, FieldChain, MethodCall, Cast} from "./";
import {ClassName} from "./class_name";
export class | extends Expression {
public getRunnable(): IStatementRunnable {
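    // A chain is an optional local-variable (->) or class (=>) prefix, then a
    // method call, NEW or CAST expression, followed by further component
    // accesses and method calls.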
const fields = star(seq(new ArrowOrDash(), new ComponentName()));
const after = star(seq(fields, tok(InstanceArrow), new MethodCall()));
const localVariable = seq(new FieldChain(), tok(InstanceArrow));
const staticClass = seq(new ClassName(), tok(StaticArrow));
const ret = seq(alt(seq(opt(alt(localVariable, staticClass)), new MethodCall()),
new NewObject(),
new Cast()),
after);
return ret;
}
} | MethodCallChain |
rtl8139.rs | // The driver based on the online manual http://www.lowlevel.eu/wiki/RTL8139
#![allow(dead_code)]
use core::mem;
use crate::arch::kernel::irq::*;
use crate::arch::kernel::pci;
use crate::arch::kernel::percore::increment_irq_counter;
use crate::arch::mm::paging::virt_to_phys;
use crate::arch::mm::VirtAddr;
use crate::drivers::error::DriverError;
use crate::drivers::net::{netwakeup, network_irqhandler, NetworkInterface};
use crate::x86::io::*;
/// size of the receive buffer
const RX_BUF_LEN: usize = 8192;
/// size of the send buffer
const TX_BUF_LEN: usize = 4096;
/// the ethernet ID (6bytes) => MAC address
const IDR0: u16 = 0x0;
/// transmit status of each descriptor (4bytes/descriptor) (C mode)
const TSD0: u16 = 0x10;
/// transmit start address of descriptor 0 (4byte, C mode, 4 byte alignment)
const TSAD0: u16 = 0x20;
/// transmit start address of descriptor 1 (4byte, C mode, 4 byte alignment)
const TSAD1: u16 = 0x24;
/// transmit normal priority descriptors start address (8bytes, C+ mode, 256 byte-align)
const TNPDS: u16 = 0x20;
/// transmit start address of descriptor 2 (4byte, C mode, 4 byte alignment)
const TSAD2: u16 = 0x28;
/// transmit start address of descriptor 3 (4byte, C mode, 4 byte alignment)
const TSAD3: u16 = 0x2c;
/// command register (1byte)
const CR: u16 = 0x37;
/// current address of packet read (2byte, C mode, initial value 0xFFF0)
const CAPR: u16 = 0x38;
/// interrupt mask register (2byte)
const IMR: u16 = 0x3c;
/// interrupt status register (2byte)
const ISR: u16 = 0x3e;
/// transmit config register (4byte)
const TCR: u16 = 0x40;
/// receive config register (4byte)
const RCR: u16 = 0x44;
// command register for 93C46 (93C56) (1byte)
const CR9346: u16 = 0x50;
/// config register 0 (1byte)
const CONFIG0: u16 = 0x51;
/// config register 1 (1byte)
const CONFIG1: u16 = 0x52;
/// media status register (1byte)
const MSR: u16 = 0x58;
/// receive buffer start address (C mode, 4 byte alignment)
const RBSTART: u16 = 0x30;
/// basic mode control register (2byte)
const BMCR: u16 = 0x62;
/// basic mode status register (2byte)
const BMSR: u16 = 0x64;
/// Reset, set to 1 to invoke S/W reset, held to 1 while resetting
const CR_RST: u8 = 0x10;
/// Receiver Enable, enables receiving
const CR_RE: u8 = 0x08;
/// Transmitter Enable, enables transmitting
const CR_TE: u8 = 0x04;
/// Rx buffer is empty
const CR_BUFE: u8 = 0x01;
// determine the operating mode
const CR9346_EEM1: u8 = 0x80;
/// 00 = Normal, 01 = Auto-load, 10 = Programming, 11 = Config, Register write enabled
const CR9346_EEM0: u8 = 0x40;
/// status of EESK
const CR9346_EESK: u8 = 0x4;
/// status of EEDI
const CR9346_EEDI: u8 = 0x2;
/// status of EEDO
const CR9346_EEDO: u8 = 0x1;
/// leds status
const CONFIG1_LEDS: u8 = 0xC0;
/// is the driver loaded ?
const CONFIG1_DVRLOAD: u8 = 0x20;
/// lanwake mode
const CONFIG1_LWACT: u8 = 0x10;
/// Memory mapping enabled ?
const CONFIG1_MEMMAP: u8 = 0x8;
/// IO map enabled ?
const CONFIG1_IOMAP: u8 = 0x4;
/// enable the vital product data
const CONFIG1_VPD: u8 = 0x2;
/// Power Managment Enable
const CONFIG1_PMEN: u8 = 0x1;
// Media Status Register
const MSR_TXFCE: u8 = 0x80; // Tx Flow Control enabled
const MSR_RXFCE: u8 = 0x40; // Rx Flow Control enabled
const MSR_AS: u8 = 0x10; // Auxiliary status
const MSR_SPEED: u8 = 0x8; // set if currently talking on 10mbps network, clear if 100mbps
const MSR_LINKB: u8 = 0x4; // Link Bad ?
const MSR_TXPF: u8 = 0x2; // Transmit Pause flag
const MSR_RXPF: u8 = 0x1; // Receive Pause flag
const RCR_ERTH3: u32 = 0x0800_0000; // early Rx Threshold 0
const RCR_ERTH2: u32 = 0x0400_0000; // early Rx Threshold 1
const RCR_ERTH1: u32 = 0x0200_0000; // early Rx Threshold 2
const RCR_ERTH0: u32 = 0x0100_0000; // early Rx Threshold 3
const RCR_MRINT: u32 = 0x20000; // Multiple Early interrupt, (enable to make interrupts happen early, yuk)
const RCR_RER8: u32 = 0x10000; // Receive Error Packets larger than 8 bytes
const RCR_RXFTH2: u32 = 0x8000; // Rx Fifo threshold 0
const RCR_RXFTH1: u32 = 0x4000; // Rx Fifo threshold 1 (set to 110 and it will send to system when 1024bytes have been gathered)
const RCR_RXFTH0: u32 = 0x2000; // Rx Fifo threshold 2 (set all these to 1, and it wont FIFO till the full packet is ready)
const RCR_RBLEN1: u32 = 0x1000; // Rx Buffer length 0
const RCR_RBLEN0: u32 = 0x800; // Rx Buffer length 1 (C mode, 11 = 64kb, 10 = 32k, 01 = 16k, 00 = 8k)
const RCR_MXDMA2: u32 = 0x400; // Max DMA burst size 0
const RCR_MXDMA1: u32 = 0x200; // Max DMA burst size 1
const RCR_MXDMA0: u32 = 0x100; // Max DMA burst size 2
const RCR_WRAP: u32 = 0x80; // (void if buffer size = 64k, C mode, wrap to beginning of Rx buffer if we hit the end)
const RCR_EEPROMSEL: u32 = 0x40; // EEPROM type (0 = 9346, 1 = 9356)
const RCR_AER: u32 = 0x20; // Accept Error Packets (do we accept bad packets ?)
const RCR_AR: u32 = 0x10; // Accept runt packets (accept packets that are too small ?)
const RCR_AB: u32 = 0x08; // Accept Broadcast packets (accept broadcasts ?)
const RCR_AM: u32 = 0x04; // Accept multicast ?
const RCR_APM: u32 = 0x02; // Accept Physical matches (accept packets sent to our mac ?)
const RCR_AAP: u32 = 0x01; // Accept packets with a physical address ?
const TCR_HWVERID: u32 = 0x7CC0_0000; // mask for hw version ID's
const TCR_HWOFFSET: u32 = 22;
const TCR_IFG: u32 = 0x0300_0000; // interframe gap time
const TCR_LBK1: u32 = 0x40000; // loopback test
const TCR_LBK0: u32 = 0x20000; // loopback test
const TCR_CRC: u32 = 0x10000; // append CRC (card adds CRC if 1)
const TCR_MXDMA2: u32 = 0x400; // max dma burst
const TCR_MXDMA1: u32 = 0x200; // max dma burst
const TCR_MXDMA0: u32 = 0x100; // max dma burst
const TCR_TXRR: u32 = 0xF0; // Tx retry count, 0 = 16 else retries TXRR * 16 + 16 times
const TCR_CLRABT: u32 = 0x01; // Clear abort, attempt retransmit (when in abort state)
// Basic mode control register
const BMCR_RESET: u16 = 0x8000; // set the status and control of PHY to default
const BMCR_SPD100: u16 = 1 << 13; // 100 MBit
const BMCR_SPD1000: u16 = 1 << 6; // 1000 MBit
const BMCR_ANE: u16 = 0x1000; // enable N-way autonegotiation (ignore above if set)
const BMCR_RAN: u16 = 0x400; // restart auto-negotiation
const BMCR_DUPLEX: u16 = 0x200; // Duplex mode, generally a value of 1 means full-duplex
// Interrupt Status/Mask Register
// Bits in IMR enable/disable interrupts for specific events
// Bits in ISR indicate the status of the card
const ISR_SERR: u16 = 0x8000; // System error interrupt
const ISR_TUN: u16 = 0x4000; // time out interrupt
const ISR_SWINT: u16 = 0x100; // Software interrupt
const ISR_TDU: u16 = 0x80; // Tx Descriptor unavailable
const ISR_FIFOOVW: u16 = 0x40; // Rx Fifo overflow
const ISR_PUN: u16 = 0x20; // Packet underrun/link change
const ISR_RXOVW: u16 = 0x10; // Rx overflow/Rx Descriptor unavailable
const ISR_TER: u16 = 0x08; // Tx Error
const ISR_TOK: u16 = 0x04; // Tx OK
const ISR_RER: u16 = 0x02; // Rx Error
const ISR_ROK: u16 = 0x01; // Rx OK
const R39_INTERRUPT_MASK: u16 = 0x7f;
// Transmit Status of Descriptor0-3 (C mode only)
const TSD_CRS: u32 = 1 << 31; // carrier sense lost (during packet transmission)
const TSD_TABT: u32 = 1 << 30; // transmission abort
const TSD_OWC: u32 = 1 << 29; // out of window collision
const TSD_CDH: u32 = 1 << 28; // CD Heart beat (Cleared in 100Mb mode)
const TSD_NCC: u32 = 0x0F00_0000; // Number of collisions counted (during transmission)
const TSD_EARTH: u32 = 0x003F_0000; // threshold to begin transmission (0 = 8bytes, 1->2^6 = * 32bytes)
const TSD_TOK: u32 = 1 << 15; // Transmission OK, successful
const TSD_TUN: u32 = 1 << 14; // Transmission FIFO underrun
const TSD_OWN: u32 = 1 << 13; // Tx DMA operation finished (driver must set to 0 when TBC is written)
const TSD_SIZE: u32 = 0x1fff; // Descriptor size, the total size in bytes of data to send (max 1792)
/// To set the RTL8139 to accept only the Transmit OK (TOK) and Receive OK (ROK)
/// interrupts, we would have the TOK and ROK bits of the IMR high and leave the
/// rest low. That way when a TOK or ROK IRQ happens, it actually will go through
/// and fire up an IRQ.
const INT_MASK: u16 = ISR_ROK | ISR_TOK | ISR_RXOVW | ISR_TER | ISR_RER;
/// Beside Receive OK (ROK) interrupt, this mask enable all other interrupts
const INT_MASK_NO_ROK: u16 = ISR_TOK | ISR_RXOVW | ISR_TER | ISR_RER;
const NO_TX_BUFFERS: usize = 4;
#[derive(Debug)]
pub enum RTL8139Error {
InitFailed,
ResetFailed,
Unknown,
}
/// RealTek RTL8139 network driver struct.
///
/// Struct allows control of the device queues as well as
/// the device itself.
pub struct RTL8139Driver {
iobase: u16,
mtu: u16,
irq: u8,
mac: [u8; 6],
tx_in_use: [bool; NO_TX_BUFFERS],
tx_counter: usize,
rxbuffer: VirtAddr,
rxpos: usize,
txbuffer: VirtAddr,
}
impl NetworkInterface for RTL8139Driver {
/// Returns the MAC address of the network interface
fn get_mac_address(&self) -> [u8; 6] {
self.mac
}
/// Returns the current MTU of the device. | self.mtu
}
fn get_tx_buffer(&mut self, len: usize) -> Result<(*mut u8, usize), ()> {
let id = self.tx_counter % NO_TX_BUFFERS;
if self.tx_in_use[id] || len > TX_BUF_LEN {
error!("Unable to get TX buffer");
Err(())
} else {
self.tx_in_use[id] = true;
self.tx_counter += 1;
Ok(((self.txbuffer.as_usize() + id * TX_BUF_LEN) as *mut u8, id))
}
}
fn free_tx_buffer(&self, _token: usize) {
// get_tx_buffer did not allocate
}
fn send_tx_buffer(&mut self, id: usize, len: usize) -> Result<(), ()> {
// send the packet
unsafe {
outl(
self.iobase + TSD0 as u16 + (4 * id as u16),
len.try_into().unwrap(),
); //|0x3A0000);
}
Ok(())
}
fn has_packet(&self) -> bool {
let cmd = unsafe { inb(self.iobase + CR as u16) };
if (cmd & CR_BUFE) != CR_BUFE {
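            // The first 16-bit word of an Rx ring entry is the packet status;
            // the ROK bit marks a valid packet.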
let header: u16 = unsafe { *((self.rxbuffer.as_usize() + self.rxpos) as *const u16) };
if header & ISR_ROK == ISR_ROK {
return true;
}
}
false
}
fn receive_rx_buffer(&mut self) -> Result<(&'static [u8], usize), ()> {
let cmd = unsafe { inb(self.iobase + CR as u16) };
if (cmd & CR_BUFE) != CR_BUFE {
let header: u16 = unsafe { *((self.rxbuffer.as_usize() + self.rxpos) as *const u16) };
self.rxpos = (self.rxpos + mem::size_of::<u16>()) % RX_BUF_LEN;
if header & ISR_ROK == ISR_ROK {
let length: u16 =
unsafe { *((self.rxbuffer.as_usize() + self.rxpos) as *const u16) } - 4; // copy packet (but not the CRC)
Ok((
unsafe {
core::slice::from_raw_parts(
(self.rxbuffer.as_usize() + self.rxpos + mem::size_of::<u16>())
as *const u8,
length as usize,
)
},
self.rxpos,
))
} else {
error!(
"RTL8192: invalid header {:#x}, rx_pos {}\n",
header, self.rxpos
);
Err(())
}
} else {
Err(())
}
}
// Tells driver, that buffer is consumed and can be deallocated
fn rx_buffer_consumed(&mut self, handle: usize) {
if self.rxpos != handle {
warn!("Invalid handle {} != {}", self.rxpos, handle)
}
let length: u16 = unsafe { *((self.rxbuffer.as_usize() + self.rxpos) as *const u16) };
self.rxpos = (self.rxpos + length as usize + mem::size_of::<u16>()) % RX_BUF_LEN;
// packets are dword aligned
self.rxpos = ((self.rxpos + 3) & !0x3) % RX_BUF_LEN;
unsafe {
outw(self.iobase + CAPR, (self.rxpos - 0x10).try_into().unwrap());
}
}
fn set_polling_mode(&mut self, value: bool) {
if value {
// disable interrupts from the NIC
unsafe {
outw(self.iobase + IMR, INT_MASK_NO_ROK);
}
} else {
// Enable all known interrupts by setting the interrupt mask.
unsafe {
outw(self.iobase + IMR, INT_MASK);
}
}
}
fn handle_interrupt(&mut self) -> bool {
increment_irq_counter((32 + self.irq).into());
let isr_contents = unsafe { inw(self.iobase + ISR) };
if (isr_contents & ISR_TOK) == ISR_TOK {
self.tx_handler();
}
if (isr_contents & ISR_RER) == ISR_RER {
error!("RTL88139: RX error detected!\n");
}
if (isr_contents & ISR_TER) == ISR_TER {
error!("RTL88139r: TX error detected!\n");
}
if (isr_contents & ISR_RXOVW) == ISR_RXOVW {
error!("RTL88139: RX overflow detected!\n");
}
let ret = (isr_contents & ISR_ROK) == ISR_ROK;
if ret {
// handle incoming packets
#[cfg(not(feature = "newlib"))]
netwakeup();
}
unsafe {
outw(
self.iobase + ISR,
isr_contents & (ISR_RXOVW | ISR_TER | ISR_RER | ISR_TOK | ISR_ROK),
);
}
ret
}
}
impl RTL8139Driver {
fn tx_handler(&mut self) {
for i in 0..self.tx_in_use.len() {
if self.tx_in_use[i] {
let txstatus = unsafe { inl(self.iobase + TSD0 + i as u16 * 4) };
if (txstatus & (TSD_TABT | TSD_OWC)) > 0 {
error!("RTL8139: major error");
continue;
}
if (txstatus & TSD_TUN) == TSD_TUN {
error!("RTL8139: transmit underrun");
}
if (txstatus & TSD_TOK) == TSD_TOK {
self.tx_in_use[i] = false;
}
}
}
}
}
impl Drop for RTL8139Driver {
fn drop(&mut self) {
debug!("Dropping RTL8129Driver!");
// Software reset
unsafe {
outb(self.iobase + CR, CR_RST);
}
crate::mm::deallocate(self.rxbuffer, RX_BUF_LEN);
crate::mm::deallocate(self.txbuffer, NO_TX_BUFFERS * TX_BUF_LEN);
}
}
pub fn init_device(adapter: &pci::PciAdapter) -> Result<RTL8139Driver, DriverError> {
let mut iter = adapter.base_addresses.iter().filter_map(|&x| match x {
pci::PciBar::IO(base) => Some(base.addr),
_ => None,
});
let iobase: u16 = iter
.next()
.ok_or(DriverError::InitRTL8139DevFail(RTL8139Error::Unknown))?
.try_into()
.unwrap();
debug!(
"Found RTL8139 at iobase {:#x} (irq {})",
iobase, adapter.irq
);
adapter.make_bus_master();
let mac: [u8; 6] = unsafe {
[
inb(iobase + IDR0),
inb(iobase + IDR0 + 1),
inb(iobase + IDR0 + 2),
inb(iobase + IDR0 + 3),
inb(iobase + IDR0 + 4),
inb(iobase + IDR0 + 5),
]
};
debug!(
"MAC address {:02x}:{:02x}:{:02x}:{:02x}:{:02x}:{:02x}",
mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]
);
unsafe {
if inl(iobase + TCR) == 0x00FF_FFFFu32 {
error!("Unable to initialize RTL8192");
return Err(DriverError::InitRTL8139DevFail(RTL8139Error::InitFailed));
}
// Software reset
outb(iobase + CR, CR_RST);
// The RST bit must be checked to make sure that the chip has finished the reset.
// If the RST bit is high (1), then the reset is still in operation.
crate::arch::kernel::processor::udelay(10000);
let mut tmp: u16 = 10000;
while (inb(iobase + CR) & CR_RST) == CR_RST && tmp > 0 {
tmp -= 1;
}
if tmp == 0 {
error!("RTL8139 reset failed");
return Err(DriverError::InitRTL8139DevFail(RTL8139Error::ResetFailed));
}
// Enable Receive and Transmitter
outb(iobase + CR, CR_TE | CR_RE); // Sets the RE and TE bits high
// lock config register
outb(iobase + CR9346, CR9346_EEM1 | CR9346_EEM0);
// clear all of CONFIG1
outb(iobase + CONFIG1, 0);
// disable driver loaded and lanwake bits, turn driver loaded bit back on
outb(
iobase + CONFIG1,
(inb(iobase + CONFIG1) & !(CONFIG1_DVRLOAD | CONFIG1_LWACT)) | CONFIG1_DVRLOAD,
);
// unlock config register
outb(iobase + CR9346, 0);
/*
* configure receive buffer
* AB - Accept Broadcast: Accept broadcast packets sent to mac ff:ff:ff:ff:ff:ff
* AM - Accept Multicast: Accept multicast packets.
* APM - Accept Physical Match: Accept packets send to NIC's MAC address.
* AAP - Accept All Packets. Accept all packets (run in promiscuous mode).
*/
outl(
iobase + RCR,
RCR_MXDMA2 | RCR_MXDMA1 | RCR_MXDMA0 | RCR_AB | RCR_AM | RCR_APM | RCR_AAP,
); // The WRAP bit isn't set!
// set the transmit config register to
// be the normal interframe gap time
// set DMA max burst to 64bytes
outl(iobase + TCR, TCR_IFG | TCR_MXDMA0 | TCR_MXDMA1 | TCR_MXDMA2);
}
let rxbuffer = crate::mm::allocate(RX_BUF_LEN, true);
let txbuffer = crate::mm::allocate(NO_TX_BUFFERS * TX_BUF_LEN, true);
if txbuffer.is_zero() || rxbuffer.is_zero() {
error!("Unable to allocate buffers for RTL8139");
return Err(DriverError::InitRTL8139DevFail(RTL8139Error::Unknown));
}
debug!(
"Allocate TxBuffer at {:#x} and RxBuffer at {:#x}",
txbuffer, rxbuffer
);
unsafe {
// register the receive buffer
outl(
iobase + RBSTART,
virt_to_phys(rxbuffer).as_u64().try_into().unwrap(),
);
// set each of the transmitter start address descriptors
outl(
iobase + TSAD0,
virt_to_phys(txbuffer).as_u64().try_into().unwrap(),
);
outl(
iobase + TSAD1,
virt_to_phys(txbuffer + TX_BUF_LEN)
.as_u64()
.try_into()
.unwrap(),
);
outl(
iobase + TSAD2,
virt_to_phys(txbuffer + 2 * TX_BUF_LEN)
.as_u64()
.try_into()
.unwrap(),
);
outl(
iobase + TSAD3,
virt_to_phys(txbuffer + 3 * TX_BUF_LEN)
.as_u64()
.try_into()
.unwrap(),
);
// Enable all known interrupts by setting the interrupt mask.
outw(iobase + IMR, INT_MASK);
outw(iobase + BMCR, BMCR_ANE);
let speed;
let tmp = inw(iobase + BMCR);
if tmp & BMCR_SPD1000 == BMCR_SPD1000 {
speed = 1000;
} else if tmp & BMCR_SPD100 == BMCR_SPD100 {
speed = 100;
} else {
speed = 10;
}
// Enable Receive and Transmitter
outb(iobase + CR, CR_TE | CR_RE); // Sets the RE and TE bits high
info!(
"RTL8139: CR = {:#x}, ISR = {:#x}, speed = {} mbps",
inb(iobase + CR),
inw(iobase + ISR),
speed
);
}
// Install interrupt handler for RTL8139
debug!("Install interrupt handler for RTL8139 at {}", adapter.irq);
irq_install_handler(adapter.irq.into(), network_irqhandler as usize);
add_irq_name(adapter.irq as u32, "rtl8139_net");
Ok(RTL8139Driver {
iobase,
mtu: 1500,
irq: adapter.irq,
mac,
tx_in_use: [false; NO_TX_BUFFERS],
tx_counter: 0,
rxbuffer,
rxpos: 0,
txbuffer,
})
} | fn get_mtu(&self) -> u16 { |
bus_stop.rs | use crate::{LaneID, Position};
use serde_derive::{Deserialize, Serialize};
use std::fmt;
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
pub struct | {
pub sidewalk: LaneID,
// This might actually not be contiguous and correct; we could remove a stop in between two
// others
pub idx: usize,
}
impl fmt::Display for BusStopID {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "BusStopID({0}, {1})", self.sidewalk, self.idx)
}
}
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, PartialOrd, Ord, Serialize, Deserialize)]
pub struct BusRouteID(pub usize);
impl fmt::Display for BusRouteID {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "BusRouteID({0})", self.0)
}
}
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub struct BusStop {
pub id: BusStopID,
// These might be on opposite sides of the road in the case of one-ways. Shouldn't matter
// anywhere.
pub driving_pos: Position,
pub sidewalk_pos: Position,
}
impl BusStop {
pub fn dump_debug(&self) {
println!("{}", abstutil::to_json(self));
}
}
#[derive(Serialize, Deserialize, Debug)]
pub struct BusRoute {
pub id: BusRouteID,
pub name: String,
pub stops: Vec<BusStopID>,
}
| BusStopID |
test_formulator.py | import symro
import symro.src.mat as mat
from symro.src.prob.problem import Problem
from symro.src.parsing.amplparser import AMPLParser
import symro.src.handlers.nodebuilder as nb
import symro.src.handlers.formulator as frm
from symro.test.test_util import *
# Scripts
# ----------------------------------------------------------------------------------------------------------------------
SCRIPT = """
set I = 1..3;
var x >= 0, <= 1;
var y{I} >= 0, <= 1;
minimize OBJ: 0;
"""
# Tests
# ----------------------------------------------------------------------------------------------------------------------
def run_formulator_test_group():
tests = [("Expression expansion test", test_expansion),
("Expression simplification test", test_simplification)]
return run_tests(tests)
def test_expansion():
problem = symro.read_ampl(script_literal=SCRIPT,
working_dir_path=SCRIPT_DIR_PATH)
ampl_parser = AMPLParser(problem)
results = []
# test 1
literal = "x + 1 - 2 + 3 * x + 4 / x"
node = ampl_parser.parse_arithmetic_expression(literal)
node = __standardize_expression(problem, node)
results.append(check_str_result(node, "x + (1) + (-2) + (3 * x) + (4 * (1 / x))"))
# test 2
literal = "(1 + x) * (2 + 3 * x)"
node = ampl_parser.parse_arithmetic_expression(literal)
node = __standardize_expression(problem, node)
results.append(check_str_result(node, "(1 * 2) + (1 * 3 * x) + (x * 2) + (x * 3 * x)"))
# test 3:
literal = "(x^2 + 4 * x + 5) * (6 * x + 7) * (8 + 9 / x)"
node = ampl_parser.parse_arithmetic_expression(literal)
node = __standardize_expression(problem, node)
results.append(check_str_result(
node,
"(x * x * 6 * x * 8) + (x * x * 6 * x * 9 * (1 / x)) + (x * x * 7 * 8) + (x * x * 7 * 9 * (1 / x))"
" + (4 * x * 6 * x * 8) + (4 * x * 6 * x * 9 * (1 / x)) + (4 * x * 7 * 8) + (4 * x * 7 * 9 * (1 / x))"
" + (5 * 6 * x * 8) + (5 * 6 * x * 9 * (1 / x)) + (5 * 7 * 8) + (5 * 7 * 9 * (1 / x))"
))
# test 4
literal = "(x + sum {i in I} 2 * y[i]) * (x^2 + 3)"
node = ampl_parser.parse_arithmetic_expression(literal)
node = __standardize_expression(problem, node)
results.append(check_str_result(
node,
"(x * x * x) + (x * 3) + (sum {i in I} (2 * y[i] * x * x)) + (sum {i in I} (2 * y[i] * 3))"
))
# test 5
literal = "(x + sum {i in I} 2 * y[i]) * (x * sum {i in I} y[i])"
node = ampl_parser.parse_arithmetic_expression(literal)
node = __standardize_expression(problem, node)
results.append(check_str_result(
node,
"(sum {i in I} (x * x * y[i])) + (sum {i in I, i1 in I} (2 * y[i] * x * y[i1]))"
))
# test 6
literal = "x * (if 1 < 2 then sum {i in I} y[i] else sum {i in I} y[i] ^ 2)"
node = ampl_parser.parse_arithmetic_expression(literal)
node = __standardize_expression(problem, node)
results.append(check_str_result(
node,
"(sum {i in I: (1 < 2)} (x * y[i])) + (sum {i in I: (! (1 < 2))} (x * y[i] * y[i]))"
))
# test 7
literal = "(if 1 < 2 then x else 5) * (sum {i in I} y[i] + 10)"
node = ampl_parser.parse_arithmetic_expression(literal)
node = __standardize_expression(problem, node)
results.append(check_str_result(
node,
"(sum {i in I: (1 < 2)} (x * y[i])) + (if (1 < 2) then (x * 10))"
" + (sum {i in I: (! (1 < 2))} ((5) * y[i])) + (if (! (1 < 2)) then ((5) * 10))"
))
# test 8
literal = "2 ^ (1/0.8)"
node = ampl_parser.parse_arithmetic_expression(literal)
node = __standardize_expression(problem, node)
results.append(check_str_result(
node,
"((2 ^ (1 * (1 / 0.8))))"
))
return results
def test_simplification():
problem = symro.read_ampl(script_literal=SCRIPT,
working_dir_path=SCRIPT_DIR_PATH)
ampl_parser = AMPLParser(problem)
results = []
# test 1
literal = "1 - 4 + x"
node = ampl_parser.parse_arithmetic_expression(literal)
node = frm.simplify(problem, node)
results.append(check_str_result(node, "-3 + x"))
# test 2
literal = "if 1 > 0 then x else if 1 < 0 then 5"
node = ampl_parser.parse_arithmetic_expression(literal)
node = frm.simplify(problem, node)
results.append(check_str_result(node, "x"))
return results
# Utility
# ----------------------------------------------------------------------------------------------------------------------
def __standardize_expression(problem: Problem, node: mat.ArithmeticExpressionNode):
node = frm.reformulate_arithmetic_conditional_expressions(node)
node = frm.reformulate_subtraction_and_unary_negation(node)
terms = frm.expand_multiplication(problem, node)
ref_terms = []
for term in terms:
if isinstance(term, mat.ArithmeticOperationNode) and term.operator == mat.MULTIPLICATION_OPERATOR:
|
else:
ref_terms.append(term)
return nb.build_addition_node(ref_terms)
| term = frm.combine_arithmetic_reduction_nodes(problem, term)
ref_terms.append(term) |
day3.rs | use crate::PartResult;
use std::collections::HashSet;
pub fn part1(lines: &Vec<String>) -> PartResult {
let trees = parse_input(&lines);
let points = count_trees(&trees, Coord(3, 1));
Ok(points.to_string())
}
pub fn part2(lines: &Vec<String>) -> PartResult {
let trees = parse_input(&lines);
let trees = count_trees(&trees, Coord(1, 1))
* count_trees(&trees, Coord(3, 1))
* count_trees(&trees, Coord(5, 1))
* count_trees(&trees, Coord(7, 1))
* count_trees(&trees, Coord(1, 2));
Ok(trees.to_string())
}
fn count_trees(map: &HashSet<Coord>, slope: Coord) -> usize {
slope.into_iter().filter(|c| map.contains(c)).count()
}
fn parse_input(lines: &Vec<String>) -> HashSet<Coord> {
lines
.iter()
.enumerate()
.flat_map(|(y, line)| -> Vec<Coord> {
line.chars()
.enumerate()
.filter_map(|(x, c)| if c == '#' { Some(Coord(x, y)) } else { None })
.collect()
})
.collect()
}
#[derive(Copy, Clone, Hash, Eq, PartialEq, Debug)]
struct Coord(usize, usize);
impl Coord {
fn into_iter(self) -> CoordIter {
CoordIter {
current: Coord(0, 0),
delta: self,
}
}
}
struct CoordIter {
current: Coord,
delta: Coord,
}
impl Iterator for CoordIter {
type Item = Coord;
fn next(&mut self) -> Option<Self::Item> {
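        // NOTE: the map is hard-coded as 31 columns wide (it repeats horizontally)
        // and 322 is the last row index of the input.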
let x = (self.current.0 + self.delta.0) % 31;
let y = self.current.1 + self.delta.1;
if y > 322 {
None
} else |
}
}
| {
let c = Coord(x, y);
self.current = c;
Some(c)
} |
ffi.rs | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//! Contains declarations to bind to the [C Data Interface](https://arrow.apache.org/docs/format/CDataInterface.html).
//!
//! Generally, this module is divided in two main interfaces:
//! One interface maps C ABI to native Rust types, i.e. convert c-pointers, c_char, to native rust.
//! This is handled by [FFI_ArrowSchema] and [FFI_ArrowArray].
//!
//! The second interface maps native Rust types to the Rust-specific implementation of Arrow such as `format` to [DataType],
//! `Buffer`, etc. This is handled by [ArrowArray].
//!
//! ```rust
//! # use std::sync::Arc;
//! # use arrow::array::{Int32Array, Array, ArrayData, make_array_from_raw};
//! # use arrow::error::{Result, ArrowError};
//! # use arrow::compute::kernels::arithmetic;
//! # use std::convert::TryFrom;
//! # fn main() -> Result<()> {
//! // create an array natively
//! let array = Int32Array::from(vec![Some(1), None, Some(3)]);
//!
//! // export it
//! let (array_ptr, schema_ptr) = array.to_raw()?;
//!
//! // consumed and used by something else...
//!
//! // import it
//! let array = unsafe { make_array_from_raw(array_ptr, schema_ptr)? };
//!
//! // perform some operation
//! let array = array.as_any().downcast_ref::<Int32Array>().ok_or(
//! ArrowError::ParseError("Expects an int32".to_string()),
//! )?;
//! let array = arithmetic::add(&array, &array)?;
//!
//! // verify
//! assert_eq!(array, Int32Array::from(vec![Some(2), None, Some(6)]));
//!
//! // (drop/release)
//! Ok(())
//! }
//! ```
/*
# Design:
Main assumptions:
* A memory region is deallocated according to its own release mechanism.
* Rust shares memory regions between arrays.
* A memory region should be deallocated when no-one is using it.
The design of this module is as follows:
`ArrowArray` contains two `Arc`s, one per ABI-compatible `struct`, each containing data
according to the C Data Interface. These Arcs are used for ref counting of the structs
within Rust and lifetime management.
Each ABI-compatible `struct` knows how to `drop` itself, calling `release`.
To import an array, unsafely create an `ArrowArray` from two pointers using [ArrowArray::try_from_raw].
To export an array, create an `ArrowArray` using [ArrowArray::try_new].
*/
use std::{
ffi::CStr,
ffi::CString,
iter,
mem::size_of,
ptr::{self, NonNull},
sync::Arc,
};
use crate::buffer::Buffer;
use crate::datatypes::{DataType, DateUnit, TimeUnit};
use crate::error::{ArrowError, Result};
use crate::util::bit_util;
/// ABI-compatible struct for `ArrowSchema` from C Data Interface
/// See https://arrow.apache.org/docs/format/CDataInterface.html#structure-definitions
/// This was created by bindgen
#[repr(C)]
#[derive(Debug)]
pub struct FFI_ArrowSchema {
format: *const ::std::os::raw::c_char,
name: *const ::std::os::raw::c_char,
metadata: *const ::std::os::raw::c_char,
flags: i64,
n_children: i64,
children: *mut *mut FFI_ArrowSchema,
dictionary: *mut FFI_ArrowSchema,
release: ::std::option::Option<unsafe extern "C" fn(arg1: *mut FFI_ArrowSchema)>,
private_data: *mut ::std::os::raw::c_void,
}
// callback used to drop [FFI_ArrowSchema] when it is exported.
unsafe extern "C" fn release_schema(schema: *mut FFI_ArrowSchema) {
let schema = &mut *schema;
// take ownership back to release it.
CString::from_raw(schema.format as *mut std::os::raw::c_char);
schema.release = None;
}
impl FFI_ArrowSchema {
/// create a new [FFI_ArrowSchema] from a format.
fn new(format: &str) -> FFI_ArrowSchema {
// https://arrow.apache.org/docs/format/CDataInterface.html#c.ArrowSchema
FFI_ArrowSchema {
format: CString::new(format).unwrap().into_raw(),
name: std::ptr::null_mut(),
metadata: std::ptr::null_mut(),
flags: 0,
n_children: 0,
children: ptr::null_mut(),
dictionary: std::ptr::null_mut(),
release: Some(release_schema),
private_data: std::ptr::null_mut(),
}
}
/// create an empty [FFI_ArrowSchema]
fn empty() -> Self {
Self {
format: std::ptr::null_mut(),
name: std::ptr::null_mut(),
metadata: std::ptr::null_mut(),
flags: 0,
n_children: 0,
children: ptr::null_mut(),
dictionary: std::ptr::null_mut(),
release: None,
private_data: std::ptr::null_mut(),
}
}
/// returns the format of this schema.
pub fn format(&self) -> &str {
unsafe { CStr::from_ptr(self.format) }
.to_str()
.expect("The external API has a non-utf8 as format")
}
}
impl Drop for FFI_ArrowSchema {
fn drop(&mut self) {
match self.release {
None => (),
Some(release) => unsafe { release(self) },
};
}
}
/// maps a DataType `format` to a [DataType](arrow::datatypes::DataType).
/// See https://arrow.apache.org/docs/format/CDataInterface.html#data-type-description-format-strings
fn to_datatype(format: &str) -> Result<DataType> {
Ok(match format {
"n" => DataType::Null,
"b" => DataType::Boolean,
"c" => DataType::Int8,
"C" => DataType::UInt8,
"s" => DataType::Int16,
"S" => DataType::UInt16,
"i" => DataType::Int32,
"I" => DataType::UInt32,
"l" => DataType::Int64,
"L" => DataType::UInt64,
"e" => DataType::Float16,
"f" => DataType::Float32,
"g" => DataType::Float64,
"z" => DataType::Binary,
"Z" => DataType::LargeBinary,
"u" => DataType::Utf8,
"U" => DataType::LargeUtf8,
"tdD" => DataType::Date32(DateUnit::Day),
"tdm" => DataType::Date64(DateUnit::Millisecond),
"tts" => DataType::Time32(TimeUnit::Second),
"ttm" => DataType::Time32(TimeUnit::Millisecond),
"ttu" => DataType::Time64(TimeUnit::Microsecond),
"ttn" => DataType::Time64(TimeUnit::Nanosecond),
_ => {
            return Err(ArrowError::CDataInterface(format!(
                "The datatype \"{}\" is still not supported in Rust implementation",
                format
            )))
}
})
}
/// the inverse of [to_datatype]
fn from_datatype(datatype: &DataType) -> Result<String> {
Ok(match datatype {
DataType::Null => "n",
DataType::Boolean => "b",
DataType::Int8 => "c",
DataType::UInt8 => "C",
DataType::Int16 => "s",
DataType::UInt16 => "S",
DataType::Int32 => "i",
DataType::UInt32 => "I",
DataType::Int64 => "l",
DataType::UInt64 => "L",
DataType::Float16 => "e",
DataType::Float32 => "f",
DataType::Float64 => "g",
DataType::Binary => "z",
DataType::LargeBinary => "Z",
DataType::Utf8 => "u",
DataType::LargeUtf8 => "U",
DataType::Date32(DateUnit::Day) => "tdD",
DataType::Date64(DateUnit::Millisecond) => "tdm",
DataType::Time32(TimeUnit::Second) => "tts",
DataType::Time32(TimeUnit::Millisecond) => "ttm",
DataType::Time64(TimeUnit::Microsecond) => "ttu",
DataType::Time64(TimeUnit::Nanosecond) => "ttn",
z => {
return Err(ArrowError::CDataInterface(format!(
"The datatype \"{:?}\" is still not supported in Rust implementation",
z
)))
}
}
.to_string())
}
// returns the number of bits that buffer `i` (in the C data interface) is expected to have.
// This is set by the Arrow specification
fn bit_width(data_type: &DataType, i: usize) -> Result<usize> {
Ok(match (data_type, i) {
// the null buffer is bit sized
(_, 0) => 1,
// primitive types first buffer's size is given by the native types
(DataType::Boolean, 1) => 1,
(DataType::UInt8, 1) => size_of::<u8>() * 8,
(DataType::UInt16, 1) => size_of::<u16>() * 8,
(DataType::UInt32, 1) => size_of::<u32>() * 8,
(DataType::UInt64, 1) => size_of::<u64>() * 8,
(DataType::Int8, 1) => size_of::<i8>() * 8,
(DataType::Int16, 1) => size_of::<i16>() * 8,
(DataType::Int32, 1) | (DataType::Date32(_), 1) | (DataType::Time32(_), 1) => size_of::<i32>() * 8,
(DataType::Int64, 1) | (DataType::Date64(_), 1) | (DataType::Time64(_), 1) => size_of::<i64>() * 8,
(DataType::Float32, 1) => size_of::<f32>() * 8,
(DataType::Float64, 1) => size_of::<f64>() * 8,
// primitive types have a single buffer
(DataType::Boolean, _) |
(DataType::UInt8, _) |
(DataType::UInt16, _) |
(DataType::UInt32, _) |
(DataType::UInt64, _) |
(DataType::Int8, _) |
(DataType::Int16, _) |
(DataType::Int32, _) | (DataType::Date32(_), _) | (DataType::Time32(_), _) |
(DataType::Int64, _) | (DataType::Date64(_), _) | (DataType::Time64(_), _) |
(DataType::Float32, _) |
(DataType::Float64, _) => {
return Err(ArrowError::CDataInterface(format!(
"The datatype \"{:?}\" expects 2 buffers, but requested {}. Please verify that the C data interface is correctly implemented.",
data_type, i
)))
}
// Variable-sized binaries: have two buffers.
// "small": first buffer is i32, second is in bytes
(DataType::Utf8, 1) | (DataType::Binary, 1) => size_of::<i32>() * 8,
(DataType::Utf8, 2) | (DataType::Binary, 2) => size_of::<u8>() * 8,
(DataType::Utf8, _) | (DataType::Binary, _) => {
return Err(ArrowError::CDataInterface(format!(
"The datatype \"{:?}\" expects 3 buffers, but requested {}. Please verify that the C data interface is correctly implemented.",
data_type, i
)))
}
// Variable-sized binaries: have two buffers.
// LargeUtf8: first buffer is i64, second is in bytes
(DataType::LargeUtf8, 1) | (DataType::LargeBinary, 1) => size_of::<i64>() * 8,
(DataType::LargeUtf8, 2) | (DataType::LargeBinary, 2) => size_of::<u8>() * 8,
(DataType::LargeUtf8, _) | (DataType::LargeBinary, _) => {
return Err(ArrowError::CDataInterface(format!(
"The datatype \"{:?}\" expects 3 buffers, but requested {}. Please verify that the C data interface is correctly implemented.",
data_type, i
)))
}
_ => {
return Err(ArrowError::CDataInterface(format!(
"The datatype \"{:?}\" is still not supported in Rust implementation",
data_type
)))
}
})
}
/// ABI-compatible struct for ArrowArray from C Data Interface
/// See https://arrow.apache.org/docs/format/CDataInterface.html#structure-definitions
/// This was created by bindgen
#[repr(C)]
#[derive(Debug)]
pub struct FFI_ArrowArray {
pub(crate) length: i64,
pub(crate) null_count: i64,
pub(crate) offset: i64,
pub(crate) n_buffers: i64,
pub(crate) n_children: i64,
pub(crate) buffers: *mut *const ::std::os::raw::c_void,
children: *mut *mut FFI_ArrowArray,
dictionary: *mut FFI_ArrowArray,
release: ::std::option::Option<unsafe extern "C" fn(arg1: *mut FFI_ArrowArray)>,
// When exported, this MUST contain everything that is owned by this array.
// for example, any buffer pointed to in `buffers` must be here, as well as the `buffers` pointer
// itself.
// In other words, everything in [FFI_ArrowArray] must be owned by `private_data` and can assume
// that they do not outlive `private_data`.
private_data: *mut ::std::os::raw::c_void,
}
// callback used to drop [FFI_ArrowArray] when it is exported
unsafe extern "C" fn release_array(array: *mut FFI_ArrowArray) {
if array.is_null() {
return;
}
let array = &mut *array;
// take ownership of `private_data`, therefore dropping it
Box::from_raw(array.private_data as *mut PrivateData);
array.release = None;
}
struct PrivateData {
buffers: Vec<Option<Buffer>>,
buffers_ptr: Box<[*const std::os::raw::c_void]>,
}
impl FFI_ArrowArray {
/// creates a new `FFI_ArrowArray` from existing data.
/// # Safety
/// This method releases `buffers`. Consumers of this struct *must* call `release` before
/// releasing this struct, or contents in `buffers` leak.
unsafe fn new(
length: i64,
null_count: i64,
offset: i64,
n_buffers: i64,
buffers: Vec<Option<Buffer>>,
) -> Self {
let buffers_ptr = buffers
.iter()
.map(|maybe_buffer| match maybe_buffer {
                // note that `as_ptr` takes into account the buffer's offset
Some(b) => b.as_ptr() as *const std::os::raw::c_void,
None => std::ptr::null(),
})
.collect::<Box<[_]>>();
let pointer = buffers_ptr.as_ptr() as *mut *const std::ffi::c_void;
// create the private data owning everything.
// any other data must be added here, e.g. via a struct, to track lifetime.
let private_data = Box::new(PrivateData {
buffers,
buffers_ptr,
});
Self {
length,
null_count,
offset,
n_buffers,
n_children: 0,
buffers: pointer,
children: std::ptr::null_mut(),
dictionary: std::ptr::null_mut(),
release: Some(release_array),
private_data: Box::into_raw(private_data) as *mut ::std::os::raw::c_void,
}
}
// create an empty `FFI_ArrowArray`, which can be used to import data into
fn empty() -> Self {
Self { | n_children: 0,
buffers: std::ptr::null_mut(),
children: std::ptr::null_mut(),
dictionary: std::ptr::null_mut(),
release: None,
private_data: std::ptr::null_mut(),
}
}
}
/// returns a new buffer corresponding to the position `index` of the FFI array. It may not exist (null pointer).
/// `len` is the expected size of the buffer in bytes.
/// # Panic
/// This function panics if `index` is larger or equal to `n_buffers`.
/// # Safety
/// This function assumes that the memory region behind buffer `index` is at least `len` bytes long.
unsafe fn create_buffer(
array: Arc<FFI_ArrowArray>,
index: usize,
len: usize,
) -> Option<Buffer> {
if array.buffers.is_null() {
return None;
}
let buffers = array.buffers as *mut *const u8;
assert!(index < array.n_buffers as usize);
let ptr = *buffers.add(index);
NonNull::new(ptr as *mut u8).map(|ptr| Buffer::from_unowned(ptr, len, array))
}
impl Drop for FFI_ArrowArray {
fn drop(&mut self) {
match self.release {
None => (),
Some(release) => unsafe { release(self) },
};
}
}
/// Struct used to move an Array from and to the C Data Interface.
/// Its main responsibility is to expose functionality that requires
/// both [FFI_ArrowArray] and [FFI_ArrowSchema].
///
/// This struct has two main paths:
///
/// ## Import from the C Data Interface
/// * [ArrowArray::empty] to allocate memory to be filled by an external call
/// * [ArrowArray::try_from_raw] to consume two non-null allocated pointers
/// ## Export to the C Data Interface
/// * [ArrowArray::try_new] to create a new [ArrowArray] from Rust-specific information
/// * [ArrowArray::into_raw] to expose two pointers for [FFI_ArrowArray] and [FFI_ArrowSchema].
///
/// # Safety
/// Whoever creates this struct is responsible for releasing their resources. Specifically,
/// consumers *must* call [ArrowArray::into_raw] and take ownership of the individual pointers,
/// calling [FFI_ArrowArray::release] and [FFI_ArrowSchema::release] accordingly.
///
/// Furthermore, this struct assumes that the incoming data agrees with the C data interface.
#[derive(Debug)]
pub struct ArrowArray {
// these are ref-counted because they can be shared by multiple buffers.
array: Arc<FFI_ArrowArray>,
schema: Arc<FFI_ArrowSchema>,
}
impl ArrowArray {
/// creates a new `ArrowArray`. This is used to export to the C Data Interface.
/// # Safety
/// See safety of [ArrowArray]
pub unsafe fn try_new(
data_type: &DataType,
len: usize,
null_count: usize,
null_buffer: Option<Buffer>,
offset: usize,
buffers: Vec<Buffer>,
_child_data: Vec<ArrowArray>,
) -> Result<Self> {
let format = from_datatype(data_type)?;
// * insert the null buffer at the start
// * make all others `Option<Buffer>`.
let new_buffers = iter::once(null_buffer)
.chain(buffers.iter().map(|b| Some(b.clone())))
.collect::<Vec<_>>();
let schema = Arc::new(FFI_ArrowSchema::new(&format));
let array = Arc::new(FFI_ArrowArray::new(
len as i64,
null_count as i64,
offset as i64,
new_buffers.len() as i64,
new_buffers,
));
Ok(ArrowArray { schema, array })
}
/// creates a new [ArrowArray] from two pointers. Used to import from the C Data Interface.
/// # Safety
/// See safety of [ArrowArray]
/// # Error
/// Errors if any of the pointers is null
pub unsafe fn try_from_raw(
array: *const FFI_ArrowArray,
schema: *const FFI_ArrowSchema,
) -> Result<Self> {
if array.is_null() || schema.is_null() {
return Err(ArrowError::MemoryError(
"At least one of the pointers passed to `try_from_raw` is null"
.to_string(),
));
};
Ok(Self {
array: Arc::from_raw(array as *mut FFI_ArrowArray),
schema: Arc::from_raw(schema as *mut FFI_ArrowSchema),
})
}
/// creates a new empty [ArrowArray]. Used to import from the C Data Interface.
/// # Safety
/// See safety of [ArrowArray]
pub unsafe fn empty() -> Self {
let schema = Arc::new(FFI_ArrowSchema::empty());
let array = Arc::new(FFI_ArrowArray::empty());
ArrowArray { schema, array }
}
/// exports [ArrowArray] to the C Data Interface
pub fn into_raw(this: ArrowArray) -> (*const FFI_ArrowArray, *const FFI_ArrowSchema) {
(Arc::into_raw(this.array), Arc::into_raw(this.schema))
}
/// returns the null bit buffer.
/// Rust implementation uses a buffer that is not part of the array of buffers.
/// The C Data interface's null buffer is part of the array of buffers.
pub fn null_bit_buffer(&self) -> Option<Buffer> {
// similar to `self.buffer_len(0)`, but without `Result`.
let buffer_len = bit_util::ceil(self.array.length as usize, 8);
unsafe { create_buffer(self.array.clone(), 0, buffer_len) }
}
/// Returns the length, in bytes, of the buffer `i` (indexed according to the C data interface)
// Rust implementation uses fixed-sized buffers, which require knowledge of their `len`.
// for variable-sized buffers, such as the second buffer of a stringArray, we need
// to fetch offset buffer's len to build the second buffer.
fn buffer_len(&self, i: usize) -> Result<usize> {
let data_type = &self.data_type()?;
Ok(match (data_type, i) {
(DataType::Utf8, 1)
| (DataType::LargeUtf8, 1)
| (DataType::Binary, 1)
| (DataType::LargeBinary, 1) => {
// the len of the offset buffer (buffer 1) equals length + 1
let bits = bit_width(data_type, i)?;
debug_assert_eq!(bits % 8, 0);
(self.array.length as usize + 1) * (bits / 8)
}
(DataType::Utf8, 2) | (DataType::Binary, 2) => {
// the len of the data buffer (buffer 2) equals the last value of the offset buffer (buffer 1)
let len = self.buffer_len(1)?;
// first buffer is the null buffer => add(1)
// we assume that pointer is aligned for `i32`, as Utf8 uses `i32` offsets.
#[allow(clippy::cast_ptr_alignment)]
let offset_buffer = unsafe {
*(self.array.buffers as *mut *const u8).add(1) as *const i32
};
// get last offset
(unsafe { *offset_buffer.add(len / size_of::<i32>() - 1) }) as usize
}
(DataType::LargeUtf8, 2) | (DataType::LargeBinary, 2) => {
// the len of the data buffer (buffer 2) equals the last value of the offset buffer (buffer 1)
let len = self.buffer_len(1)?;
// first buffer is the null buffer => add(1)
// we assume that pointer is aligned for `i64`, as Large uses `i64` offsets.
#[allow(clippy::cast_ptr_alignment)]
let offset_buffer = unsafe {
*(self.array.buffers as *mut *const u8).add(1) as *const i64
};
// get last offset
(unsafe { *offset_buffer.add(len / size_of::<i64>() - 1) }) as usize
}
// buffer len of primitive types
_ => {
let bits = bit_width(data_type, i)?;
bit_util::ceil(self.array.length as usize * bits, 8)
}
})
}
/// returns all buffers, as organized by Rust (i.e. null buffer is skipped)
pub fn buffers(&self) -> Result<Vec<Buffer>> {
(0..self.array.n_buffers - 1)
.map(|index| {
// + 1: skip null buffer
let index = (index + 1) as usize;
let len = self.buffer_len(index)?;
unsafe { create_buffer(self.array.clone(), index, len) }.ok_or_else(
|| {
ArrowError::CDataInterface(format!(
"The external buffer at position {} is null.",
index - 1
))
},
)
})
.collect()
}
/// the length of the array
pub fn len(&self) -> usize {
self.array.length as usize
}
/// whether the array is empty
pub fn is_empty(&self) -> bool {
self.array.length == 0
}
/// the offset of the array
pub fn offset(&self) -> usize {
self.array.offset as usize
}
/// the null count of the array
pub fn null_count(&self) -> usize {
self.array.null_count as usize
}
/// the data_type as declared in the schema
pub fn data_type(&self) -> Result<DataType> {
to_datatype(self.schema.format())
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::array::{
make_array, Array, ArrayData, BinaryOffsetSizeTrait, BooleanArray,
GenericBinaryArray, GenericStringArray, Int32Array, StringOffsetSizeTrait,
Time32MillisecondArray,
};
use crate::compute::kernels;
use std::convert::TryFrom;
use std::sync::Arc;
#[test]
fn test_round_trip() -> Result<()> {
// create an array natively
let array = Int32Array::from(vec![1, 2, 3]);
// export it
let array = ArrowArray::try_from(array.data().as_ref().clone())?;
// (simulate consumer) import it
let data = Arc::new(ArrayData::try_from(array)?);
let array = make_array(data);
// perform some operation
let array = array.as_any().downcast_ref::<Int32Array>().unwrap();
let array = kernels::arithmetic::add(&array, &array).unwrap();
// verify
assert_eq!(array, Int32Array::from(vec![2, 4, 6]));
// (drop/release)
Ok(())
}
// case with nulls is tested in the docs, through the example on this module.
fn test_generic_string<Offset: StringOffsetSizeTrait>() -> Result<()> {
// create an array natively
let array =
GenericStringArray::<Offset>::from(vec![Some("a"), None, Some("aaa")]);
// export it
let array = ArrowArray::try_from(array.data().as_ref().clone())?;
// (simulate consumer) import it
let data = Arc::new(ArrayData::try_from(array)?);
let array = make_array(data);
// perform some operation
let array = kernels::concat::concat(&[array.as_ref(), array.as_ref()]).unwrap();
let array = array
.as_any()
.downcast_ref::<GenericStringArray<Offset>>()
.unwrap();
// verify
let expected = GenericStringArray::<Offset>::from(vec![
Some("a"),
None,
Some("aaa"),
Some("a"),
None,
Some("aaa"),
]);
assert_eq!(array, &expected);
// (drop/release)
Ok(())
}
#[test]
fn test_string() -> Result<()> {
test_generic_string::<i32>()
}
#[test]
fn test_large_string() -> Result<()> {
test_generic_string::<i64>()
}
fn test_generic_binary<Offset: BinaryOffsetSizeTrait>() -> Result<()> {
// create an array natively
let array: Vec<Option<&[u8]>> = vec![Some(b"a"), None, Some(b"aaa")];
let array = GenericBinaryArray::<Offset>::from(array);
// export it
let array = ArrowArray::try_from(array.data().as_ref().clone())?;
// (simulate consumer) import it
let data = Arc::new(ArrayData::try_from(array)?);
let array = make_array(data);
// perform some operation
let array = kernels::concat::concat(&[array.as_ref(), array.as_ref()]).unwrap();
let array = array
.as_any()
.downcast_ref::<GenericBinaryArray<Offset>>()
.unwrap();
// verify
let expected: Vec<Option<&[u8]>> = vec![
Some(b"a"),
None,
Some(b"aaa"),
Some(b"a"),
None,
Some(b"aaa"),
];
let expected = GenericBinaryArray::<Offset>::from(expected);
assert_eq!(array, &expected);
// (drop/release)
Ok(())
}
#[test]
fn test_binary() -> Result<()> {
test_generic_binary::<i32>()
}
#[test]
fn test_large_binary() -> Result<()> {
test_generic_binary::<i64>()
}
#[test]
fn test_bool() -> Result<()> {
// create an array natively
let array = BooleanArray::from(vec![None, Some(true), Some(false)]);
// export it
let array = ArrowArray::try_from(array.data().as_ref().clone())?;
// (simulate consumer) import it
let data = Arc::new(ArrayData::try_from(array)?);
let array = make_array(data);
// perform some operation
let array = array.as_any().downcast_ref::<BooleanArray>().unwrap();
let array = kernels::boolean::not(&array)?;
// verify
assert_eq!(
array,
BooleanArray::from(vec![None, Some(false), Some(true)])
);
// (drop/release)
Ok(())
}
#[test]
fn test_time32() -> Result<()> {
// create an array natively
let array = Time32MillisecondArray::from(vec![None, Some(1), Some(2)]);
// export it
let array = ArrowArray::try_from(array.data().as_ref().clone())?;
// (simulate consumer) import it
let data = Arc::new(ArrayData::try_from(array)?);
let array = make_array(data);
// perform some operation
let array = kernels::concat::concat(&[array.as_ref(), array.as_ref()]).unwrap();
let array = array
.as_any()
.downcast_ref::<Time32MillisecondArray>()
.unwrap();
// verify
assert_eq!(
array,
&Time32MillisecondArray::from(vec![
None,
Some(1),
Some(2),
None,
Some(1),
Some(2)
])
);
// (drop/release)
Ok(())
}
} | length: 0,
null_count: 0,
offset: 0,
n_buffers: 0, |
api_op_DescribeFpgaImages.go | // Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
package ec2
import (
"context"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/internal/awsutil"
)
// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeFpgaImagesRequest
type DescribeFpgaImagesInput struct {
_ struct{} `type:"structure"`
// Checks whether you have the required permissions for the action, without
// actually making the request, and provides an error response. If you have
// the required permissions, the error response is DryRunOperation. Otherwise,
// it is UnauthorizedOperation.
DryRun *bool `type:"boolean"`
// The filters.
//
// * create-time - The creation time of the AFI.
//
// * fpga-image-id - The FPGA image identifier (AFI ID).
//
// * fpga-image-global-id - The global FPGA image identifier (AGFI ID).
//
// * name - The name of the AFI.
//
// * owner-id - The AWS account ID of the AFI owner.
//
// * product-code - The product code.
//
// * shell-version - The version of the AWS Shell that was used to create
// the bitstream.
//
// * state - The state of the AFI (pending | failed | available | unavailable).
//
// * tag:<key> - The key/value combination of a tag assigned to the resource.
// Use the tag key in the filter name and the tag value as the filter value.
// For example, to find all resources that have a tag with the key Owner
// and the value TeamA, specify tag:Owner for the filter name and TeamA for
// the filter value.
//
// * tag-key - The key of a tag assigned to the resource. Use this filter
// to find all resources assigned a tag with a specific key, regardless of
// the tag value.
//
// * update-time - The time of the most recent update.
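//
// An illustrative filter value (not part of the generated documentation; the
// Filter field names below are assumed from this SDK's shared EC2 types):
//
//    input := &DescribeFpgaImagesInput{
//        Filters: []Filter{
//            {Name: aws.String("state"), Values: []string{"available"}},
//        },
//    }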
Filters []Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
// The AFI IDs.
FpgaImageIds []string `locationName:"FpgaImageId" locationNameList:"item" type:"list"`
// The maximum number of results to return in a single call.
MaxResults *int64 `min:"5" type:"integer"`
// The token to retrieve the next page of results.
NextToken *string `min:"1" type:"string"`
// Filters the AFI by owner. Specify an AWS account ID, self (owner is the sender
// of the request), or an AWS owner alias (valid values are amazon | aws-marketplace).
Owners []string `locationName:"Owner" locationNameList:"Owner" type:"list"`
}
// String returns the string representation
func (s DescribeFpgaImagesInput) String() string {
return awsutil.Prettify(s)
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *DescribeFpgaImagesInput) Validate() error {
invalidParams := aws.ErrInvalidParams{Context: "DescribeFpgaImagesInput"}
if s.MaxResults != nil && *s.MaxResults < 5 {
invalidParams.Add(aws.NewErrParamMinValue("MaxResults", 5))
}
if s.NextToken != nil && len(*s.NextToken) < 1 {
invalidParams.Add(aws.NewErrParamMinLen("NextToken", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeFpgaImagesResult
type DescribeFpgaImagesOutput struct {
_ struct{} `type:"structure"`
// Information about the FPGA images.
FpgaImages []FpgaImage `locationName:"fpgaImageSet" locationNameList:"item" type:"list"`
// The token to use to retrieve the next page of results. This value is null
// when there are no more results to return.
NextToken *string `locationName:"nextToken" min:"1" type:"string"`
}
// String returns the string representation
func (s DescribeFpgaImagesOutput) String() string {
return awsutil.Prettify(s)
}
const opDescribeFpgaImages = "DescribeFpgaImages"
// DescribeFpgaImagesRequest returns a request value for making API operation for
// Amazon Elastic Compute Cloud.
//
// Describes the Amazon FPGA Images (AFIs) available to you. These include public
// AFIs, private AFIs that you own, and AFIs owned by other AWS accounts for
// which you have load permissions.
//
// // Example sending a request using DescribeFpgaImagesRequest.
// req := client.DescribeFpgaImagesRequest(params)
// resp, err := req.Send(context.TODO())
// if err == nil {
// fmt.Println(resp)
// }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeFpgaImages
func (c *Client) DescribeFpgaImagesRequest(input *DescribeFpgaImagesInput) DescribeFpgaImagesRequest {
op := &aws.Operation{
Name: opDescribeFpgaImages,
HTTPMethod: "POST",
HTTPPath: "/",
Paginator: &aws.Paginator{
InputTokens: []string{"NextToken"},
OutputTokens: []string{"NextToken"},
LimitToken: "MaxResults",
TruncationToken: "",
},
}
if input == nil {
input = &DescribeFpgaImagesInput{}
}
req := c.newRequest(op, input, &DescribeFpgaImagesOutput{})
return DescribeFpgaImagesRequest{Request: req, Input: input, Copy: c.DescribeFpgaImagesRequest}
}
// DescribeFpgaImagesRequest is the request type for the
// DescribeFpgaImages API operation.
type DescribeFpgaImagesRequest struct {
*aws.Request
Input *DescribeFpgaImagesInput
Copy func(*DescribeFpgaImagesInput) DescribeFpgaImagesRequest
}
// Send marshals and sends the DescribeFpgaImages API request.
func (r DescribeFpgaImagesRequest) Send(ctx context.Context) (*DescribeFpgaImagesResponse, error) {
r.Request.SetContext(ctx)
err := r.Request.Send()
if err != nil {
return nil, err
}
resp := &DescribeFpgaImagesResponse{
DescribeFpgaImagesOutput: r.Request.Data.(*DescribeFpgaImagesOutput),
response: &aws.Response{Request: r.Request},
}
return resp, nil
}
// NewDescribeFpgaImagesPaginator returns a paginator for DescribeFpgaImages.
// Use Next method to get the next page, and CurrentPage to get the current
// response page from the paginator. Next will return false if there are
// no more pages, or if an error was encountered.
//
// Note: This operation can generate multiple requests to a service.
//
// // Example iterating over pages.
// req := client.DescribeFpgaImagesRequest(input)
// p := ec2.NewDescribeFpgaImagesPaginator(req)
//
// for p.Next(context.TODO()) {
// page := p.CurrentPage()
// }
//
// if err := p.Err(); err != nil {
// return err
// }
//
func NewDescribeFpgaImagesPaginator(req DescribeFpgaImagesRequest) DescribeFpgaImagesPaginator |
// DescribeFpgaImagesPaginator is used to paginate the request. This can be done by
// calling Next and CurrentPage.
type DescribeFpgaImagesPaginator struct {
aws.Pager
}
func (p *DescribeFpgaImagesPaginator) CurrentPage() *DescribeFpgaImagesOutput {
return p.Pager.CurrentPage().(*DescribeFpgaImagesOutput)
}
// DescribeFpgaImagesResponse is the response type for the
// DescribeFpgaImages API operation.
type DescribeFpgaImagesResponse struct {
*DescribeFpgaImagesOutput
response *aws.Response
}
// SDKResponseMetdata returns the response metadata for the
// DescribeFpgaImages request.
func (r *DescribeFpgaImagesResponse) SDKResponseMetdata() *aws.Response {
return r.response
}
| {
return DescribeFpgaImagesPaginator{
Pager: aws.Pager{
NewRequest: func(ctx context.Context) (*aws.Request, error) {
var inCpy *DescribeFpgaImagesInput
if req.Input != nil {
tmp := *req.Input
inCpy = &tmp
}
newReq := req.Copy(inCpy)
newReq.SetContext(ctx)
return newReq.Request, nil
},
},
}
} |
indexes_test.go | package db
import (
"context"
"fmt"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"github.com/sourcegraph/sourcegraph/internal/db/dbconn"
"github.com/sourcegraph/sourcegraph/internal/db/dbtesting"
)
func TestGetIndexByID(t *testing.T) {
if testing.Short() {
t.Skip()
}
dbtesting.SetupGlobalTestDB(t)
db := testDB()
// Index does not exist initially
if _, exists, err := db.GetIndexByID(context.Background(), 1); err != nil {
t.Fatalf("unexpected error getting index: %s", err)
} else if exists {
t.Fatal("unexpected record")
}
queuedAt := time.Unix(1587396557, 0).UTC()
startedAt := queuedAt.Add(time.Minute)
expected := Index{
ID: 1,
Commit: makeCommit(1),
QueuedAt: queuedAt,
State: "processing",
FailureSummary: nil,
FailureStacktrace: nil,
StartedAt: &startedAt,
FinishedAt: nil,
RepositoryID: 123,
Rank: nil,
}
insertIndexes(t, dbconn.Global, expected)
if index, exists, err := db.GetIndexByID(context.Background(), 1); err != nil {
t.Fatalf("unexpected error getting index: %s", err)
} else if !exists {
t.Fatal("expected record to exist")
} else if diff := cmp.Diff(expected, index); diff != "" {
t.Errorf("unexpected index (-want +got):\n%s", diff)
}
}
func TestGetQueuedIndexRank(t *testing.T) {
if testing.Short() {
t.Skip()
}
dbtesting.SetupGlobalTestDB(t)
db := testDB()
t1 := time.Unix(1587396557, 0).UTC()
t2 := t1.Add(+time.Minute * 5)
t3 := t1.Add(+time.Minute * 3)
t4 := t1.Add(+time.Minute * 1)
t5 := t1.Add(+time.Minute * 4)
t6 := t1.Add(+time.Minute * 2)
insertIndexes(t, dbconn.Global,
Index{ID: 1, QueuedAt: t1, State: "queued"},
Index{ID: 2, QueuedAt: t2, State: "queued"},
Index{ID: 3, QueuedAt: t3, State: "queued"},
Index{ID: 4, QueuedAt: t4, State: "queued"},
Index{ID: 5, QueuedAt: t5, State: "queued"},
Index{ID: 6, QueuedAt: t6, State: "processing"},
)
if index, _, _ := db.GetIndexByID(context.Background(), 1); index.Rank == nil || *index.Rank != 1 {
t.Errorf("unexpected rank. want=%d have=%s", 1, printableRank{index.Rank})
}
if index, _, _ := db.GetIndexByID(context.Background(), 2); index.Rank == nil || *index.Rank != 5 {
t.Errorf("unexpected rank. want=%d have=%s", 5, printableRank{index.Rank})
}
if index, _, _ := db.GetIndexByID(context.Background(), 3); index.Rank == nil || *index.Rank != 3 {
t.Errorf("unexpected rank. want=%d have=%s", 3, printableRank{index.Rank})
}
if index, _, _ := db.GetIndexByID(context.Background(), 4); index.Rank == nil || *index.Rank != 2 {
t.Errorf("unexpected rank. want=%d have=%s", 2, printableRank{index.Rank})
}
if index, _, _ := db.GetIndexByID(context.Background(), 5); index.Rank == nil || *index.Rank != 4 {
t.Errorf("unexpected rank. want=%d have=%s", 4, printableRank{index.Rank})
}
// Only considers queued indexes to determine rank
if index, _, _ := db.GetIndexByID(context.Background(), 6); index.Rank != nil {
t.Errorf("unexpected rank. want=%s have=%s", "nil", printableRank{index.Rank})
}
}
func TestIndexQueueSize(t *testing.T) {
if testing.Short() {
t.Skip()
}
dbtesting.SetupGlobalTestDB(t)
db := testDB()
insertIndexes(t, dbconn.Global,
Index{ID: 1, State: "queued"},
Index{ID: 2, State: "errored"},
Index{ID: 3, State: "processing"},
Index{ID: 4, State: "completed"},
Index{ID: 5, State: "completed"},
Index{ID: 6, State: "queued"},
Index{ID: 7, State: "processing"},
Index{ID: 8, State: "completed"},
Index{ID: 9, State: "processing"},
Index{ID: 10, State: "queued"},
)
count, err := db.IndexQueueSize(context.Background())
if err != nil {
t.Fatalf("unexpected error getting index queue size: %s", err)
}
if count != 3 {
t.Errorf("unexpected count. want=%d have=%d", 3, count)
}
}
func TestIsQueued(t *testing.T) {
if testing.Short() {
t.Skip()
}
dbtesting.SetupGlobalTestDB(t)
db := testDB()
insertIndexes(t, dbconn.Global, Index{ID: 1, RepositoryID: 1, Commit: makeCommit(1)})
insertUploads(t, dbconn.Global, Upload{ID: 2, RepositoryID: 2, Commit: makeCommit(2)})
testCases := []struct {
repositoryID int
commit string
expected bool
}{
{1, makeCommit(1), true},
{1, makeCommit(2), false},
{2, makeCommit(1), false},
{2, makeCommit(2), true},
{3, makeCommit(1), false},
{3, makeCommit(2), false},
}
for _, testCase := range testCases {
name := fmt.Sprintf("repositoryId=%d commit=%s", testCase.repositoryID, testCase.commit)
t.Run(name, func(t *testing.T) {
queued, err := db.IsQueued(context.Background(), testCase.repositoryID, testCase.commit)
if err != nil {
t.Fatalf("unexpected error checking if commit is queued: %s", err)
}
if queued != testCase.expected |
})
}
}
func TestInsertIndex(t *testing.T) {
if testing.Short() {
t.Skip()
}
dbtesting.SetupGlobalTestDB(t)
db := &dbImpl{db: dbconn.Global}
id, err := db.InsertIndex(context.Background(), Index{
Commit: makeCommit(1),
State: "queued",
RepositoryID: 50,
})
if err != nil {
t.Fatalf("unexpected error enqueueing index: %s", err)
}
rank := 1
expected := Index{
ID: id,
Commit: makeCommit(1),
QueuedAt: time.Time{},
State: "queued",
FailureSummary: nil,
FailureStacktrace: nil,
StartedAt: nil,
FinishedAt: nil,
RepositoryID: 50,
Rank: &rank,
}
if index, exists, err := db.GetIndexByID(context.Background(), id); err != nil {
t.Fatalf("unexpected error getting index: %s", err)
} else if !exists {
t.Fatal("expected record to exist")
} else {
// Update auto-generated timestamp
expected.QueuedAt = index.QueuedAt
if diff := cmp.Diff(expected, index); diff != "" {
t.Errorf("unexpected index (-want +got):\n%s", diff)
}
}
}
func TestMarkIndexComplete(t *testing.T) {
if testing.Short() {
t.Skip()
}
dbtesting.SetupGlobalTestDB(t)
db := &dbImpl{db: dbconn.Global}
insertIndexes(t, dbconn.Global, Index{ID: 1, State: "queued"})
if err := db.MarkIndexComplete(context.Background(), 1); err != nil {
t.Fatalf("unexpected error marking index as complete: %s", err)
}
if index, exists, err := db.GetIndexByID(context.Background(), 1); err != nil {
t.Fatalf("unexpected error getting index: %s", err)
} else if !exists {
t.Fatal("expected record to exist")
} else if index.State != "completed" {
t.Errorf("unexpected state. want=%q have=%q", "completed", index.State)
}
}
func TestMarkIndexErrored(t *testing.T) {
if testing.Short() {
t.Skip()
}
dbtesting.SetupGlobalTestDB(t)
db := &dbImpl{db: dbconn.Global}
insertIndexes(t, dbconn.Global, Index{ID: 1, State: "queued"})
if err := db.MarkIndexErrored(context.Background(), 1, "oops", ""); err != nil {
t.Fatalf("unexpected error marking index as complete: %s", err)
}
if index, exists, err := db.GetIndexByID(context.Background(), 1); err != nil {
t.Fatalf("unexpected error getting index: %s", err)
} else if !exists {
t.Fatal("expected record to exist")
} else if index.State != "errored" {
t.Errorf("unexpected state. want=%q have=%q", "errored", index.State)
}
}
func TestDequeueIndexProcessSuccess(t *testing.T) {
if testing.Short() {
t.Skip()
}
dbtesting.SetupGlobalTestDB(t)
db := testDB()
// Add dequeueable index
insertIndexes(t, dbconn.Global, Index{ID: 1, State: "queued"})
index, tx, ok, err := db.DequeueIndex(context.Background())
if err != nil {
t.Fatalf("unexpected error dequeueing index: %s", err)
}
if !ok {
t.Fatalf("expected something to be dequeueable")
}
if index.ID != 1 {
t.Errorf("unexpected index id. want=%d have=%d", 1, index.ID)
}
if index.State != "processing" {
t.Errorf("unexpected state. want=%s have=%s", "processing", index.State)
}
if state, _, err := scanFirstString(dbconn.Global.Query("SELECT state FROM lsif_indexes WHERE id = 1")); err != nil {
t.Errorf("unexpected error getting state: %s", err)
} else if state != "processing" {
t.Errorf("unexpected state outside of txn. want=%s have=%s", "processing", state)
}
if err := tx.MarkIndexComplete(context.Background(), index.ID); err != nil {
t.Fatalf("unexpected error marking index complete: %s", err)
}
_ = tx.Done(nil)
if state, _, err := scanFirstString(dbconn.Global.Query("SELECT state FROM lsif_indexes WHERE id = 1")); err != nil {
t.Errorf("unexpected error getting state: %s", err)
} else if state != "completed" {
t.Errorf("unexpected state outside of txn. want=%s have=%s", "completed", state)
}
}
func TestDequeueIndexProcessError(t *testing.T) {
if testing.Short() {
t.Skip()
}
dbtesting.SetupGlobalTestDB(t)
db := testDB()
// Add dequeueable index
insertIndexes(t, dbconn.Global, Index{ID: 1, State: "queued"})
index, tx, ok, err := db.DequeueIndex(context.Background())
if err != nil {
t.Fatalf("unexpected error dequeueing index: %s", err)
}
if !ok {
t.Fatalf("expected something to be dequeueable")
}
if index.ID != 1 {
t.Errorf("unexpected index id. want=%d have=%d", 1, index.ID)
}
if index.State != "processing" {
t.Errorf("unexpected state. want=%s have=%s", "processing", index.State)
}
if state, _, err := scanFirstString(dbconn.Global.Query("SELECT state FROM lsif_indexes WHERE id = 1")); err != nil {
t.Errorf("unexpected error getting state: %s", err)
} else if state != "processing" {
t.Errorf("unexpected state outside of txn. want=%s have=%s", "processing", state)
}
if err := tx.MarkIndexErrored(context.Background(), index.ID, "test summary", "test stacktrace"); err != nil {
t.Fatalf("unexpected error marking index complete: %s", err)
}
_ = tx.Done(nil)
if state, _, err := scanFirstString(dbconn.Global.Query("SELECT state FROM lsif_indexes WHERE id = 1")); err != nil {
t.Errorf("unexpected error getting state: %s", err)
} else if state != "errored" {
t.Errorf("unexpected state outside of txn. want=%s have=%s", "errored", state)
}
if summary, _, err := scanFirstString(dbconn.Global.Query("SELECT failure_summary FROM lsif_indexes WHERE id = 1")); err != nil {
t.Errorf("unexpected error getting failure_summary: %s", err)
} else if summary != "test summary" {
t.Errorf("unexpected failure summary outside of txn. want=%s have=%s", "test summary", summary)
}
if stacktrace, _, err := scanFirstString(dbconn.Global.Query("SELECT failure_stacktrace FROM lsif_indexes WHERE id = 1")); err != nil {
t.Errorf("unexpected error getting failure_stacktrace: %s", err)
} else if stacktrace != "test stacktrace" {
t.Errorf("unexpected failure stacktrace outside of txn. want=%s have=%s", "test stacktrace", stacktrace)
}
}
func TestDequeueIndexSkipsLocked(t *testing.T) {
if testing.Short() {
t.Skip()
}
dbtesting.SetupGlobalTestDB(t)
db := testDB()
t1 := time.Unix(1587396557, 0).UTC()
t2 := t1.Add(time.Minute)
t3 := t2.Add(time.Minute)
insertIndexes(
t,
dbconn.Global,
Index{ID: 1, State: "queued", QueuedAt: t1},
Index{ID: 2, State: "processing", QueuedAt: t2},
Index{ID: 3, State: "queued", QueuedAt: t3},
)
tx1, err := dbconn.Global.BeginTx(context.Background(), nil)
if err != nil {
t.Fatal(err)
}
defer func() { _ = tx1.Rollback() }()
// Row lock index 1 in a transaction which should be skipped by ResetStalled
if _, err := tx1.Query(`SELECT * FROM lsif_indexes WHERE id = 1 FOR UPDATE`); err != nil {
t.Fatal(err)
}
index, tx2, ok, err := db.DequeueIndex(context.Background())
if err != nil {
t.Fatalf("unexpected error dequeueing index: %s", err)
}
if !ok {
t.Fatalf("expected something to be dequeueable")
}
defer func() { _ = tx2.Done(nil) }()
if index.ID != 3 {
t.Errorf("unexpected index id. want=%d have=%d", 3, index.ID)
}
if index.State != "processing" {
t.Errorf("unexpected state. want=%s have=%s", "processing", index.State)
}
}
func TestDequeueIndexEmpty(t *testing.T) {
if testing.Short() {
t.Skip()
}
dbtesting.SetupGlobalTestDB(t)
db := testDB()
_, tx, ok, err := db.DequeueIndex(context.Background())
if err != nil {
t.Fatalf("unexpected error dequeueing index: %s", err)
}
if ok {
_ = tx.Done(nil)
t.Fatalf("unexpected dequeue")
}
}
func TestResetStalledIndexes(t *testing.T) {
if testing.Short() {
t.Skip()
}
dbtesting.SetupGlobalTestDB(t)
db := testDB()
now := time.Unix(1587396557, 0).UTC()
t1 := now.Add(-time.Second * 6) // old
t2 := now.Add(-time.Second * 2) // new enough
t3 := now.Add(-time.Second * 3) // new enough
t4 := now.Add(-time.Second * 8) // old
t5 := now.Add(-time.Second * 8) // old
insertIndexes(t, dbconn.Global,
Index{ID: 1, State: "processing", StartedAt: &t1},
Index{ID: 2, State: "processing", StartedAt: &t2},
Index{ID: 3, State: "processing", StartedAt: &t3},
Index{ID: 4, State: "processing", StartedAt: &t4},
Index{ID: 5, State: "processing", StartedAt: &t5},
)
tx, err := dbconn.Global.BeginTx(context.Background(), nil)
if err != nil {
t.Fatal(err)
}
defer func() { _ = tx.Rollback() }()
// Row lock index 5 in a transaction which should be skipped by ResetStalled
if _, err := tx.Query(`SELECT * FROM lsif_indexes WHERE id = 5 FOR UPDATE`); err != nil {
t.Fatal(err)
}
expected := []int{1, 4}
if ids, err := db.ResetStalledIndexes(context.Background(), now); err != nil {
t.Fatalf("unexpected error resetting stalled indexes: %s", err)
} else if diff := cmp.Diff(expected, ids); diff != "" {
t.Errorf("unexpected ids (-want +got):\n%s", diff)
}
}
| {
t.Errorf("unexpected state. want=%v have=%v", testCase.expected, queued)
} |
clickmeeting.go | package clickmeeting
| CreateRoom(room NewRoom, opts ...CreateRoomOption) (Room, error)
UpdateRoom(roomID int, opts ...UpdateRoomOption) (Room, error)
DeleteRoom(roomID int) error
GetSessions(roomID int) ([]SessionSummary, error)
GetSession(roomID int, sessionID int) (Session, error)
GenerateAccessTokens(roomID int, howMany int) ([]AccessToken, error)
GetAccessTokens(roomID int) ([]AccessToken, error)
AutoLoginHash(roomID int) (string, error)
SendInvitation(roomID int, language string, attendees []string, opts ...SendInvitationOption) error
GetRegistrations(roomID int, status string) ([]Participant, error)
RegisterParticipant(roomID int, participant NewParticipant, opts ...RegisterParticipantOption) (string, error)
GetParticipants(roomID int, sessionID int) ([]Participant, error)
} | type Client interface {
ListRooms(status RoomStatus) ([]Room, error) |
index.ts | import 'mocha';
import * as renderer from 'react-test-renderer';
import * as React from 'react';
import xs, {Stream} from 'xstream';
import * as ReactNative from 'react-native';
import {h, ReactSource, makeCycleReactComponent} from '@cycle/react';
import {run} from '@cycle/run';
const assert = require('assert');
const {View, Text} = ReactNative;
class | extends React.PureComponent<any, any> {
public press() {
if (this.props.onPress) {
this.props.onPress(null);
}
}
public render() {
return this.props.children;
}
}
describe('React Native driver', function () {
it('converts an MVI Cycle app into a React component', function (done) {
function main(sources: {react: ReactSource}) {
const inc = Symbol();
const inc$ = sources.react.select(inc).events('press');
const count$ = inc$.fold((acc: number, x: any) => acc + 1, 0);
const vdom$ = count$.map((i: number) =>
h(Touchable, {sel: inc}, [h(View, [h(Text, {}, '' + i)])])
);
return {react: vdom$};
}
function testDriver(sink: Stream<React.ReactElement<any>>) {
let turn = 0;
const source = new ReactSource();
const Root = makeCycleReactComponent(() => ({source, sink}));
const r = renderer.create(React.createElement(Root as any));
const root = r.root;
const check = () => {
const to = root.findByType(Touchable);
const view = to.props.children;
const text = view.props.children;
assert.strictEqual(text.props.children, `${turn}`);
to.instance.press();
turn++;
if (turn === 3) {
done();
}
};
setTimeout(check, 50);
setTimeout(check, 100);
setTimeout(check, 150);
return source;
}
run(main, {react: testDriver});
});
});
| Touchable |
fake_certificates_client.go | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fake
import (
v1alpha1 "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/certificates/v1alpha1"
restclient "k8s.io/kubernetes/pkg/client/restclient"
core "k8s.io/kubernetes/pkg/client/testing/core"
)
type FakeCertificatesV1alpha1 struct {
*core.Fake
}
func (c *FakeCertificatesV1alpha1) CertificateSigningRequests() v1alpha1.CertificateSigningRequestInterface {
return &FakeCertificateSigningRequests{c}
}
// RESTClient returns a RESTClient that is used to communicate
// with API server by this client implementation.
func (c *FakeCertificatesV1alpha1) RESTClient() restclient.Interface {
var ret *restclient.RESTClient
return ret
} | |
cpe_matchers_test.go | package common
import (
"testing"
"github.com/anchore/grype/grype/cpe"
"github.com/anchore/grype/grype/match"
"github.com/anchore/grype/grype/version"
"github.com/anchore/grype/grype/vulnerability"
"github.com/anchore/grype/internal"
"github.com/anchore/syft/syft/pkg"
)
func must(c cpe.CPE, e error) cpe.CPE {
if e != nil {
panic(e)
}
return c
}
type mockCPEProvider struct {
data map[string]map[string][]*vulnerability.Vulnerability
}
func newMockProviderByCPE() *mockCPEProvider {
pr := mockCPEProvider{
data: make(map[string]map[string][]*vulnerability.Vulnerability),
}
pr.stub()
return &pr
}
func (pr *mockCPEProvider) stub() {
pr.data["nvd"] = map[string][]*vulnerability.Vulnerability{
"activerecord": {
{
Constraint: version.MustGetConstraint("< 3.7.6", version.SemanticFormat),
ID: "CVE-2017-fake-1",
CPEs: []cpe.CPE{
must(cpe.New("cpe:2.3:*:activerecord:activerecord:*:*:*:*:*:rails:*:*")),
},
},
{
Constraint: version.MustGetConstraint("< 3.7.4", version.SemanticFormat),
ID: "CVE-2017-fake-2",
CPEs: []cpe.CPE{ | must(cpe.New("cpe:2.3:*:activerecord:activerecord:*:*:*:*:*:ruby:*:*")),
},
},
{
Constraint: version.MustGetConstraint("= 4.0.1", version.SemanticFormat),
ID: "CVE-2017-fake-3",
CPEs: []cpe.CPE{
must(cpe.New("cpe:2.3:*:couldntgetthisrightcouldyou:activerecord:4.0.1:*:*:*:*:*:*:*")),
},
},
{
Constraint: version.MustGetConstraint("= 4.0.1", version.SemanticFormat),
ID: "CVE-2017-fake-3",
CPEs: []cpe.CPE{
must(cpe.New("cpe:2.3:*:couldntgetthisrightcouldyou:activerecord:4.0.1:*:*:*:*:*:*:*")),
},
},
},
"awesome": {
{
Constraint: version.MustGetConstraint("< 98SP3", version.UnknownFormat),
ID: "CVE-2017-fake-4",
CPEs: []cpe.CPE{
must(cpe.New("cpe:2.3:*:awesome:awesome:*:*:*:*:*:*:*:*")),
},
},
},
}
}
func (pr *mockCPEProvider) GetByCPE(c cpe.CPE) ([]*vulnerability.Vulnerability, error) {
return pr.data["nvd"][c.Product], nil
}
func TestFindMatchesByPackageCPE(t *testing.T) {
tests := []struct {
name string
p pkg.Package
expected []string
}{
{
name: "match from range",
p: pkg.Package{
Name: "activerecord",
Version: "3.7.5",
Language: pkg.Ruby,
Type: pkg.BundlerPkg,
},
expected: []string{
"CVE-2017-fake-1",
},
},
{
name: "multiple matches",
p: pkg.Package{
Name: "activerecord",
Version: "3.7.3",
Language: pkg.Ruby,
Type: pkg.BundlerPkg,
},
expected: []string{
"CVE-2017-fake-1",
"CVE-2017-fake-2",
},
},
{
name: "exact match",
p: pkg.Package{
Name: "activerecord",
Version: "4.0.1",
Language: pkg.Ruby,
Type: pkg.BundlerPkg,
},
expected: []string{
"CVE-2017-fake-3",
},
},
{
name: "no match",
p: pkg.Package{
Name: "couldntgetthisrightcouldyou",
Version: "4.0.1",
Language: pkg.Ruby,
Type: pkg.BundlerPkg,
},
expected: []string{},
},
{
name: "fuzzy version match",
p: pkg.Package{
Name: "awesome",
Version: "98SE1",
},
expected: []string{
"CVE-2017-fake-4",
},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
store := newMockProviderByCPE()
actual, err := FindMatchesByPackageCPE(store, &test.p, match.PythonMatcher)
if err != nil {
t.Fatalf("error while finding matches: %+v", err)
}
if len(actual) != len(test.expected) {
for _, a := range actual {
t.Errorf(" entry: %+v", a)
}
t.Fatalf("unexpected matches count: %d", len(actual))
}
foundCVEs := internal.NewStringSet()
for _, a := range actual {
foundCVEs.Add(a.Vulnerability.ID)
if a.Type != match.FuzzyMatch {
t.Error("fuzzy match not indicated")
}
if a.Package.Name != test.p.Name {
t.Errorf("failed to capture correct original package: %s", a.Package.Name)
}
if a.Matcher != match.PythonMatcher {
t.Errorf("failed to capture matcher name: %s", a.Matcher)
}
if a.IndirectPackage != nil {
t.Fatalf("should not have captured indirect package")
}
if a.Confidence != 0.9 {
t.Fatalf("unexpected confidence: %f", a.Confidence)
}
}
for _, id := range test.expected {
if !foundCVEs.Contains(id) {
t.Errorf("missing CVE: %s", id)
}
}
})
}
} | |
test_micro_http.pb.go | // Code generated by protoc-gen-go-micro. DO NOT EDIT.
// protoc-gen-go-micro version: v3.5.3
// source: test.proto
package pb
import (
context "context"
v3 "go.unistack.org/micro-client-http/v3"
v31 "go.unistack.org/micro-server-http/v3"
api "go.unistack.org/micro/v3/api"
client "go.unistack.org/micro/v3/client"
server "go.unistack.org/micro/v3/server"
http "net/http"
time "time"
)
type testDoubleClient struct {
c client.Client
name string
}
func NewTestDoubleClient(name string, c client.Client) TestDoubleClient {
return &testDoubleClient{c: c, name: name}
}
func (c *testDoubleClient) CallDouble(ctx context.Context, req *CallReq, opts ...client.CallOption) (*CallRsp, error) {
opts = append(opts,
v3.Method(http.MethodPost),
v3.Path("/v1/testdouble/call/name/{name}"),
v3.Body("*"),
)
opts = append(opts, client.WithRequestTimeout(time.Second*5))
rsp := &CallRsp{}
err := c.c.Call(ctx, c.c.NewRequest(c.name, "TestDouble.CallDouble", req), rsp, opts...)
if err != nil {
return nil, err
}
return rsp, nil
}
type testDoubleServer struct {
TestDoubleServer
}
func (h *testDoubleServer) CallDouble(ctx context.Context, req *CallReq, rsp *CallRsp) error {
var cancel context.CancelFunc
ctx, cancel = context.WithTimeout(ctx, time.Second*5)
defer cancel()
return h.TestDoubleServer.CallDouble(ctx, req, rsp)
}
func RegisterTestDoubleServer(s server.Server, sh TestDoubleServer, opts ...server.HandlerOption) error {
type testDouble interface {
CallDouble(ctx context.Context, req *CallReq, rsp *CallRsp) error
}
type TestDouble struct {
testDouble
}
h := &testDoubleServer{sh}
var nopts []server.HandlerOption
for _, endpoint := range TestDoubleEndpoints {
nopts = append(nopts, api.WithEndpoint(&endpoint))
}
return s.Handle(s.NewHandler(&TestDouble{h}, append(nopts, opts...)...))
}
type testClient struct {
c client.Client
name string
}
func | (name string, c client.Client) TestClient {
return &testClient{c: c, name: name}
}
func (c *testClient) CallRepeatedString(ctx context.Context, req *CallReq, opts ...client.CallOption) (*CallRsp, error) {
opts = append(opts,
v3.Method(http.MethodPost),
v3.Path("/v1/test/call_repeated_string"),
v3.Body("*"),
)
opts = append(opts, client.WithRequestTimeout(time.Second*5))
rsp := &CallRsp{}
err := c.c.Call(ctx, c.c.NewRequest(c.name, "Test.CallRepeatedString", req), rsp, opts...)
if err != nil {
return nil, err
}
return rsp, nil
}
func (c *testClient) CallRepeatedInt64(ctx context.Context, req *CallReq, opts ...client.CallOption) (*CallRsp, error) {
opts = append(opts,
v3.Method(http.MethodPost),
v3.Path("/v1/test/call_repeated_int64"),
v3.Body("*"),
)
opts = append(opts, client.WithRequestTimeout(time.Second*5))
rsp := &CallRsp{}
err := c.c.Call(ctx, c.c.NewRequest(c.name, "Test.CallRepeatedInt64", req), rsp, opts...)
if err != nil {
return nil, err
}
return rsp, nil
}
func (c *testClient) Call(ctx context.Context, req *CallReq, opts ...client.CallOption) (*CallRsp, error) {
opts = append(opts,
v3.Method(http.MethodPost),
v3.Path("/v1/test/call/{name}"),
v3.Body("*"),
)
opts = append(opts,
v3.Cookie("Csrftoken", "true"),
v3.Header("Clientid", "true"),
)
opts = append(opts, client.WithRequestTimeout(time.Second*5))
rsp := &CallRsp{}
err := c.c.Call(ctx, c.c.NewRequest(c.name, "Test.Call", req), rsp, opts...)
if err != nil {
return nil, err
}
return rsp, nil
}
func (c *testClient) CallError(ctx context.Context, req *CallReq1, opts ...client.CallOption) (*CallRsp1, error) {
opts = append(opts,
v3.Method(http.MethodPost),
v3.Path("/v1/test/callerror/{name}"),
v3.Body("*"),
)
rsp := &CallRsp1{}
err := c.c.Call(ctx, c.c.NewRequest(c.name, "Test.CallError", req), rsp, opts...)
if err != nil {
return nil, err
}
return rsp, nil
}
type testServer struct {
TestServer
}
func (h *testServer) CallRepeatedString(ctx context.Context, req *CallReq, rsp *CallRsp) error {
var cancel context.CancelFunc
ctx, cancel = context.WithTimeout(ctx, time.Second*5)
defer cancel()
return h.TestServer.CallRepeatedString(ctx, req, rsp)
}
func (h *testServer) CallRepeatedInt64(ctx context.Context, req *CallReq, rsp *CallRsp) error {
var cancel context.CancelFunc
ctx, cancel = context.WithTimeout(ctx, time.Second*5)
defer cancel()
return h.TestServer.CallRepeatedInt64(ctx, req, rsp)
}
func (h *testServer) Call(ctx context.Context, req *CallReq, rsp *CallRsp) error {
var cancel context.CancelFunc
ctx, cancel = context.WithTimeout(ctx, time.Second*5)
defer cancel()
v31.FillRequest(ctx, req,
v31.Header("Clientid", "true"),
v31.Cookie("Csrftoken", "true"),
)
return h.TestServer.Call(ctx, req, rsp)
}
func (h *testServer) CallError(ctx context.Context, req *CallReq1, rsp *CallRsp1) error {
return h.TestServer.CallError(ctx, req, rsp)
}
func RegisterTestServer(s server.Server, sh TestServer, opts ...server.HandlerOption) error {
type test interface {
CallRepeatedString(ctx context.Context, req *CallReq, rsp *CallRsp) error
CallRepeatedInt64(ctx context.Context, req *CallReq, rsp *CallRsp) error
Call(ctx context.Context, req *CallReq, rsp *CallRsp) error
CallError(ctx context.Context, req *CallReq1, rsp *CallRsp1) error
}
type Test struct {
test
}
h := &testServer{sh}
var nopts []server.HandlerOption
for _, endpoint := range TestEndpoints {
nopts = append(nopts, api.WithEndpoint(&endpoint))
}
return s.Handle(s.NewHandler(&Test{h}, append(nopts, opts...)...))
}
| NewTestClient |
vacuum.js | var assert = require("assert")
var dirname = require("path").dirname
var resolve = require("path").resolve
var rimraf = require("rimraf")
var lstat = require("graceful-fs").lstat
var readdir = require("graceful-fs").readdir
var rmdir = require("graceful-fs").rmdir
var unlink = require("graceful-fs").unlink
module.exports = vacuum
function vacuum(leaf, options, cb) {
assert(typeof leaf === "string", "must pass in path to remove")
assert(typeof cb === "function", "must pass in callback")
if (!options) options = {}
assert(typeof options === "object", "options must be an object")
var log = options.log ? options.log : function () {}
var base = options.base
if (base && resolve(leaf).indexOf(resolve(base)) !== 0) {
return cb(new Error(resolve(leaf) + " is not a child of " + resolve(base)))
}
lstat(leaf, function (error, stat) {
if (error) {
if (error.code === "ENOENT") return cb(null)
log(error.stack)
return cb(error)
}
if (!(stat && (stat.isDirectory() || stat.isSymbolicLink() || stat.isFile()))) {
log(leaf, "is not a directory, file, or link") | if (options.purge) {
log("purging", leaf)
rimraf(leaf, function (error) {
if (error) return cb(error)
next(dirname(leaf))
})
}
else if (!stat.isDirectory()) {
log("removing", leaf)
unlink(leaf, function (error) {
if (error) return cb(error)
next(dirname(leaf))
})
}
else {
next(leaf)
}
})
function next(branch) {
// either we've reached the base or we've reached the root
if ((base && resolve(branch) === resolve(base)) || branch === dirname(branch)) {
log("finished vacuuming up to", branch)
return cb(null)
}
readdir(branch, function (error, files) {
if (error) {
if (error.code === "ENOENT") return cb(null)
log("unable to check directory", branch, "due to", error.message)
return cb(error)
}
if (files.length > 0) {
log("quitting because other entries in", branch)
return cb(null)
}
log("removing", branch)
lstat(branch, function (error, stat) {
if (error) {
if (error.code === "ENOENT") return cb(null)
log("unable to lstat", branch, "due to", error.message)
return cb(error)
}
var remove = stat.isDirectory() ? rmdir : unlink
remove(branch, function (error) {
if (error) {
if (error.code === "ENOENT") return cb(null)
log("unable to remove", branch, "due to", error.message)
return cb(error)
}
next(dirname(branch))
})
})
})
}
} | return cb(new Error(leaf + " is not a directory, file, or link"))
}
|
constants.py | """
Package-level constants
"""
from strenum import StrEnum
class | (StrEnum):
"""
Values for the SUMLEV column in PL94 data
"""
STATE = "040"
STATE_COUNTY = "050"
STATE_COUNTY_TRACT = "140"
STATE_COUNTY_TRACT_BLOCKGROUP = "150"
STATE_COUNTY_TRACT_BLOCKGROUP_BLOCK = "750"
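# Because StrEnum members are plain strings, they compare equal to raw SUMLEV
# values; a minimal usage sketch (the row/column names here are hypothetical):
#
#     if row["SUMLEV"] == SummaryLevel.STATE_COUNTY_TRACT:
#         ...  # the row describes a census tract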
| SummaryLevel |
test_03_ch5_4.py | # -*- coding: utf-8 -*-
"""
Created on Wed Mar 25 23:09:40 2020
5.4 池化层
https://tangshusen.me/Dive-into-DL-PyTorch/#/chapter05_CNN/5.4_pooling
@author: bejin
"""
import torch
from torch import nn
def pool2d | ol_size, mode='max'):
X = X.float()
p_h, p_w = pool_size
Y = torch.zeros(X.shape[0] - p_h + 1, X.shape[1] - p_w + 1)
for i in range(Y.shape[0]):
for j in range(Y.shape[1]):
if mode == 'max':
Y[i, j] = X[i: i + p_h, j: j + p_w].max()
elif mode == 'avg':
Y[i, j] = X[i: i + p_h, j: j + p_w].mean()
return Y
X = torch.tensor([[0, 1, 2], [3, 4, 5], [6, 7, 8]])
pool2d(X, (2, 2))
pool2d(X, (2, 2), 'avg')
# 5.4.2 Padding and stride
X = torch.arange(16, dtype=torch.float).view((1, 1, 4, 4))
X
pool2d = nn.MaxPool2d(3)
pool2d(X)
pool2d = nn.MaxPool2d(3, padding=1, stride=2)
pool2d(X)
pool2d = nn.MaxPool2d((2, 4), padding=(1, 2), stride=(2, 3))
pool2d(X)
# 5.4.3 Multiple channels
X = torch.cat((X, X + 1), dim=1)
X
pool2d = nn.MaxPool2d(3, padding=1, stride=2)
pool2d(X)
| (X, po |
app.go | package main
import (
appd "appdynamics"
"fmt"
"math/rand"
"net/http"
"os"
"strconv"
"time"
)
func helloWorld(w http.ResponseWriter, r *http.Request) {
bt := appd.StartBT("/", "")
time.Sleep(time.Duration(rand.Intn(5)) * time.Millisecond)
fmt.Fprintf(w, "Hello World")
appd.EndBT(bt)
}
func main() | {
cfg := appd.Config{}
cfg.AppName = os.Getenv("APPD_APPLICATION_NAME")
cfg.TierName = os.Getenv("APPD_TIER_NAME")
cfg.NodeName = os.Getenv("APPD_NODE_NAME") + ":" + os.Getenv("CF_INSTANCE_INDEX")
port, err := strconv.ParseInt(os.Getenv("APPD_CONTROLLER_PORT"), 10, 16)
if err != nil {
port = 8080
}
cfg.Controller.Host = os.Getenv("APPD_CONTROLLER_HOST")
cfg.Controller.Port = uint16(port)
cfg.Controller.Account = os.Getenv("APPD_ACCOUNT_NAME")
cfg.Controller.AccessKey = os.Getenv("APPD_ACCOUNT_ACCESS_KEY")
cfg.InitTimeoutMs = 1000
err = appd.InitSDK(&cfg)
if err != nil {
fmt.Println(err)
}
http.HandleFunc("/", helloWorld)
http.ListenAndServe(":8080", nil)
} |
|
check.py | import os.path
from app.data.database import init_db, db_path, get_expected_pathname, set_path
def db_exists():
return os.path.isfile(db_path)
def check_db():
global db_path
if (db_path != get_expected_pathname()):
print('DB Check: Running backup')
backup_database_to(get_expected_pathname())
init_db()
if (not db_exists()):
print('DB Check: No database found. Making a new one...')
init_db()
from app.data.camper_editing import reset_locs
reset_locs()
def | (filename):
global db_path
from shutil import copy2
s = open('data/BACKUPDATA', 'a+')
s.seek(0)
prev_path = s.read()
set_path(filename)
db_path = filename #this line is a crude fix for some messy scoping
s.truncate(0)
s.seek(0)
s.write(filename)
if (prev_path == ""):
print("No previous database found, a new one will be generated. This may happen if the BACKUPDATA file is missing or corrupt.")
return False
elif (prev_path == filename):
print("Tried to back up to the same file!")
else:
print ("backing up & copying")
from app.data.camper_editing import reset_locs
copy2(prev_path, filename)
reset_locs()
return filename
| backup_database_to |
main.go | /*
Author: suguo.yao([email protected])
Description: used to download standard components from myschools.me
*/
package main
import (
"flag"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"path/filepath"
"regexp"
"strings"
"sync"
"time"
)
const GITEA = "https://myschools.me"
const PATH = "/suguo/norm/src/branch/master"
var path string
func init() {
flag.StringVar(&path, "dl", "", "name of the component to download")
}
func main() {
flag.Parse()
if path == "" {
panic("no component name specified")
}
var client http.Client
var wg sync.WaitGroup
start := time.Now()
dl(client, path, &wg)
wg.Wait()
fmt.Printf("total time: %.2f s\n", float64(time.Since(start))/float64(time.Second))
}
// get all file links and download them
func dl(client http.Client, path string, wg *sync.WaitGroup) {
if !isExist(path) {
os.MkdirAll(path, 0775)
}
url := fmt.Sprintf("%s%s/%s", GITEA, PATH, path)
html, err := getHtml(client, url)
if err != nil {
fmt.Printf("get html error: %s", err.Error())
return
}
urlPattern := regexp.MustCompile(fmt.Sprintf(`%s/%s/\S*go`, PATH, path))
links := urlPattern.FindAllSubmatch(html, -1)
for _, link := range links {
tmp := strings.Split(string(link[0]), "/")
filename := tmp[len(tmp)-1]
wg.Add(1)
go downloadFile(client, path, filename, wg)
}
}
// download file
func downloadFile(client http.Client, path, filename string, wg *sync.WaitGroup) {
defer wg.Done()
fmt.Println("start to download: ", filename)
fileURL := fmt.Sprintf("%s/suguo/norm/raw/branch/master/%s/%s", GITEA, path, filename)
resp, err := client.Get(fileURL)
if err != nil {
fmt.Printf("download file %s failed due to: %s\n", filename, err.Error())
return
}
defer resp.Body.Close()
var buff [1024]byte
// create the file
file, err := os.Create(filepath.Join(path, filename))
if err != nil {
fmt.Printf("create file: %s error\n", filename)
return
}
defer file.Close()
// write the response body to the file
for {
n, err := resp.Body.Read(buff[:])
if err != nil {
if err == io.EOF {
file.Write(buff[:n])
break
}
fmt.Println("error: ", err)
os.Remove(filepath.Join(path, filename))
return
}
file.Write(buff[:n])
}
fmt.Println("finish download:", filename)
}
// get html source
func getHtml(client http.Client, url string) ([]byte, error) {
r | r := client.Get(url)
if err != nil {
return nil, err
}
defer resp.Body.Close()
data, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
return data, nil
}
// reports whether the file or directory exists
func isExist(path string) bool {
_, err := os.Stat(path)
return !os.IsNotExist(err)
}
| esp, er |
index.ts | import { jsonrpc } from './_request'
import { DexOrderBNToString, Pair, Server as ServerInterface } from '../../types'
import { Wallet } from '../../types'
import { getTimestamp } from '../../utils/helper'
import { personalSign } from '../../utils/sign'
export class | {
private _url: string
private _wallet: Wallet
constructor(url: string, wallet: Wallet) {
this._url = url
this._wallet = wallet
}
private async getToken(params: ServerInterface.GetTokenParams): Promise<string> {
return jsonrpc.get(this._url, {}, 'auth.getToken', [params]).then(data => {
return data.token
})
}
private async _getHeader() {
let obj = { timestamp: 0, token: '' }
let isRequesting = false
const timestamp = getTimestamp()
// Each Tokenlon instance holds its own JWT token, and because the SDK is used
// for automated trading there is no point mid-run where switching nodes would
// require re-fetching the JWT token; so the token is refreshed proactively on
// a schedule to keep it from expiring.
if (!isRequesting && (!obj.timestamp || obj.timestamp < timestamp - 3600)) {
const signature = personalSign(this._wallet.privateKey, timestamp.toString())
isRequesting = true
try {
const token = await this.getToken({ timestamp, signature })
obj = { timestamp, token }
} catch (e) {
}
isRequesting = false
}
return { 'access-token': obj.token }
}
async getPairList(): Promise<Pair.ExchangePair[]> {
const header = await this._getHeader()
return jsonrpc.get(this._url, header, 'dex.getPairList', [{ market: 'Tokenlon' }]).then(data => {
const res = data || []
return res.filter(p => p.tradingEnabled)
})
}
async getOrderBook(params: ServerInterface.GetOrderBookParams): Promise<ServerInterface.OrderBookResult> {
const header = await this._getHeader()
return jsonrpc.get(this._url, header, 'dex.getOrderBook', [params]).then(res => {
const result = {
bids: [],
asks: [],
} as ServerInterface.OrderBookResult
if (res.bids && res.bids.length) {
result.bids = res.bids.sort((s, l) => l.rate - s.rate)
}
if (res.asks && res.asks.length > 1) {
result.asks = res.asks.sort((s, l) => s.rate - l.rate)
}
return result
})
}
async placeOrder(order: DexOrderBNToString): Promise<string> {
const header = await this._getHeader()
return jsonrpc.get(this._url, header, 'dex.placeOrder', [{
protocol: '0x',
order,
}])
}
async fillOrder(params: ServerInterface.FillOrderParams): Promise<string> {
const header = await this._getHeader()
return jsonrpc.get(this._url, header, 'dex.fillOrder', [{
protocol: '0x',
...params,
}])
}
async batchFillOrders(params: ServerInterface.BatchFillOrdersParams): Promise<string> {
const header = await this._getHeader()
return jsonrpc.get(this._url, header, 'dex.batchFillOrders', [{
protocol: '0x',
...params,
}])
}
async cancelOrders(params: string[]): Promise<string> {
const header = await this._getHeader()
return jsonrpc.get(this._url, header, 'dex.cancelOrders', params)
}
async cancelOrdersWithHash(params: ServerInterface.CancelOrderItem[]): Promise<string> {
const header = await this._getHeader()
return jsonrpc.get(this._url, header, 'dex.cancelOrdersWithHash', params)
}
async getOrders(params: ServerInterface.GetOrdersParams): Promise<ServerInterface.OrderBookItem[]> {
const header = await this._getHeader()
return jsonrpc.get(this._url, header, 'dex.getOrders', [params]).then(data => data || [])
}
async getMakerTrades(params: ServerInterface.MakerTradesParams): Promise<ServerInterface.MakerTradesItem[]> {
const header = await this._getHeader()
return jsonrpc.get(this._url, header, 'dex.getMakerTrades', [params]).then(data => data || [])
}
async getTakerTrades(params: ServerInterface.TakerTradesParams): Promise<ServerInterface.TakerTradesItem[]> {
const header = await this._getHeader()
return jsonrpc.get(this._url, header, 'dex.getTakerTrades', [params]).then(data => data || [])
}
} | Server |
testUsdMayaBlockSceneModificationContext.py | #!/pxrpythonsubst
#
# Copyright 2018 Pixar
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
#
import unittest
try:
from pxr import UsdMaya
except ImportError:
from pixar import UsdMaya
from maya import cmds
from maya import standalone
class testUsdMayaBlockSceneModificationContext(unittest.TestCase):
@classmethod
def setUpClass(cls):
standalone.initialize('usd')
@classmethod
def tearDownClass(cls):
standalone.uninitialize()
def _AssertSceneIsModified(self, modified):
isSceneModified = cmds.file(query=True, modified=True)
self.assertEqual(isSceneModified, modified)
def setUp(self):
|
def testPreserveSceneModified(self):
"""
Tests that making scene modifications using a
UsdMayaBlockSceneModificationContext on a scene that has already been
modified correctly maintains the modification status after the context
exits.
"""
# Create a cube to dirty the scene.
cmds.polyCube()
self._AssertSceneIsModified(True)
with UsdMaya.BlockSceneModificationContext():
# Create a cube inside the context manager.
cmds.polyCube()
# The scene should still be modified.
self._AssertSceneIsModified(True)
def testPreserveSceneNotModified(self):
"""
Tests that making scene modifications using a
UsdMayaBlockSceneModificationContext on a scene that has not been
modified correctly maintains the modification status after the context
exits.
"""
with UsdMaya.BlockSceneModificationContext():
# Create a cube inside the context manager.
cmds.polyCube()
# The scene should NOT be modified.
self._AssertSceneIsModified(False)
if __name__ == '__main__':
unittest.main(verbosity=2)
| cmds.file(new=True, force=True)
self._AssertSceneIsModified(False) |
main.py | from celery_app import * |
||
schema.ts | import {
GraphQLObjectType,
GraphQLSchema,
GraphQLInt,
GraphQLString
} from 'graphql';
let count = 0;
let schema = new GraphQLSchema({
query: new GraphQLObjectType({
name: 'RootQueryType',
fields: {
count: {
type: GraphQLInt,
resolve: function() {
return count;
}
}
}
}), // query
mutation: new GraphQLObjectType({
name: 'RootMutationType',
fields: {
updateCount: {
type: GraphQLInt,
description: 'Update the count',
resolve: function() {
return ++count;
}
}
} | });
export default schema; | }) // mutation |
forms.py | # -*- coding: utf-8 -*-
"""Public forms."""
from flask_wtf import Form
from wtforms import PasswordField, StringField
from wtforms.validators import DataRequired
from xixi.user.models import User
class LoginForm(Form):
"""Login form."""
username = StringField('Username', validators=[DataRequired()])
password = PasswordField('Password', validators=[DataRequired()])
def __init__(self, *args, **kwargs):
"""Create instance."""
super(LoginForm, self).__init__(*args, **kwargs)
self.user = None
def validate(self):
| """Validate the form."""
initial_validation = super(LoginForm, self).validate()
if not initial_validation:
return False
self.user = User.query.filter_by(username=self.username.data).first()
if not self.user:
self.username.errors.append('Unknown username')
return False
if not self.user.check_password(self.password.data):
self.password.errors.append('Invalid password')
return False
if not self.user.active:
self.username.errors.append('User not activated')
return False
return True |
|
wwCommandHead.py | from WonderPy.core.wwConstants import WWRobotConstants
from WonderPy.util import wwMath
from .wwCommandBase import WWCommandBase, do_not_call_within_connect_or_sensors
_rc = WWRobotConstants.RobotComponent
_rcv = WWRobotConstants.RobotComponentValues
_rp = WWRobotConstants.RobotProperties
class WWCommandHead(WWCommandBase):
_TIME_ANGLE = 0.2
_TIME_VOLTAGE = 0.6
def __init__(self, robot):
super(WWCommandHead, self).__init__(robot)
def stage_pan_angle(self, pan_degrees):
self._robot.stage_cmds(self.compose_angle(_rc.WW_COMMAND_HEAD_POSITION_PAN,
wwMath.coords_api_to_json_pan(pan_degrees)))
def stage_tilt_angle(self, tilt_degrees):
self._robot.stage_cmds(self.compose_angle(_rc.WW_COMMAND_HEAD_POSITION_TILT,
wwMath.coords_api_to_json_tilt(tilt_degrees)))
def stage_pan_tilt_angle(self, pan_degrees, tilt_degrees):
self.stage_pan_angle(pan_degrees)
self.stage_tilt_angle(tilt_degrees)
def stage_pan_voltage(self, pan_voltage_percent):
self._robot.stage_cmds(self.compose_voltage(_rc.WW_COMMAND_HEAD_PAN_VOLTAGE,
wwMath.coords_api_to_json_pan(pan_voltage_percent)))
def stage_tilt_voltage(self, tilt_voltage_percent):
self._robot.stage_cmds(self.compose_voltage(_rc.WW_COMMAND_HEAD_TILT_VOLTAGE,
wwMath.coords_api_to_json_tilt(tilt_voltage_percent)))
def stage_pan_tilt_voltage(self, pan_voltage_percent, tilt_voltage_percent):
self.stage_pan_voltage(pan_voltage_percent)
self.stage_tilt_voltage(tilt_voltage_percent) |
@do_not_call_within_connect_or_sensors
def do_pan_angle(self, pan_degrees, timeout=None):
self.stage_pan_angle(pan_degrees)
self._block_for_simple_timeout(self._TIME_ANGLE, timeout)
@do_not_call_within_connect_or_sensors
def do_tilt_angle(self, tilt_degrees, timeout=None):
self.stage_tilt_angle(tilt_degrees)
self._block_for_simple_timeout(self._TIME_ANGLE, timeout)
@do_not_call_within_connect_or_sensors
def do_pan_tilt_angle(self, pan_degrees, tilt_degrees, timeout=None):
self.stage_pan_tilt_angle(pan_degrees, tilt_degrees)
self._block_for_simple_timeout(self._TIME_ANGLE, timeout)
@do_not_call_within_connect_or_sensors
def do_pan_voltage(self, pan_voltage_percent, timeout=None):
self.stage_pan_voltage(pan_voltage_percent)
self._block_for_simple_timeout(self._TIME_VOLTAGE, timeout)
@do_not_call_within_connect_or_sensors
def do_tilt_voltage(self, tilt_voltage_percent, timeout=None):
self.stage_tilt_voltage(tilt_voltage_percent)
self._block_for_simple_timeout(self._TIME_VOLTAGE, timeout)
@do_not_call_within_connect_or_sensors
def do_pan_tilt_voltage(self, pan_voltage_percent, tilt_voltage_percent, timeout=None):
self.stage_pan_tilt_voltage(pan_voltage_percent, tilt_voltage_percent)
self._block_for_simple_timeout(self._TIME_VOLTAGE, timeout)
@staticmethod
def compose_angle(component_id, degrees):
args = {_rcv.WW_COMMAND_VALUE_ANGLE_DEGREE: degrees}
return {component_id: args}
@staticmethod
def compose_voltage(component_id, voltage_percent):
args = {_rcv.WW_COMMAND_VALUE_PERCENTAGE: voltage_percent}
return {component_id: args} | |
Worker.py | class Worker():
def __init__(self):
""" initialize Worker class
"""
self.cloudlets = []
self.bandwidth = {}
self.mips = int
self.position = int
self.timer = 0
def attachCloudlet(self, cloudlet):
""" attach cloudlet to worker
Args:
cloudlet (Cloudlet()): cloudlet object to attach
"""
self.cloudlets.append(cloudlet)
def run(self):
""" run the cloudlets on workers,
cloudlets with high_priority=1 have more priority
"""
for cloudlet in self.cloudlets:
if cloudlet.high_priority == 1:
self.execute(cloudlet)
for cloudlet in self.cloudlets:
if cloudlet.high_priority == 0:
self.execute(cloudlet)
def execute(self, cloudlet):
""" execute cloudlet on worker
Args:
cloudlet (Cloudlet()): Cloudlet to execute on Worker
Returns:
[int]: timer of the worker
"""
# calculate transfer time | cloudlet.transferTime = 0
else:
cloudlet.transferTime = cloudlet.size / self.bandwidth[cloudlet.position]
# calculate execution time
cloudlet.executionTime = cloudlet.instructions / self.mips
self.timer += cloudlet.executionTime + cloudlet.transferTime
return self.timer
def log(self):
""" print details of the transaction
"""
print('------------------WORKER--------------------')
print('position '+ str(self.position))
print('MIPS: '+ str(self.mips))
print('Duration: '+ str(self.timer))
for c in self.cloudlets:
# if c.position != 1:
# continue
print(
'From: '+ str(c.position)+
' | High priority: '+ str(c.high_priority)+
' | Instructions: '+ str(c.instructions)+' MI'
' | Size: '+ str(c.size)+' MB'
' | ExecutionTime: '+ str(round(c.executionTime))+' s'
' | TransferTime: '+ str(round(c.transferTime))+' s'
)
def clearHistory(self):
""" clear worker's timer
"""
self.timer = 0 | if cloudlet.position == self.position: |
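A worked example of the timing model in Worker.execute() above: transfer time is the cloudlet size divided by the bandwidth to its position (zero when the cloudlet is already local), and execution time is its instruction count divided by the worker's MIPS. The Cloudlet stand-in below is an assumption; it only models the attributes execute() reads, because the real class is not part of this file.

class Cloudlet:
    # minimal stand-in with the attributes Worker.execute() expects (assumption)
    def __init__(self, position, size, instructions, high_priority=0):
        self.position = position            # where the cloudlet originates
        self.size = size                    # MB to transfer
        self.instructions = instructions    # workload in million instructions (MI)
        self.high_priority = high_priority
        self.transferTime = 0
        self.executionTime = 0

worker = Worker()
worker.mips = 1000                 # 1000 MI/s
worker.position = 1
worker.bandwidth = {2: 100}        # 100 MB/s link from position 2
worker.attachCloudlet(Cloudlet(position=2, size=500, instructions=20000, high_priority=1))
worker.run()                       # high-priority cloudlets execute first
worker.log()                       # transfer 500/100 = 5 s, execution 20000/1000 = 20 s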
progress.rs | #![doc(alias = "channel.hype_train.progress")]
//! A user responds to a poll on the specified channel
use super::*;
/// [`channel.poll.progress`](https://dev.twitch.tv/docs/eventsub/eventsub-subscription-types#channelpollprogress-beta): a user responds to a poll on the specified channel
#[derive(Clone, Debug, typed_builder::TypedBuilder, PartialEq, Eq, Serialize, Deserialize)]
#[cfg_attr(feature = "deny_unknown_fields", serde(deny_unknown_fields))]
#[non_exhaustive] | #[builder(setter(into))]
pub broadcaster_user_id: types::UserId,
}
impl EventSubscription for ChannelPollProgressV1 {
type Payload = ChannelPollProgressV1Payload;
const EVENT_TYPE: EventType = EventType::ChannelPollProgress;
#[cfg(feature = "twitch_oauth2")]
const SCOPE: &'static [twitch_oauth2::Scope] = &[twitch_oauth2::Scope::ChannelReadPolls];
const VERSION: &'static str = "1";
}
/// [`channel.poll.progress`](ChannelPollProgressV1) response payload.
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[cfg_attr(feature = "deny_unknown_fields", serde(deny_unknown_fields))]
#[non_exhaustive]
pub struct ChannelPollProgressV1Payload {
/// The Bits voting settings for the poll.
pub bits_voting: BitsVoting,
/// The requested broadcaster ID.
pub broadcaster_user_id: types::UserId,
/// The requested broadcaster login.
pub broadcaster_user_login: types::UserName,
/// The requested broadcaster display name.
pub broadcaster_user_name: types::DisplayName,
/// The Channel Points voting settings for the poll.
pub channel_points_voting: ChannelPointsVoting,
/// An array of choices for the poll. Includes vote counts.
pub choices: Vec<types::PollChoice>,
/// The time the poll will end.
pub ends_at: types::Timestamp,
/// ID of the poll.
pub id: types::PollId,
/// The time the poll started.
pub started_at: types::Timestamp,
/// Question displayed for the poll.
pub title: String,
}
#[cfg(test)]
#[test]
fn parse_payload() {
let payload = r##"
{
"subscription": {
"id": "f1c2a387-161a-49f9-a165-0f21d7a4e1c4",
"type": "channel.poll.progress",
"version": "1",
"status": "enabled",
"cost": 0,
"condition": {
"broadcaster_user_id": "1337"
},
"transport": {
"method": "webhook",
"callback": "https://example.com/webhooks/callback"
},
"created_at": "2019-11-16T10:11:12.123Z"
},
"event": {
"id": "1243456",
"broadcaster_user_id": "1337",
"broadcaster_user_login": "cool_user",
"broadcaster_user_name": "Cool_User",
"title": "Aren’t shoes just really hard socks?",
"choices": [
{"id": "123", "title": "Yeah!", "bits_votes": 5, "channel_points_votes": 7, "votes": 12},
{"id": "124", "title": "No!", "bits_votes": 10, "channel_points_votes": 4, "votes": 14},
{"id": "125", "title": "Maybe!", "bits_votes": 0, "channel_points_votes": 7, "votes": 7}
],
"bits_voting": {
"is_enabled": true,
"amount_per_vote": 10
},
"channel_points_voting": {
"is_enabled": true,
"amount_per_vote": 10
},
"started_at": "2020-07-15T17:16:03.17106713Z",
"ends_at": "2020-07-15T17:16:08.17106713Z"
}
}
"##;
let val = dbg!(crate::eventsub::Event::parse(payload).unwrap());
crate::tests::roundtrip(&val)
} | pub struct ChannelPollProgressV1 {
/// The broadcaster user ID of the channel for which “poll progress” notifications will be received. |
mod.rs | // Copyright 2018 TiKV Project Authors. Licensed under Apache-2.0.
use std::sync::atomic::{AtomicUsize, Ordering};
/// A load metric for all threads.
pub struct ThreadLoad {
term: AtomicUsize,
load: AtomicUsize,
threshold: usize,
}
impl ThreadLoad {
/// Constructs a new `ThreadLoad` with the specified threshold.
pub fn with_threshold(threshold: usize) -> Self {
ThreadLoad { | }
#[allow(dead_code)]
pub fn get_threshold(&self) -> usize {
// read-only
self.threshold
}
/// Returns true if the current load exceeds its threshold.
#[allow(dead_code)]
pub fn in_heavy_load(&self) -> bool {
self.load.load(Ordering::Relaxed) > self.threshold
}
/// Increases when updating `load`.
#[allow(dead_code)]
pub fn term(&self) -> usize {
self.term.load(Ordering::Relaxed)
}
/// Gets the current load. For example, 200 means the threads are consuming 200% of the CPU resources.
#[allow(dead_code)]
pub fn load(&self) -> usize {
self.load.load(Ordering::Relaxed)
}
}
#[cfg(target_os = "linux")]
mod linux;
#[cfg(target_os = "linux")]
pub use self::linux::*;
#[cfg(not(target_os = "linux"))]
mod other_os {
use super::ThreadLoad;
use std::sync::Arc;
use std::time::Instant;
/// A dummy `ThreadLoadStatistics` implementation for non-Linux platforms
pub struct ThreadLoadStatistics {}
impl ThreadLoadStatistics {
/// Constructs a new `ThreadLoadStatistics`.
pub fn new(_slots: usize, _prefix: &str, _thread_load: Arc<ThreadLoad>) -> Self {
ThreadLoadStatistics {}
}
/// Designate target thread count of this collector.
pub fn set_thread_target(&mut self, _target: usize) {}
/// Records current thread load statistics.
pub fn record(&mut self, _instant: Instant) {}
}
}
#[cfg(not(target_os = "linux"))]
pub use self::other_os::ThreadLoadStatistics; | term: AtomicUsize::new(0),
load: AtomicUsize::new(0),
threshold,
} |
ls.go | package cli
import (
"context"
"strings"
"github.com/spf13/cobra"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha2"
"github.com/oam-dev/kubevela/apis/types"
cmdutil "github.com/oam-dev/kubevela/pkg/utils/util"
"github.com/oam-dev/kubevela/references/apiserver/apis"
"github.com/oam-dev/kubevela/references/appfile"
"github.com/oam-dev/kubevela/references/common"
)
// NewListCommand creates `ls` command and its nested children command
func NewListCommand(c types.Args, ioStreams cmdutil.IOStreams) *cobra.Command {
ctx := context.Background()
cmd := &cobra.Command{
Use: "ls",
Aliases: []string{"list"},
DisableFlagsInUseLine: true,
Short: "List services",
Long: "List services of all applications",
Example: `vela ls`,
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
return c.SetConfig()
},
RunE: func(cmd *cobra.Command, args []string) error {
env, err := GetEnv(cmd)
if err != nil {
return err
}
newClient, err := client.New(c.Config, client.Options{Scheme: c.Schema})
if err != nil {
return err
}
appName, err := cmd.Flags().GetString(App)
if err != nil {
return err
}
printComponentList(ctx, newClient, appName, env, ioStreams)
return nil
},
Annotations: map[string]string{
types.TagCommandType: types.TypeApp,
},
}
cmd.PersistentFlags().StringP(App, "", "", "specify the name of application")
return cmd | ioStreams cmdutil.IOStreams) {
deployedComponentList, err := common.ListComponents(ctx, c, common.Option{
AppName: appName,
Namespace: env.Namespace,
})
if err != nil {
ioStreams.Infof("listing services: %s\n", err)
return
}
fetcher := func(name string) (*v1alpha2.Application, error) {
var app = new(v1alpha2.Application)
err := c.Get(ctx, client.ObjectKey{Name: name, Namespace: env.Namespace}, app)
return app, err
}
all := mergeStagingComponents(deployedComponentList, env, ioStreams, fetcher)
table := newUITable()
table.AddRow("SERVICE", "APP", "TYPE", "TRAITS", "STATUS", "CREATED-TIME")
for _, a := range all {
traitAlias := strings.Join(a.TraitNames, ",")
table.AddRow(a.Name, a.App, a.WorkloadName, traitAlias, a.Status, a.CreatedTime)
}
ioStreams.Info(table.String())
}
func mergeStagingComponents(deployed []apis.ComponentMeta, env *types.EnvMeta, ioStreams cmdutil.IOStreams, fetcher func(name string) (*v1alpha2.Application, error)) []apis.ComponentMeta {
localApps, err := appfile.List(env.Name)
if err != nil {
ioStreams.Error("list application err", err)
return deployed
}
var all []apis.ComponentMeta
for _, app := range localApps {
appl, err := fetcher(app.Name)
if err != nil {
ioStreams.Errorf("fetch app %s err %v\n", app.Name, err)
continue
}
for _, c := range appl.Spec.Components {
traits := []string{}
for _, t := range c.Traits {
traits = append(traits, t.Name)
}
compMeta, exist := GetCompMeta(deployed, app.Name, c.Name)
if !exist {
all = append(all, apis.ComponentMeta{
Name: c.Name,
App: app.Name,
WorkloadName: c.WorkloadType,
TraitNames: traits,
Status: types.StatusStaging,
CreatedTime: app.CreateTime.String(),
})
continue
}
compMeta.TraitNames = traits
compMeta.WorkloadName = c.WorkloadType
if appl.Status.Phase != v1alpha2.ApplicationRunning {
compMeta.Status = types.StatusStaging
}
all = append(all, compMeta)
}
}
return all
}
// GetCompMeta gets meta of a component
func GetCompMeta(deployed []apis.ComponentMeta, appName, compName string) (apis.ComponentMeta, bool) {
for _, v := range deployed {
if v.Name == compName && v.App == appName {
return v, true
}
}
return apis.ComponentMeta{}, false
} | }
func printComponentList(ctx context.Context, c client.Reader, appName string, env *types.EnvMeta, |
ch2_store_fault.rs | #![no_std]
#![no_main]
#![feature(llvm_asm)]
#[macro_use]
extern crate user_lib;
#[no_mangle]
fn main() -> i32 | {
println!("Into Test store_fault, we will insert an invalid store operation...");
println!("Kernel should kill this application!");
unsafe { (0x0 as *mut u8).write_volatile(0); }
0
} |
|
main.go | package main
import (
"fmt"
"os"
"gorm.io/driver/postgres"
"gorm.io/gorm"
)
func | () {
dsn := fmt.Sprintf("host=%s port=%s user=%s password=%s dbname=%s sslmode=%s", os.Getenv("DBHOST"), os.Getenv("DBPORT"), os.Getenv("DBUSER"), os.Getenv("DBPASS"), os.Getenv("DBNAME"), os.Getenv("SSLMODE"))
fmt.Println("dsn:", dsn)
db, err := gorm.Open(postgres.Open(dsn), &gorm.Config{})
if err != nil {
panic("failed to connect database")
}
fmt.Println("Connected. db.Error", db.Error)
// TODO: Enjoy!
}
| main |
operations.rs | #![doc = "generated by AutoRust 0.1.0"]
#![allow(unused_mut)]
#![allow(unused_variables)]
#![allow(unused_imports)]
use crate::models::*;
use snafu::{ResultExt, Snafu};
pub mod dimensions {
use crate::models::*;
use snafu::{ResultExt, Snafu};
pub async fn list(
operation_config: &crate::OperationConfig,
scope: &str,
filter: Option<&str>,
expand: Option<&str>,
skiptoken: Option<&str>,
top: Option<i64>,
) -> std::result::Result<DimensionsListResult, list::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/{}/providers/Microsoft.CostManagement/dimensions",
operation_config.base_path(),
scope
);
let mut url = url::Url::parse(url_str).context(list::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.context(list::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
if let Some(filter) = filter {
url.query_pairs_mut().append_pair("$filter", filter);
}
if let Some(expand) = expand {
url.query_pairs_mut().append_pair("$expand", expand);
}
if let Some(skiptoken) = skiptoken {
url.query_pairs_mut().append_pair("$skiptoken", skiptoken);
}
if let Some(top) = top {
url.query_pairs_mut().append_pair("$top", top.to_string().as_str());
}
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).context(list::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.context(list::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: DimensionsListResult =
serde_json::from_slice(rsp_body).context(list::DeserializeError { body: rsp_body.clone() })?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).context(list::DeserializeError { body: rsp_body.clone() })?;
list::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod list {
use crate::{models, models::*};
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
ParseUrlError {
source: url::ParseError,
},
BuildRequestError {
source: http::Error,
},
ExecuteRequestError {
source: Box<dyn std::error::Error + Sync + Send>,
},
SerializeError {
source: Box<dyn std::error::Error + Sync + Send>,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
}
pub mod query {
use crate::models::*;
use snafu::{ResultExt, Snafu};
pub async fn usage(
operation_config: &crate::OperationConfig,
scope: &str,
parameters: &QueryDefinition,
) -> std::result::Result<QueryResult, usage::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/{}/providers/Microsoft.CostManagement/query",
operation_config.base_path(),
scope
);
let mut url = url::Url::parse(url_str).context(usage::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.context(usage::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).context(usage::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.context(usage::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: QueryResult =
serde_json::from_slice(rsp_body).context(usage::DeserializeError { body: rsp_body.clone() })?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).context(usage::DeserializeError { body: rsp_body.clone() })?;
usage::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod usage {
use crate::{models, models::*};
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
ParseUrlError {
source: url::ParseError,
},
BuildRequestError {
source: http::Error,
},
ExecuteRequestError {
source: Box<dyn std::error::Error + Sync + Send>,
},
SerializeError {
source: Box<dyn std::error::Error + Sync + Send>,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
}
pub mod exports {
use crate::models::*;
use snafu::{ResultExt, Snafu};
pub async fn list(operation_config: &crate::OperationConfig, scope: &str) -> std::result::Result<ExportListResult, list::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/{}/providers/Microsoft.CostManagement/exports",
operation_config.base_path(),
scope
);
let mut url = url::Url::parse(url_str).context(list::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.context(list::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).context(list::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.context(list::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: ExportListResult =
serde_json::from_slice(rsp_body).context(list::DeserializeError { body: rsp_body.clone() })?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).context(list::DeserializeError { body: rsp_body.clone() })?;
list::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod list {
use crate::{models, models::*};
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
ParseUrlError {
source: url::ParseError,
},
BuildRequestError {
source: http::Error,
},
ExecuteRequestError {
source: Box<dyn std::error::Error + Sync + Send>,
},
SerializeError {
source: Box<dyn std::error::Error + Sync + Send>,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
pub async fn get(operation_config: &crate::OperationConfig, scope: &str, export_name: &str) -> std::result::Result<Export, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/{}/providers/Microsoft.CostManagement/exports/{}",
operation_config.base_path(),
scope,
export_name
);
let mut url = url::Url::parse(url_str).context(get::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.context(get::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).context(get::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.context(get::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: Export = serde_json::from_slice(rsp_body).context(get::DeserializeError { body: rsp_body.clone() })?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).context(get::DeserializeError { body: rsp_body.clone() })?;
get::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod get {
use crate::{models, models::*};
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
ParseUrlError {
source: url::ParseError,
},
BuildRequestError {
source: http::Error,
},
ExecuteRequestError {
source: Box<dyn std::error::Error + Sync + Send>,
},
SerializeError {
source: Box<dyn std::error::Error + Sync + Send>,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
pub async fn create_or_update(
operation_config: &crate::OperationConfig,
scope: &str,
export_name: &str,
parameters: &Export,
) -> std::result::Result<create_or_update::Response, create_or_update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/{}/providers/Microsoft.CostManagement/exports/{}",
operation_config.base_path(),
scope,
export_name
);
let mut url = url::Url::parse(url_str).context(create_or_update::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.context(create_or_update::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).context(create_or_update::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.context(create_or_update::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: Export =
serde_json::from_slice(rsp_body).context(create_or_update::DeserializeError { body: rsp_body.clone() })?;
Ok(create_or_update::Response::Ok200(rsp_value))
}
http::StatusCode::CREATED => {
let rsp_body = rsp.body();
let rsp_value: Export =
serde_json::from_slice(rsp_body).context(create_or_update::DeserializeError { body: rsp_body.clone() })?;
Ok(create_or_update::Response::Created201(rsp_value))
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).context(create_or_update::DeserializeError { body: rsp_body.clone() })?;
create_or_update::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod create_or_update {
use crate::{models, models::*};
use snafu::Snafu;
#[derive(Debug)]
pub enum Response {
Ok200(Export),
Created201(Export),
}
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
ParseUrlError {
source: url::ParseError,
},
BuildRequestError {
source: http::Error,
},
ExecuteRequestError {
source: Box<dyn std::error::Error + Sync + Send>,
},
SerializeError {
source: Box<dyn std::error::Error + Sync + Send>,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
pub async fn delete(
operation_config: &crate::OperationConfig,
scope: &str,
export_name: &str,
) -> std::result::Result<(), delete::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/{}/providers/Microsoft.CostManagement/exports/{}",
operation_config.base_path(),
scope,
export_name
);
let mut url = url::Url::parse(url_str).context(delete::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.context(delete::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).context(delete::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.context(delete::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => Ok(()),
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).context(delete::DeserializeError { body: rsp_body.clone() })?;
delete::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod delete {
use crate::{models, models::*};
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
ParseUrlError {
source: url::ParseError,
},
BuildRequestError {
source: http::Error,
},
ExecuteRequestError {
source: Box<dyn std::error::Error + Sync + Send>,
},
SerializeError {
source: Box<dyn std::error::Error + Sync + Send>,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
pub async fn execute(
operation_config: &crate::OperationConfig,
scope: &str,
export_name: &str,
) -> std::result::Result<(), execute::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/{}/providers/Microsoft.CostManagement/exports/{}/run",
operation_config.base_path(),
scope,
export_name
);
let mut url = url::Url::parse(url_str).context(execute::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.context(execute::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).context(execute::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.context(execute::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => Ok(()),
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).context(execute::DeserializeError { body: rsp_body.clone() })?;
execute::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod execute {
use crate::{models, models::*};
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
ParseUrlError {
source: url::ParseError,
},
BuildRequestError {
source: http::Error,
},
ExecuteRequestError {
source: Box<dyn std::error::Error + Sync + Send>,
},
SerializeError {
source: Box<dyn std::error::Error + Sync + Send>,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
pub async fn get_execution_history(
operation_config: &crate::OperationConfig,
scope: &str,
export_name: &str,
) -> std::result::Result<ExportExecutionListResult, get_execution_history::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/{}/providers/Microsoft.CostManagement/exports/{}/runHistory",
operation_config.base_path(),
scope,
export_name
);
let mut url = url::Url::parse(url_str).context(get_execution_history::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.context(get_execution_history::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).context(get_execution_history::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.context(get_execution_history::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => |
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).context(get_execution_history::DeserializeError { body: rsp_body.clone() })?;
get_execution_history::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod get_execution_history {
use crate::{models, models::*};
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
ParseUrlError {
source: url::ParseError,
},
BuildRequestError {
source: http::Error,
},
ExecuteRequestError {
source: Box<dyn std::error::Error + Sync + Send>,
},
SerializeError {
source: Box<dyn std::error::Error + Sync + Send>,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
}
pub mod operations {
use crate::models::*;
use snafu::{ResultExt, Snafu};
pub async fn list(operation_config: &crate::OperationConfig) -> std::result::Result<OperationListResult, list::Error> {
let http_client = operation_config.http_client();
let url_str = &format!("{}/providers/Microsoft.CostManagement/operations", operation_config.base_path(),);
let mut url = url::Url::parse(url_str).context(list::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.context(list::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).context(list::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.context(list::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: OperationListResult =
serde_json::from_slice(rsp_body).context(list::DeserializeError { body: rsp_body.clone() })?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).context(list::DeserializeError { body: rsp_body.clone() })?;
list::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod list {
use crate::{models, models::*};
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
ParseUrlError {
source: url::ParseError,
},
BuildRequestError {
source: http::Error,
},
ExecuteRequestError {
source: Box<dyn std::error::Error + Sync + Send>,
},
SerializeError {
source: Box<dyn std::error::Error + Sync + Send>,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
}
| {
let rsp_body = rsp.body();
let rsp_value: ExportExecutionListResult =
serde_json::from_slice(rsp_body).context(get_execution_history::DeserializeError { body: rsp_body.clone() })?;
Ok(rsp_value)
} |
temp.py | """
The temp module provides a NamedTemporaryFile that can be reopened in the same
process on any platform. Most platforms use the standard Python
tempfile.NamedTemporaryFile class, but Windows users are given a custom class.
This is needed because the Python implementation of NamedTemporaryFile uses the
O_TEMPORARY flag under Windows, which prevents the file from being reopened
if the same flag is not provided [1][2]. Note that this does not address the
more general issue of opening a file for writing and reading in multiple
processes in a manner that works across platforms.
Also note that the custom version of NamedTemporaryFile does not support the
full range of keyword arguments available in Python 2.6+ and 3.0+.
1: https://mail.python.org/pipermail/python-list/2005-December/336958.html
2: http://bugs.python.org/issue14243
"""
import os
import tempfile
from django.core.files.utils import FileProxyMixin
__all__ = ('NamedTemporaryFile', 'gettempdir',)
if os.name == 'nt':
class TemporaryFile(FileProxyMixin):
"""
Temporary file object constructor that supports reopening of the
temporary file in Windows.
Note that unlike tempfile.NamedTemporaryFile from the standard library,
__init__() does not support the 'delete' keyword argument in
Python 2.6+, or the 'delete', 'buffering', 'encoding', or 'newline'
keyword arguments in Python 3.0+.
"""
def __init__(self, mode='w+b', bufsize=-1, suffix='', prefix='', dir=None):
fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix, dir=dir)
self.name = name
self.file = os.fdopen(fd, mode, bufsize)
self.close_called = False
# Because close can be called during shutdown
# we need to cache os.unlink and access it
# as self.unlink only
unlink = os.unlink
def | (self):
if not self.close_called:
self.close_called = True
try:
self.file.close()
except (OSError, IOError):
pass
try:
self.unlink(self.name)
except OSError:
pass
@property
def closed(self):
"""
This attribute needs to be accessible in certain situations,
because this class is supposed to mock the API of the class
tempfile.NamedTemporaryFile in the Python standard library.
"""
return self.file.closed
def __del__(self):
self.close()
def __enter__(self):
self.file.__enter__()
return self
def __exit__(self, exc, value, tb):
self.file.__exit__(exc, value, tb)
NamedTemporaryFile = TemporaryFile
else:
NamedTemporaryFile = tempfile.NamedTemporaryFile
gettempdir = tempfile.gettempdir
| close |
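A brief usage sketch for the NamedTemporaryFile exported above; the whole point of the Windows-specific wrapper is that the file can be reopened by name while it is still open. The django.core.files.temp import path is an assumption inferred from the FileProxyMixin import in the module.

from django.core.files.temp import NamedTemporaryFile  # import path assumed

tmp = NamedTemporaryFile(suffix='.txt')
tmp.write(b'hello')
tmp.file.flush()                    # make sure the bytes reach the file
with open(tmp.name, 'rb') as f:     # reopen by name in the same process
    assert f.read() == b'hello'
tmp.close()                         # the Windows wrapper also unlinks the file here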
iotwireless.py | # Copyright (c) 2012-2021, Mark Peek <[email protected]>
# All rights reserved.
#
# See LICENSE file for full license.
#
# *** Do not modify - this file is autogenerated ***
# Resource specification version: 31.0.0
from troposphere import Tags
from . import AWSObject, AWSProperty
from .validators import boolean, integer
class Destination(AWSObject):
resource_type = "AWS::IoTWireless::Destination"
props = {
"Description": (str, False),
"Expression": (str, True),
"ExpressionType": (str, True),
"Name": (str, True),
"RoleArn": (str, True),
"Tags": (Tags, False),
}
class LoRaWANDeviceProfile(AWSProperty):
props = {
"ClassBTimeout": (integer, False),
"ClassCTimeout": (integer, False),
"MacVersion": (str, False),
"MaxDutyCycle": (integer, False),
"MaxEirp": (integer, False),
"PingSlotDr": (integer, False),
"PingSlotFreq": (integer, False),
"PingSlotPeriod": (integer, False),
"RegParamsRevision": (str, False),
"RfRegion": (str, False),
"Supports32BitFCnt": (boolean, False),
"SupportsClassB": (boolean, False),
"SupportsClassC": (boolean, False),
"SupportsJoin": (boolean, False),
}
class DeviceProfile(AWSObject):
resource_type = "AWS::IoTWireless::DeviceProfile"
props = {
"LoRaWAN": (LoRaWANDeviceProfile, False),
"Name": (str, False),
"Tags": (Tags, False),
}
class LoRaWANServiceProfile(AWSProperty):
props = {
"AddGwMetadata": (boolean, False),
"ChannelMask": (str, False),
"DevStatusReqFreq": (integer, False),
"DlBucketSize": (integer, False),
"DlRate": (integer, False),
"DlRatePolicy": (str, False),
"DrMax": (integer, False),
"DrMin": (integer, False),
"HrAllowed": (boolean, False),
"MinGwDiversity": (integer, False),
"NwkGeoLoc": (boolean, False),
"PrAllowed": (boolean, False),
"RaAllowed": (boolean, False),
"ReportDevStatusBattery": (boolean, False),
"ReportDevStatusMargin": (boolean, False),
"TargetPer": (integer, False),
"UlBucketSize": (integer, False),
"UlRate": (integer, False),
"UlRatePolicy": (str, False),
}
class ServiceProfile(AWSObject):
resource_type = "AWS::IoTWireless::ServiceProfile"
props = {
"LoRaWAN": (LoRaWANServiceProfile, False),
"Name": (str, False),
"Tags": (Tags, False),
}
class SessionKeysAbpV10x(AWSProperty):
props = {
"AppSKey": (str, True),
"NwkSKey": (str, True),
}
class AbpV10x(AWSProperty):
props = {
"DevAddr": (str, True),
"SessionKeys": (SessionKeysAbpV10x, True),
}
class SessionKeysAbpV11(AWSProperty):
props = {
"AppSKey": (str, True),
"FNwkSIntKey": (str, True),
"NwkSEncKey": (str, True),
"SNwkSIntKey": (str, True),
}
class AbpV11(AWSProperty):
props = {
"DevAddr": (str, True),
"SessionKeys": (SessionKeysAbpV11, True),
}
class OtaaV10x(AWSProperty):
props = {
"AppEui": (str, True),
"AppKey": (str, True),
}
class OtaaV11(AWSProperty):
props = {
"AppKey": (str, True),
"JoinEui": (str, True),
"NwkKey": (str, True),
}
class LoRaWANDevice(AWSProperty):
props = {
"AbpV10x": (AbpV10x, False),
"AbpV11": (AbpV11, False),
"DevEui": (str, False),
"DeviceProfileId": (str, False),
"OtaaV10x": (OtaaV10x, False),
"OtaaV11": (OtaaV11, False),
"ServiceProfileId": (str, False),
}
class | (AWSObject):
resource_type = "AWS::IoTWireless::WirelessDevice"
props = {
"Description": (str, False),
"DestinationName": (str, True),
"LastUplinkReceivedAt": (str, False),
"LoRaWAN": (LoRaWANDevice, False),
"Name": (str, False),
"Tags": (Tags, False),
"ThingArn": (str, False),
"Type": (str, True),
}
class LoRaWANGateway(AWSProperty):
props = {
"GatewayEui": (str, True),
"RfRegion": (str, True),
}
class WirelessGateway(AWSObject):
resource_type = "AWS::IoTWireless::WirelessGateway"
props = {
"Description": (str, False),
"LastUplinkReceivedAt": (str, False),
"LoRaWAN": (LoRaWANGateway, True),
"Name": (str, False),
"Tags": (Tags, False),
"ThingArn": (str, False),
}
| WirelessDevice |
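A short usage sketch for the resource classes above: troposphere objects are added to a Template and rendered to a CloudFormation document. The logical resource name, property values, and the troposphere.iotwireless import path are assumptions for illustration.

from troposphere import Template
from troposphere.iotwireless import Destination  # import path assumed

template = Template()
template.add_resource(Destination(
    "SensorDestination",                 # logical name (made up)
    Expression="uplink_rule",            # name of an existing IoT rule (assumed)
    ExpressionType="RuleName",
    Name="sensor-destination",
    RoleArn="arn:aws:iam::123456789012:role/iotwireless-destination",
))
print(template.to_json())                # emits the CloudFormation JSON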
test4.rs | // test4.rs
// This test covers the sections:
// - Modules
// - Macros
// Write a macro that passes the test! No hints this time, you can do it!
//
macro_rules! my_macro {
($val:expr) => {
"Hello ".to_owned() + $val
};
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_my_macro_world() {
assert_eq!(my_macro!("world!"), "Hello world!");
}
#[test]
fn | () {
assert_eq!(my_macro!("goodbye!"), "Hello goodbye!");
}
}
| test_my_macro_goodbye |
index.js | import 'bootstrap/dist/css/bootstrap.min.css';
import 'bootstrap/dist/js/bootstrap.bundle.min';
import './styles/three-dots/three-dots.scss';
// These can be imported and used; commented out until needed.
// import $ from 'jquery';
// import Popper from 'popper.js';
import React from 'react';
import ReactDOM from 'react-dom';
import 'react-web-vector-icons/fonts';
import './index.css';
import App from './App';
import * as serviceWorker from './serviceWorker';
import { createStore } from 'redux';
import { Provider } from 'react-redux';
import reducers from './reducers';
import middleware from './middleware';
import defaultData from './utils/default-data';
| <App />
</Provider>,
document.getElementById('root')
);
// If you want your app to work offline and load faster, you can change
// unregister() to register() below. Note this comes with some pitfalls.
// Learn more about service workers: https://bit.ly/CRA-PWA
serviceWorker.unregister(); | const store = createStore(reducers, defaultData, middleware);
ReactDOM.render(
<Provider store={store}> |
update-dependency-version.ts | import { Tree } from '@nrwl/devkit';
import {
getNxDotnetProjects,
getProjectFilesForProject,
iterateChildrenByPath,
readXml,
} from '@nx-dotnet/utils';
export async function updateDependencyVersions(
host: Tree,
packageName: string,
version: string,
) {
const projects = await getNxDotnetProjects(host);
for (const [projectName, configuration] of projects.entries()) {
const projectFiles = getProjectFilesForProject(
host,
configuration,
projectName,
);
for (const f of projectFiles) {
const xmldoc = readXml(host, f);
let updateFile = false;
await iterateChildrenByPath(
xmldoc,
'ItemGroup.PackageReference',
(reference) => {
if (
reference.attr['Include'] === packageName &&
reference.attr['Version'] !== version
) {
console.warn(
`Updating ${projectName} to use ${packageName} v${version}`, | updateFile = true;
}
},
);
if (updateFile) {
host.write(f, xmldoc.toString());
}
}
}
} | );
reference.attr['Version'] = version; |
build.rs | // build.rs
extern crate bindgen;
extern crate cc;
extern crate git2;
use std::env;
use std::path::Path;
use std::fs;
use std::io::Write;
use std::process::Command;
use git2::Repository;
fn main() |
fn add_cpp_files(build: &mut cc::Build, path: &Path) {
println!("{:#?}", path);
for e in path.read_dir().unwrap() {
println!("{:#?}", e);
let e = e.unwrap();
let path = e.path();
if e.file_type().unwrap().is_dir() {
} else if path.extension().and_then(|s| s.to_str()) == Some("cpp") {
build.file(&path);
}
}
}
| {
println!("cargo:rerun-if-changed=build.rs");
// Cloning the repo.
let url = "https://github.com/Microsoft/SEAL.git";
//let _repo = match Repository::clone(url, "./seal") {
// Ok(repo) => repo,
// Err(e) => panic!("Failed to clone SEAL: {}", e),
//};
// Configuring before building
// Setting working directory
let _res = match env::set_current_dir(Path::new("./seal/native/")) {
Ok(r) => r,
Err(e) => panic!("SEAL was not properly cloned: {}", e),
};
//Cmake
Command::new("cmake")
.arg("./src/")
.output()
.expect("failed to execute process");
// Resetting working directory
let _res = match env::set_current_dir(Path::new("../../")) {
Ok(r) => r,
Err(e) => panic!("Unable to clean after cmaking the repo: {}", e)
};
// Build SEAL
let mut build = cc::Build::new();
build.cpp(true);
build.flag_if_supported("-std=c++17");
build.flag_if_supported("-march=native");
build.flag_if_supported("-fkeep-inline-functions");
build.flag_if_supported("-fno-inline-functions");
let base_path = Path::new("./seal/native/src/seal/");
let util_base_path = Path::new("./seal/native/src/seal/util/");
add_cpp_files(&mut build, base_path);
add_cpp_files(&mut build, util_base_path);
build.include("./seal/native/src");
build.include("src/");
build.file("src/bindings.cpp");
build.compile("seal");
// Generate the bindings
let bindings = bindgen::Builder::default()
.generate_inline_functions(true)
.derive_default(true)
.header("src/bindings.h")
.clang_arg("-I./seal/native/src/")
.clang_arg("-std=c++17")
.clang_arg("-x")
.clang_arg("c++")
.opaque_type("std::.*")
.whitelist_type("seal::.*")
.whitelist_function("seal::.*")
.whitelist_type("bindings::.*")
.whitelist_function("bindings::.*")
.generate()
.expect("Unable to generate bindings");
let bindings_string = bindings
.to_string()
// Dirty hack
.replace("pub data_: seal_util_Pointer<T>", "pub data_: seal_util_Pointer<u64>");
let mut file = fs::File::create("./src/bindings.rs").unwrap();
file.write_all(bindings_string.as_bytes()).expect("Unable to write bindings");
file.sync_data().expect("Unable to sync bindings file");
// Cleanup
//let _res = match fs::remove_dir_all("./seal") {
// Ok(r) => r,
// Err(e) => panic!("Unable to remove SEAL dir after build: {}", e)
//};
} |
traits.rs | trait Word {} |
||
modelconstruction.py | """
Functions for the construction of new models.
"""
#***************************************************************************************************
# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights
# in this software.
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.
#***************************************************************************************************
import collections as _collections
import itertools as _itertools
from os import stat
from pygsti.modelmembers.instruments.instrument import Instrument
import numpy as _np
import scipy as _scipy
import scipy.linalg as _spl
from pygsti.evotypes import Evotype as _Evotype
from pygsti.modelmembers import operations as _op
from pygsti.modelmembers import povms as _povm
from pygsti.modelmembers import states as _state
from pygsti.modelmembers import instruments as _instrument
from pygsti.modelmembers.operations import opfactory as _opfactory
from pygsti.models import stencillabel as _stencil
from pygsti.models.modelnoise import OpModelNoise as _OpModelNoise
from pygsti.models.modelnoise import OpModelPerOpNoise as _OpModelPerOpNoise
from pygsti.models.modelnoise import ComposedOpModelNoise as _ComposedOpModelNoise
from pygsti.models.modelnoise import LindbladNoise as _LindbladNoise
from pygsti.models.modelnoise import StochasticNoise as _StochasticNoise
from pygsti.models.modelnoise import DepolarizationNoise as _DepolarizationNoise
from pygsti.models import explicitmodel as _emdl
from pygsti.models import gaugegroup as _gg
from pygsti.models.localnoisemodel import LocalNoiseModel as _LocalNoiseModel
from pygsti.models.cloudnoisemodel import CloudNoiseModel as _CloudNoiseModel
from pygsti.baseobjs import label as _label
from pygsti.baseobjs import statespace as _statespace
from pygsti.baseobjs.basis import Basis as _Basis
from pygsti.baseobjs.basis import ExplicitBasis as _ExplicitBasis
from pygsti.baseobjs.basis import DirectSumBasis as _DirectSumBasis
from pygsti.baseobjs.qubitgraph import QubitGraph as _QubitGraph
from pygsti.tools import basistools as _bt
from pygsti.tools import internalgates as _itgs
from pygsti.tools import optools as _ot
from pygsti.tools import listtools as _lt
from pygsti.baseobjs.basisconstructors import sqrt2, id2x2, sigmax, sigmay, sigmaz
from pygsti.baseobjs.verbosityprinter import VerbosityPrinter as _VerbosityPrinter
from pygsti.tools.legacytools import deprecate as _deprecated_fn
#############################################
# Build gates based on "standard" gate names
############################################
def create_spam_vector(vec_expr, state_space, basis):
"""
Build a rho or E vector from an expression.
Parameters
----------
vec_expr : string
the expression which determines which vector to build. Currently, only
integers are allowed, which specify the vector for the pure state of
that index. For example, "1" means return vectorize(``|1><1|``). The
index labels the absolute index of the state within the entire state
space, and is independent of the direct-sum decomposition of density
matrix space.
state_space : StateSpace
The state space that the created operation should act upon.
basis : str or Basis
The basis of the returned vector. Allowed
values are Matrix-unit (std), Gell-Mann (gm), Pauli-product (pp),
and Qutrit (qt) (or a custom basis object).
Returns
-------
numpy array
The vector specified by vec_expr in the desired basis.
"""
#So far just allow integer prep_expressions that give the index of state (within the state space) that we
#prep/measure
try:
index = int(vec_expr)
except:
raise ValueError("Expression must be the index of a state (as a string)")
state_space = _statespace.StateSpace.cast(state_space)
if isinstance(basis, str):
basis = _Basis.cast(basis, state_space)
assert (state_space.dim == basis.dim), \
"State space labels dim (%s) != basis dim (%s)" % (state_space.dim, basis.dim)
#standard basis that has the same direct-sum structure as `basis`:
std_basis = basis.create_equivalent('std')
vecInSimpleStdBasis = _np.zeros(std_basis.elshape, 'd') # a matrix, but flattened it is our spamvec
vecInSimpleStdBasis[index, index] = 1.0 # now a matrix with just a single 1 on the diag
vecInReducedStdBasis = _np.dot(std_basis.from_elementstd_transform_matrix, vecInSimpleStdBasis.flatten())
# translates the density matrix / state vector to the std basis with our desired block structure
vec = _bt.change_basis(vecInReducedStdBasis, std_basis, basis)
return vec.reshape(-1, 1)
def create_identity_vec(basis):
"""
Build the identity vector for a given space and basis.
Parameters
----------
basis : Basis object
The basis of the returned vector. Allowed
values are Matrix-unit (std), Gell-Mann (gm), Pauli-product (pp),
and Qutrit (qt) (or a custom basis object).
Returns
-------
numpy array
The identity vector in the desired basis.
"""
opDim = basis.dim
if isinstance(basis, _DirectSumBasis):
blockDims = [c.dim for c in basis.component_bases]
else: blockDims = [opDim]
# assume index given as vec_expr refers to a Hilbert-space state index, so "reduced-std" basis
vecInReducedStdBasis = _np.zeros((opDim, 1), 'd')
#set all diagonal elements of density matrix to 1.0 (end result = identity density mx)
start = 0; vecIndex = 0
for blockVecDim in blockDims:
blockDim = int(_np.sqrt(blockVecDim)) # vec -> matrix dim
for i in range(start, start + blockDim):
for j in range(start, start + blockDim):
if i == j: vecInReducedStdBasis[vecIndex, 0] = 1.0 # set diagonal element of density matrix
vecIndex += 1
start += blockDim
return _bt.change_basis(vecInReducedStdBasis, "std", basis)
def create_operation(op_expr, state_space, basis="pp", parameterization="full", evotype='default'):
"""
Build an operation object from an expression.
Parameters
----------
op_expr : string
expression for the gate to build. String is first split into parts
delimited by the colon (:) character, which are composed together to
create the final gate. Each part takes one of the allowed forms:
- I(ssl_0, ...) = identity operation on one or more state space labels
(ssl_i)
- X(theta, ssl) = x-rotation by theta radians of qubit labeled by ssl
- Y(theta, ssl) = y-rotation by theta radians of qubit labeled by ssl
- Z(theta, ssl) = z-rotation by theta radians of qubit labeled by ssl
- CX(theta, ssl0, ssl1) = controlled x-rotation by theta radians. Acts
on qubit labeled by ssl1 with ssl0 being the control.
- CY(theta, ssl0, ssl1) = controlled y-rotation by theta radians. Acts
on qubit labeled by ssl1 with ssl0 being the control.
- CZ(theta, ssl0, ssl1) = controlled z-rotation by theta radians. Acts
on qubit labeled by ssl1 with ssl0 being the control.
- CNOT(ssl0, ssl1) = standard controlled-not gate. Acts on qubit
labeled by ssl1 with ssl0 being the control.
- CPHASE(ssl0, ssl1) = standard controlled-phase gate. Acts on qubit
labeled by ssl1 with ssl0 being the control.
- LX(theta, i0, i1) = leakage between states i0 and i1. Implemented as
an x-rotation between states with integer indices i0 and i1 followed
by complete decoherence between the states.
state_space : StateSpace
The state space that the created operation should act upon.
basis : str or Basis
The basis the returned operation should be represented in.
parameterization : {"full","TP","static"}, optional
How to parameterize the resulting gate.
- "full" = return a FullArbitraryOp.
- "TP" = return a FullTPOp.
- "static" = return a StaticArbitraryOp.
evotype : Evotype or str, optional
The evolution type of this operation, describing how states are
represented. The special value `"default"` is equivalent
to specifying the value of `pygsti.evotypes.Evotype.default_evotype`.
Returns
-------
LinearOperator
A gate object representing the gate given by op_expr in the desired
basis.
"""
# op_expr can contain single qubit ops: X(theta) ,Y(theta) ,Z(theta)
# two qubit ops: CNOT
# clevel qubit ops: Leak
# two clevel ops: Flip
# each of which is given additional parameters specifying which indices it acts upon
#Working with a StateSpaceLabels object gives us access to all the info we'll need later
state_space = _statespace.StateSpace.cast(state_space)
if isinstance(basis, str):
basis = _Basis.cast(basis, state_space)
assert(state_space.dim == basis.dim), \
"State space labels dim (%s) != basis dim (%s)" % (state_space.dim, basis.dim)
# ------------------------------------------------------------------------------------------------------------------
# -- Helper Functions ----------------------------------------------------------------------------------------------
# ------------------------------------------------------------------------------------------------------------------
def | (lbl):
""" Convert integer-strings to integers in state space label """
try: return int(lbl)
except: return lbl.strip()
def to_labels(lbls):
""" Convert integer-strings to integers in state space labels """
return [to_label(lbl) for lbl in lbls]
# ------------------------------------------------------------------------------------------------------------------
# -- End Helper Functions ------------------------------------------------------------------------------------------
# ------------------------------------------------------------------------------------------------------------------
#FUTURE?: type_preferences = ('static standard', 'static clifford', 'static unitary')
build_evotype = 'default'
superop_mxs_in_basis = []
exprTerms = op_expr.split(':')
for exprTerm in exprTerms:
l = exprTerm.index('('); r = exprTerm.rindex(')')
opName = exprTerm[0:l]
argsStr = exprTerm[l + 1:r]
args = argsStr.split(',')
if opName == "I":
# qubit labels (TODO: what about 'L' labels? -- not sure if they work with this...)
labels = to_labels(args)
stateSpaceUDim = int(_np.product([state_space.label_udimension(l) for l in labels]))
# a complex 2x2 mx unitary for the identity in Pauli-product basis
Uop = _op.StaticUnitaryOp(_np.identity(stateSpaceUDim, 'complex'), 'pp', build_evotype)
#FUTURE?:
# stdname = 'Gi' if (stateSpaceUDim == 2) else None
# Uop = _op.create_from_unitary_mx(_np.identity(stateSpaceUDim, complex), type_preferences, 'pp',
# stdname=stdname, evotype=evotype)
# a complex 2^num_qubits x 2^num_qubits mx unitary on full space in Pauli-product basis
Uop_embed = _op.EmbeddedOp(state_space, labels, Uop)
# a real 4^num_qubits x 4^num_qubits mx superoperator in Pauli-product basis
superop_mx_pp = Uop_embed.to_dense(on_space='HilbertSchmidt')
# a real 4^num_qubits x 4^num_qubits mx superoperator in final basis
superop_mx_in_basis = _bt.change_basis(superop_mx_pp, 'pp', basis)
elif opName == "D":
# like 'I', but only parameterize the diagonal elements - so can be a depolarization-type map
raise NotImplementedError("Removed temporarily - need to update using embedded gates")
# # qubit labels (TODO: what about 'L' labels? -- not sure if they work with this...)
# labels = to_labels(args)
# stateSpaceDim = sslbls.product_dim(labels)
# if parameterization not in ("linear","linearTP"):
# raise ValueError("'D' gate only makes sense to use when and parameterization == 'linear'")
# if defaultI2P == "TP":
# # parameterize only the diagonals els after the first
# indicesToParameterize = [ (i,i) for i in range(1,stateSpaceDim**2) ]
# else:
# # parameterize only the diagonals els
# indicesToParameterize = [ (i,i) for i in range(0,stateSpaceDim**2) ]
# # *real* 4x4 mx in Pauli-product basis -- still just the identity!
# pp_opMx = _np.identity(stateSpaceDim**2, 'd')
# # pp_opMx assumed to be in the Pauli-product basis
# opTermInFinalBasis = embed_operation(pp_opMx, tuple(labels), indicesToParameterize)
elif opName in ('X', 'Y', 'Z'): # single-qubit gate names
assert(len(args) == 2) # theta, qubit-index
theta = eval(args[0], {"__builtins__": None}, {'pi': _np.pi})
label = to_label(args[1])
assert(state_space.label_dimension(label) == 4), "%s gate must act on qubits!" % opName
if opName == 'X': ex = -1j * theta * sigmax / 2
elif opName == 'Y': ex = -1j * theta * sigmay / 2
elif opName == 'Z': ex = -1j * theta * sigmaz / 2
# complex 2x2 unitary matrix operating on single qubit in Pauli-product basis
Uop = _op.StaticUnitaryOp(_spl.expm(ex), 'pp', build_evotype)
#FUTURE?:
#stdname = None
#if _np.isclose(theta, _np.pi): stdname = 'G%spi' % opName.lower()
#elif _np.isclose(theta, _np.pi/2): stdname = 'G%spi2' % opName.lower()
# Uop = _op.create_from_unitary_mx(_spl.expm(ex), type_preferences, 'pp', stdname=stdname, evotype=evotype)
# a complex 2^num_qubits x 2^num_qubits mx unitary on full space in Pauli-product basis
Uop_embed = _op.EmbeddedOp(state_space, (label,), Uop)
# a real 4^num_qubits x 4^num_qubits mx superoperator in Pauli-product basis
superop_mx_pp = Uop_embed.to_dense(on_space='HilbertSchmidt')
# a real 4^num_qubits x 4^num_qubits mx superoperator in final basis
superop_mx_in_basis = _bt.change_basis(superop_mx_pp, 'pp', basis)
elif opName == 'N': # more general single-qubit gate
assert(len(args) == 5) # theta, sigmaX-coeff, sigmaY-coeff, sigmaZ-coeff, qubit-index
theta = eval(args[0], {"__builtins__": None}, {'pi': _np.pi, 'sqrt': _np.sqrt})
sxCoeff = eval(args[1], {"__builtins__": None}, {'pi': _np.pi, 'sqrt': _np.sqrt})
syCoeff = eval(args[2], {"__builtins__": None}, {'pi': _np.pi, 'sqrt': _np.sqrt})
szCoeff = eval(args[3], {"__builtins__": None}, {'pi': _np.pi, 'sqrt': _np.sqrt})
label = to_label(args[4])
assert(state_space.label_dimension(label) == 4), "%s gate must act on qubits!" % opName
ex = -1j * theta * (sxCoeff * sigmax / 2. + syCoeff * sigmay / 2. + szCoeff * sigmaz / 2.)
# complex 2x2 unitary matrix operating on single qubit in Pauli-product basis
Uop = _op.StaticUnitaryOp(_spl.expm(ex), 'pp', evotype=build_evotype)
#FUTURE?: Uop = _op.create_from_unitary_mx(_spl.expm(ex), type_preferences, 'pp', evotype=evotype)
# a complex 2^num_qubits x 2^num_qubits mx unitary on full space in Pauli-product basis
Uop_embed = _op.EmbeddedOp(state_space, (label,), Uop)
# a real 4^num_qubits x 4^num_qubits mx superoperator in Pauli-product basis
superop_mx_pp = Uop_embed.to_dense(on_space='HilbertSchmidt')
# a real 4^num_qubits x 4^num_qubits mx superoperator in final basis
superop_mx_in_basis = _bt.change_basis(superop_mx_pp, 'pp', basis)
elif opName in ('CX', 'CY', 'CZ', 'CNOT', 'CPHASE'): # two-qubit gate names
if opName in ('CX', 'CY', 'CZ'):
assert(len(args) == 3) # theta, qubit-label1, qubit-label2
theta = eval(args[0], {"__builtins__": None}, {'pi': _np.pi})
label1 = to_label(args[1]); label2 = to_label(args[2])
if opName == 'CX': ex = -1j * theta * sigmax / 2
elif opName == 'CY': ex = -1j * theta * sigmay / 2
elif opName == 'CZ': ex = -1j * theta * sigmaz / 2
Utarget = _spl.expm(ex) # 2x2 unitary matrix operating on target qubit
else: # opName in ('CNOT','CPHASE')
assert(len(args) == 2) # qubit-label1, qubit-label2
label1 = to_label(args[0]); label2 = to_label(args[1])
if opName == 'CNOT':
Utarget = _np.array([[0, 1],
[1, 0]], 'd')
elif opName == 'CPHASE':
Utarget = _np.array([[1, 0],
[0, -1]], 'd')
# 4x4 unitary matrix operating on isolated two-qubit space
U = _np.identity(4, 'complex'); U[2:, 2:] = Utarget
assert(state_space.label_dimension(label1) == 4 and state_space.label_dimension(label2) == 4), \
"%s gate must act on qubits!" % opName
# complex 4x4 unitary matrix operating on two-qubit in Pauli-product basis
Uop = _op.StaticUnitaryOp(U, 'pp', build_evotype)
#FUTURE?:
# if opName == "CNOT": stdname = "Gcnot"
# elif opName == "CPHASE": stdname = "Gcphase"
# else: stdname = None
# Uop = _op.create_from_unitary_mx(U, type_preferences, 'pp', stdname=stdname, evotype=evotype)
# a complex 2^num_qubits x 2^num_qubits mx unitary on full space
Uop_embed = _op.EmbeddedOp(state_space, [label1, label2], Uop)
# a real 4^num_qubits x 4^num_qubits mx superoperator in Pauli-product basis
superop_mx_pp = Uop_embed.to_dense(on_space='HilbertSchmidt')
# a real 4^num_qubits x 4^num_qubits mx superoperator in final basis
superop_mx_in_basis = _bt.change_basis(superop_mx_pp, 'pp', basis)
elif opName == "LX": # TODO - better way to describe leakage?
assert(len(args) == 3) # theta, dmIndex1, dmIndex2 - X rotation between any two density matrix basis states
theta = eval(args[0], {"__builtins__": None}, {'pi': _np.pi})
i1 = int(args[1]) # row/column index of a single *state* within the density matrix
i2 = int(args[2]) # row/column index of a single *state* within the density matrix
ex = -1j * theta * sigmax / 2
Uop = _spl.expm(ex) # 2x2 unitary matrix operating on the i1-th and i2-th states of the state space basis
opDim = basis.dim
dmDim = int(_np.sqrt(basis.elsize)) # matrix dim of the "embedding space"
if isinstance(basis, _DirectSumBasis):
blockDims = [c.dim for c in basis.component_bases]
else: blockDims = [opDim]
Utot = _np.identity(dmDim, 'complex')
Utot[i1, i1] = Uop[0, 0]
Utot[i1, i2] = Uop[0, 1]
Utot[i2, i1] = Uop[1, 0]
Utot[i2, i2] = Uop[1, 1]
# dmDim^2 x dmDim^2 mx operating on vectorized total density matrix
opTermInStdBasis = _ot.unitary_to_process_mx(Utot)
# contract [3] to [2, 1]
embedded_std_basis = _Basis.cast('std', 9) # [2]
std_basis = _Basis.cast('std', blockDims) # std basis w/blockdim structure, i.e. [4,1]
opTermInReducedStdBasis = _bt.resize_std_mx(opTermInStdBasis, 'contract',
embedded_std_basis, std_basis)
superop_mx_in_basis = _bt.change_basis(opTermInReducedStdBasis, std_basis, basis)
else: raise ValueError("Invalid gate name: %s" % opName)
superop_mxs_in_basis.append(superop_mx_in_basis)
#Note: expressions are listed in "matrix composition order"
final_superop_mx = superop_mxs_in_basis[0]
for mx in superop_mxs_in_basis[1:]:
final_superop_mx = _np.dot(final_superop_mx, mx)
if basis.real:
assert(_np.linalg.norm(final_superop_mx.imag) < 1e-6), "Operation matrix should be real but isn't!"
final_superop_mx = _np.real(final_superop_mx)
return _op.create_from_superop_mx(final_superop_mx, parameterization, basis,
evotype=evotype, state_space=state_space)
def _create_explicit_model_from_expressions(state_space, basis,
op_labels, op_expressions,
prep_labels=('rho0',), prep_expressions=('0',),
effect_labels='standard', effect_expressions='standard',
povm_labels='Mdefault', gate_type="full", prep_type="auto",
povm_type="auto", instrument_type="auto", evotype='default'):
"""
Build a new Model given lists of operation labels and expressions.
Parameters
----------
state_space : StateSpace
The state space for this model.
basis : Basis object
The source and destination basis, respectively. Allowed
values are Matrix-unit (std), Gell-Mann (gm), Pauli-product (pp),
and Qutrit (qt) (or a custom basis object).
op_labels : list of strings
A list of labels for each created gate in the final model. To
conform with text file parsing conventions these names should begin
with a capital G and can be followed by any number of lowercase
characters, numbers, or the underscore character.
op_expressions : list of strings
A list of gate expressions, each corresponding to a operation label in
op_labels, which determine what operation each gate performs (see
documentation for :meth:`create_operation`).
prep_labels : list of string, optional
A list of labels for each created state preparation in the final
model. To conform with conventions these labels should begin with
"rho".
prep_expressions : list of strings, optional
A list of vector expressions for each state preparation vector (see
documentation for :meth:`_create_spam_vector`).
effect_labels : list, optional
If `povm_labels` is a string, then this is just a list of the effect
(outcome) labels for the single POVM. If `povm_labels` is a tuple,
then `effect_labels` must be a list of lists of effect labels, each
list corresponding to a POVM. If set to the special string `"standard"`
then the length-n binary strings are used when the state space consists
of n qubits (e.g. `"000"`, `"001"`, ... `"111"` for 3 qubits) and
the labels `"0"`, `"1"`, ... `"<dim>"` are used, where `<dim>`
is the dimension of the state space, in all non-qubit cases.
effect_expressions : list, optional
A list or list-of-lists of (string) vector expressions for each POVM
effect vector (see documentation for :meth:`_create_spam_vector`). Expressions
correspond to labels in `effect_labels`. If set to the special string
`"standard"`, then the expressions `"0"`, `"1"`, ... `"<dim>"` are used,
where `<dim>` is the dimension of the state space.
povm_labels : list or string, optional
A list of POVM labels, or a single (string) label. In the latter case,
only a single POVM is created and the format of `effect_labels` and
`effect_expressions` is simplified (see above).
parameterization : {"full","TP","static"}, optional
How to parameterize the gates of the resulting Model (see
documentation for :meth:`create_operation`).
evotype : Evotype or str, optional
The evolution type of this model, describing how states are
represented. The special value `"default"` is equivalent
to specifying the value of `pygsti.evotypes.Evotype.default_evotype`.
Returns
-------
Model
The created model.
"""
#defP = "TP" if (parameterization in ("TP","linearTP")) else "full"
state_space = _statespace.StateSpace.cast(state_space)
ret = _emdl.ExplicitOpModel(state_space, basis.copy(), default_gate_type=gate_type,
default_prep_type=prep_type, default_povm_type=povm_type,
default_instrument_type=instrument_type, evotype=evotype)
#prep_prefix="rho", effect_prefix="E", gate_prefix="G")
if prep_type == "auto":
prep_type = _state.state_type_from_op_type(gate_type)
if povm_type == "auto":
povm_type = _povm.povm_type_from_op_type(gate_type)
if instrument_type == "auto":
instrument_type = _instrument.instrument_type_from_op_type(gate_type)
for label, rhoExpr in zip(prep_labels, prep_expressions):
vec = create_spam_vector(rhoExpr, state_space, basis)
ret.preps[label] = _state.create_from_dmvec(vec, prep_type, basis, evotype, state_space)
if isinstance(povm_labels, str):
povm_labels = [povm_labels]
effect_labels = [effect_labels]
effect_expressions = [effect_expressions]
dmDim = int(_np.sqrt(basis.dim)) # "densitymx" evotype assumed... FIX?
for povmLbl, ELbls, EExprs in zip(povm_labels,
effect_labels, effect_expressions):
effect_vecs = {}
if ELbls == "standard":
qubit_dim = 4
if state_space.num_tensor_product_blocks == 1 and \
all([ldim == qubit_dim for ldim in state_space.tensor_product_block_dimensions(0)]):
# a single tensor product block comprised of qubits: '000', '001', etc.
nQubits = len(state_space.tensor_product_block_dimensions(0))
ELbls = [''.join(t) for t in _itertools.product(('0', '1'), repeat=nQubits)]
else:
ELbls = list(map(str, range(dmDim))) # standard = 0,1,...,dmDim
if EExprs == "standard":
EExprs = list(map(str, range(dmDim))) # standard = 0,1,...,dmDim
effect_vecs = {label: create_spam_vector(expr, state_space, basis)
for label, expr in zip(ELbls, EExprs)}
if len(effect_vecs) > 0: # don't add POVMs with 0 effects
ret.povms[povmLbl] = _povm.create_from_dmvecs(effect_vecs, povm_type, basis, evotype, state_space)
for (opLabel, opExpr) in zip(op_labels, op_expressions):
ret.operations[opLabel] = create_operation(opExpr, state_space, basis, gate_type, evotype)
if gate_type == "full":
ret.default_gauge_group = _gg.FullGaugeGroup(ret.state_space, evotype)
elif gate_type == "full TP":
ret.default_gauge_group = _gg.TPGaugeGroup(ret.state_space, evotype)
elif gate_type == 'CPTP':
ret.default_gauge_group = _gg.UnitaryGaugeGroup(ret.state_space, basis, evotype)
else:
ret.default_gauge_group = _gg.TrivialGaugeGroup(ret.state_space)
ret._clean_paramvec()
return ret
def create_explicit_model_from_expressions(state_space,
op_labels, op_expressions,
prep_labels=('rho0',), prep_expressions=('0',),
effect_labels='standard', effect_expressions='standard',
povm_labels='Mdefault', basis="auto", gate_type="full",
prep_type="auto", povm_type="auto", instrument_type="auto",
evotype='default'):
"""
Build a new :class:`ExplicitOpModel` given lists of labels and expressions.
Parameters
----------
state_space : StateSpace
the state space for the model.
op_labels : list of strings
A list of labels for each created gate in the final model. To
conform with text file parsing conventions these names should begin
with a capital G and can be followed by any number of lowercase
characters, numbers, or the underscore character.
op_expressions : list of strings
A list of gate expressions, each corresponding to a operation label in
op_labels, which determine what operation each gate performs (see
documentation for :meth:`create_operation`).
prep_labels : list of string
A list of labels for each created state preparation in the final
model. To conform with conventions these labels should begin with
"rho".
prep_expressions : list of strings
A list of vector expressions for each state preparation vector (see
documentation for :meth:`_create_spam_vector`).
effect_labels : list, optional
If `povm_labels` is a string, then this is just a list of the effect
(outcome) labels for the single POVM. If `povm_labels` is a tuple,
then `effect_labels` must be a list of lists of effect labels, each
list corresponding to a POVM. If set to the special string `"standard"`
then the length-n binary strings are used when the state space consists
of n qubits (e.g. `"000"`, `"001"`, ... `"111"` for 3 qubits) and
the labels `"0"`, `"1"`, ... `"<dim>"` are used, where `<dim>`
is the dimension of the state space, in all non-qubit cases.
effect_expressions : list, optional
A list or list-of-lists of (string) vector expressions for each POVM
effect vector (see documentation for :meth:`_create_spam_vector`). Expressions
correspond to labels in `effect_labels`. If set to the special string
`"standard"`, then the expressions `"0"`, `"1"`, ... `"<dim>"` are used,
where `<dim>` is the dimension of the state space.
povm_labels : list or string, optional
A list of POVM labels, or a single (string) label. In the latter case,
only a single POVM is created and the format of `effect_labels` and
`effect_expressions` is simplified (see above).
basis : {'gm','pp','std','qt','auto'}, optional
the basis of the matrices in the returned Model
- "std" = operation matrix operates on density mx expressed as sum of matrix
units
- "gm" = operation matrix operates on dentity mx expressed as sum of
normalized Gell-Mann matrices
- "pp" = operation matrix operates on density mx expresses as sum of
tensor-product of Pauli matrices
- "qt" = operation matrix operates on density mx expressed as sum of
Qutrit basis matrices
- "auto" = "pp" if possible (integer num of qubits), "qt" if density
matrix dim == 3, and "gm" otherwise.
parameterization : {"full","TP"}, optional
How to parameterize the gates of the resulting Model (see
documentation for :meth:`create_operation`).
evotype : Evotype or str, optional
The evolution type of this model, describing how states are
represented. The special value `"default"` is equivalent
to specifying the value of `pygsti.evotypes.Evotype.default_evotype`.
Returns
-------
ExplicitOpModel
The created model.
"""
#Note: so far, all allowed `parameterization` values => densitymx evotype
state_space = _statespace.StateSpace.cast(state_space)
stateSpaceDim = state_space.dim
# Note: what about state_space_labels.tpb_dims?
if basis == "auto":
if _np.isclose(_np.log2(stateSpaceDim) / 2,
round(_np.log2(stateSpaceDim) / 2)):
basis = "pp"
elif stateSpaceDim == 9:
basis = "qt"
else: basis = "gm"
return _create_explicit_model_from_expressions(state_space,
_Basis.cast(basis, state_space),
op_labels, op_expressions,
prep_labels, prep_expressions,
effect_labels, effect_expressions,
povm_labels, gate_type=gate_type,
prep_type=prep_type, povm_type=povm_type,
instrument_type=instrument_type, evotype=evotype)
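# Example usage (a hedged sketch, not part of the API surface): build a 1-qubit model
# from expression strings. The 'Q0' label and the Gi/Gx/Gy names are illustrative choices.
#
#   mdl = create_explicit_model_from_expressions(
#       [('Q0',)],                              # state space: a single qubit labeled 'Q0'
#       ['Gi', 'Gx', 'Gy'],                     # operation labels
#       ["I(Q0)", "X(pi/2,Q0)", "Y(pi/2,Q0)"])  # expressions parsed by create_operation
#
# The result contains a 'rho0' preparation, an 'Mdefault' POVM with '0'/'1' effects,
# and fully parameterized Gi, Gx, Gy gates (gate_type defaults to "full").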
def create_explicit_alias_model(mdl_primitives, alias_dict):
"""
Creates a model by applying aliases to an existing model.
The new model is created by composing the gates of an existing `Model`,
`mdl_primitives`, according to a dictionary of `Circuit`s, `alias_dict`.
The keys of `alias_dict` are the operation labels of the returned `Model`.
state preparations and POVMs are unaltered, and simply copied from `mdl_primitives`.
Parameters
----------
mdl_primitives : Model
A Model containing the "primitive" gates (those used to compose
the gates of the returned model).
alias_dict : dictionary
A dictionary whose keys are strings and values are Circuit objects
specifying sequences of primitive gates. Each key,value pair specifies
the composition rule for creating a gate in the returned model.
Returns
-------
Model
A model whose gates are compositions of primitive gates and whose
spam operations are the same as those of `mdl_primitives`.
"""
mdl_new = mdl_primitives.copy()
for gl in mdl_primitives.operations.keys():
del mdl_new.operations[gl] # remove all gates from mdl_new
for gl, opstr in alias_dict.items():
mdl_new.operations[gl] = mdl_primitives.sim.product(opstr)
#Creates fully parameterized gates by default...
mdl_new._clean_paramvec()
return mdl_new
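# Example usage (hedged sketch): define composite gates as sequences of primitives.
# `Circuit` stands for pyGSTi's circuit class and the labels are illustrative only.
#
#   aliases = {'Gx2': Circuit(['Gx', 'Gx'])}   # 'Gx2' = two applications of 'Gx'
#   mdl_aliased = create_explicit_alias_model(mdl_primitives, aliases)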
def create_explicit_model(processor_spec, custom_gates=None,
depolarization_strengths=None, stochastic_error_probs=None, lindblad_error_coeffs=None,
depolarization_parameterization='depolarize', stochastic_parameterization='stochastic',
lindblad_parameterization='auto',
evotype="default", simulator="auto",
ideal_gate_type='auto', ideal_spam_type='computational',
embed_gates=False, basis='pp'):
modelnoise = _build_modelnoise_from_args(depolarization_strengths, stochastic_error_probs, lindblad_error_coeffs,
depolarization_parameterization, stochastic_parameterization,
lindblad_parameterization, allow_nonlocal=True)
return _create_explicit_model(processor_spec, modelnoise, custom_gates, evotype,
simulator, ideal_gate_type, ideal_spam_type, ideal_spam_type, embed_gates, basis)
def _create_explicit_model(processor_spec, modelnoise, custom_gates=None, evotype="default", simulator="auto",
ideal_gate_type='auto', ideal_prep_type='auto', ideal_povm_type='auto',
embed_gates=False, basis='pp'):
qubit_labels = processor_spec.qubit_labels
state_space = _statespace.QubitSpace(qubit_labels)
evotype = _Evotype.cast(evotype)
modelnoise = _OpModelNoise.cast(modelnoise)
modelnoise.reset_access_counters()
if custom_gates is None:
custom_gates = {}
if ideal_gate_type == "auto":
ideal_gate_type = ('static standard', 'static clifford', 'static unitary')
if ideal_prep_type == "auto":
ideal_prep_type = _state.state_type_from_op_type(ideal_gate_type)
if ideal_povm_type == "auto":
ideal_povm_type = _povm.povm_type_from_op_type(ideal_gate_type)
def _embed_unitary(statespace, target_labels, unitary):
dummyop = _op.EmbeddedOp(statespace, target_labels,
_op.StaticUnitaryOp(unitary, basis='pp', evotype="statevec_slow")) # basis hardcode?
return dummyop.to_dense("Hilbert")
local_gates = _setup_local_gates(processor_spec, evotype, None, {}, ideal_gate_type) # no custom *local* gates
ret = _emdl.ExplicitOpModel(state_space, basis, default_gate_type=ideal_gate_type, evotype=evotype,
simulator=simulator)
# Special rule: when initializing an explicit model, if the processor spec has an implied global idle
# gate (e.g. "(idle)"), then the created model instead has an empty-tuple Label as the key for this op.
global_idle_name = processor_spec.global_idle_gate_name
if (global_idle_name is not None) and global_idle_name.startswith('(') and global_idle_name.endswith(')'):
gn_to_make_emptytup = global_idle_name
else:
gn_to_make_emptytup = None
for gn, gate_unitary in processor_spec.gate_unitaries.items():
gate_is_factory = callable(gate_unitary)
resolved_avail = processor_spec.resolved_availability(gn)
if callable(resolved_avail) or resolved_avail == '*':
assert (embed_gates), "Cannot create factories with `embed_gates=False` yet!"
key = _label.Label(gn) if (gn != gn_to_make_emptytup) else _label.Label(())
allowed_sslbls_fn = resolved_avail if callable(resolved_avail) else None
gate_nQubits = processor_spec.gate_num_qubits(gn)
ideal_factory = _opfactory.EmbeddingOpFactory(
state_space, local_gates[gn], num_target_labels=gate_nQubits, allowed_sslbls_fn=allowed_sslbls_fn)
noiseop = modelnoise.create_errormap(key, evotype, state_space) # No target indices... just local errs?
factory = ideal_factory if (noiseop is None) else _op.ComposedOpFactory([ideal_factory, noiseop])
ret.factories[key] = factory
else: # resolved_avail is a list/tuple of available sslbls for the current gate/factory
for inds in resolved_avail: # inds are target qubit labels
key = _label.Label(()) if (inds is None and gn == gn_to_make_emptytup) else _label.Label(gn, inds)
if key in custom_gates: # allow custom_gates to specify gate elements directly
if isinstance(custom_gates[key], _opfactory.OpFactory):
ret.factories[key] = custom_gates[key]
elif isinstance(custom_gates[key], _op.LinearOperator):
ret.operations[key] = custom_gates[key]
else: # presumably a numpy array or something like it.
ret.operations[key] = _op.StaticArbitraryOp(custom_gates[key], evotype,
state_space) # static gates by default
continue
if gate_is_factory:
assert(embed_gates), "Cannot create factories with `embed_gates=False` yet!"
# TODO: check for modelnoise on *local* factory, i.e. create_errormap(gn, ...)??
if inds is None or inds == tuple(qubit_labels): # then no need to embed
ideal_factory = local_gates[gn]
else:
ideal_factory = _opfactory.EmbeddedOpFactory(state_space, inds, local_gates[gn])
noiseop = modelnoise.create_errormap(key, evotype, state_space, target_labels=inds)
factory = ideal_factory if (noiseop is None) else _op.ComposedOpFactory([ideal_factory, noiseop])
ret.factories[key] = factory
else:
if inds is None or inds == tuple(qubit_labels): # then no need to embed
if isinstance(gate_unitary, (int, _np.int64)): # interpret gate_unitary as identity
assert(gate_unitary == len(qubit_labels)), \
"Idle unitary as int should be on all qubits for %s" % (str(gn))
ideal_gate = _op.ComposedOp([], evotype, state_space) # (identity gate on *all* qubits)
else:
ideal_gate = _op.create_from_unitary_mx(gate_unitary, ideal_gate_type, 'pp',
None, evotype, state_space)
else:
if embed_gates:
ideal_gate = local_gates[gn]
ideal_gate = _op.EmbeddedOp(state_space, inds, ideal_gate)
else:
if isinstance(gate_unitary, (int, _np.int64)): # interpret gate_unitary as identity
gate_unitary = _np.identity(2**gate_unitary, 'd') # turn into explicit identity op
if gate_unitary.shape[0] == state_space.udim: # no need to embed!
embedded_unitary = gate_unitary
else:
embedded_unitary = _embed_unitary(state_space, inds, gate_unitary)
ideal_gate = _op.create_from_unitary_mx(embedded_unitary, ideal_gate_type, 'pp',
None, evotype, state_space)
#TODO: check for modelnoise on *local* gate, i.e. create_errormap(gn, ...)??
noiseop = modelnoise.create_errormap(key, evotype, state_space, target_labels=inds)
layer = _op.ComposedOp([ideal_gate, noiseop]) if (noiseop is not None) else ideal_gate
ret.operations[key] = layer
# SPAM:
local_noise = False; independent_gates = True; independent_spam = True
prep_layers, povm_layers = _create_spam_layers(processor_spec, modelnoise, local_noise,
ideal_prep_type, ideal_povm_type, evotype,
state_space, independent_gates, independent_spam)
for k, v in prep_layers.items():
ret.preps[k] = v
for k, v in povm_layers.items():
ret.povms[k] = v
modelnoise.warn_about_zero_counters()
ret._clean_paramvec()
return ret
def _create_spam_layers(processor_spec, modelnoise, local_noise,
ideal_prep_type, ideal_povm_type, evotype, state_space, independent_gates, independent_spam):
""" local_noise=True creates lindblad ops that are embedded & composed 1Q ops, and assumes
that modelnoise specifies 1Q noise. local_noise=False assumes modelnoise specifies n-qubit noise"""
qubit_labels = processor_spec.qubit_labels
num_qubits = processor_spec.num_qubits
singleQ_state_space = _statespace.default_space_for_udim(2) # single qubit state space
# Step 1 -- get the ideal prep and POVM, created as the types we want
# Step 2 -- add noise, by composing ideal with a noise operation (if desired)
prep_layers = {}
povm_layers = {}
def _add_prep_noise(prep_ops):
""" Adds one or more noise ops to prep_ops lists (to compose later) """
if local_noise: # then assume modelnoise specifies 1Q errors
prep_noiseop1Q = modelnoise.create_errormap('prep', evotype, singleQ_state_space, target_labels=None)
if prep_noiseop1Q is not None:
err_gates = [prep_noiseop1Q.copy() for i in range(num_qubits)] \
if independent_gates else [prep_noiseop1Q] * num_qubits
prep_ops.extend([_op.EmbeddedOp(state_space, [qubit_labels[i]], err_gates[i])
for i in range(num_qubits)])
else: # use modelnoise to construct n-qubit noise
prepNoiseMap = modelnoise.create_errormap('prep', evotype, state_space, target_labels=None,
qubit_graph=processor_spec.qubit_graph)
if prepNoiseMap is not None: prep_ops.append(prepNoiseMap)
def _add_povm_noise(povm_ops):
""" Adds one or more noise ops to prep_ops lists (to compose later) """
if local_noise: # then assume modelnoise specifies 1Q errors
povm_noiseop1Q = modelnoise.create_errormap('povm', evotype, singleQ_state_space, target_labels=None)
if povm_noiseop1Q is not None:
err_gates = [povm_noiseop1Q.copy() for i in range(num_qubits)] \
if independent_gates else [povm_noiseop1Q] * num_qubits
povm_ops.extend([_op.EmbeddedOp(state_space, [qubit_labels[i]], err_gates[i])
for i in range(num_qubits)])
else: # use modelnoise to construct n-qubit noise
povmNoiseMap = modelnoise.create_errormap('povm', evotype, state_space, target_labels=None,
qubit_graph=processor_spec.qubit_graph)
if povmNoiseMap is not None: povm_ops.append(povmNoiseMap)
def _add_to_prep_layers(ideal_prep, prep_ops):
""" Adds noise elements to prep_layers """
if len(prep_ops) == 0:
prep_layers['rho0'] = ideal_prep
elif len(prep_ops) == 1:
prep_layers['rho0'] = _state.ComposedState(ideal_prep, prep_ops[0])
else:
prep_layers['rho0'] = _state.ComposedState(ideal_prep, _op.ComposedOp(prep_ops))
def _add_to_povm_layers(ideal_povm, povm_ops):
""" Adds noise elements to povm_layers """
if len(povm_ops) == 0:
povm_layers['Mdefault'] = ideal_povm
elif len(povm_ops) == 1:
povm_layers['Mdefault'] = _povm.ComposedPOVM(povm_ops[0], ideal_povm, 'pp')
else:
povm_layers['Mdefault'] = _povm.ComposedPOVM(_op.ComposedOp(povm_ops), ideal_povm, 'pp')
def _create_nq_noise(lndtype):
if local_noise:
# create a 1-qubit exp(errorgen) that is applied to each qubit independently
errgen_1Q = _op.LindbladErrorgen.from_error_generator(singleQ_state_space.dim, lndtype, 'pp', 'pp',
truncate=True, evotype=evotype, state_space=None)
err_gateNQ = _op.ComposedOp([_op.EmbeddedOp(state_space, [qubit_labels[i]],
_op.ExpErrorgenOp(errgen_1Q.copy()))
for i in range(num_qubits)], evotype, state_space)
else:
# create an n-qubit exp(errorgen)
errgen_NQ = _op.LindbladErrorgen.from_error_generator(state_space.dim, lndtype, 'pp', 'pp',
truncate=True, evotype=evotype,
state_space=state_space)
err_gateNQ = _op.ExpErrorgenOp(errgen_NQ)
return err_gateNQ
# Here's where the actual logic starts. The above functions avoid repeated blocks within the different
# cases below.
# Prep logic
if isinstance(ideal_prep_type, (tuple, list)): ideal_prep_type = ideal_prep_type[0] # HACK to support multiple vals
if ideal_prep_type == 'computational' or ideal_prep_type.startswith('lindblad '):
ideal_prep = _state.ComputationalBasisState([0] * num_qubits, 'pp', evotype, state_space)
prep_ops_to_compose = []
if ideal_prep_type.startswith('lindblad '): # then add a composed exp(errorgen) to computational SPAM
lndtype = ideal_prep_type[len('lindblad '):]
err_gateNQ = _create_nq_noise(lndtype)
prep_ops_to_compose.append(err_gateNQ)
# Add noise
_add_prep_noise(prep_ops_to_compose)
#Add final ops to returned dictionaries
_add_to_prep_layers(ideal_prep, prep_ops_to_compose)
elif ideal_prep_type.startswith('tensor product '):
#Note: with "tensor product <X>" types, e.g. "tensor product static", we assume modelnoise specifies just
# a 1Q noise operation, even when `local_noise=False`
vectype = ideal_prep_type[len('tensor product '):]
v0, v1 = _np.array([1, 0], 'd'), _np.array([0, 1], 'd')
ideal_prep1Q = _state.create_from_pure_vector(v0, vectype, 'pp', evotype, state_space=None)
prep_factors = [ideal_prep1Q.copy() for i in range(num_qubits)]
# Add noise
prep_noiseop1Q = modelnoise.create_errormap('prep', evotype, singleQ_state_space, target_labels=None)
if prep_noiseop1Q is not None:
prep_factors = [_state.ComposedState(
factor, (prep_noiseop1Q.copy() if independent_spam else prep_noiseop1Q)) for factor in prep_factors]
prep_layers['rho0'] = _state.TensorProductState(prep_factors, state_space)
else: # assume ideal_spam_type is a valid 'vectype' for creating n-qubit state vectors & POVMs
vectype = ideal_prep_type
vecs = [] # all the basis vectors for num_qubits
for i in range(2**num_qubits):
v = _np.zeros(2**num_qubits, 'd'); v[i] = 1.0
vecs.append(v)
ideal_prep = _state.create_from_pure_vector(vecs[0], vectype, 'pp', evotype, state_space=state_space)
# Add noise
prep_ops_to_compose = []
_add_prep_noise(prep_ops_to_compose)
# Add final ops to returned dictionaries
_add_to_prep_layers(ideal_prep, prep_ops_to_compose)
# Povm logic
if isinstance(ideal_povm_type, (tuple, list)): ideal_povm_type = ideal_povm_type[0] # HACK to support multiple vals
if ideal_povm_type == 'computational' or ideal_povm_type.startswith('lindblad '):
ideal_povm = _povm.ComputationalBasisPOVM(num_qubits, evotype, state_space=state_space)
povm_ops_to_compose = []
if ideal_povm_type.startswith('lindblad '): # then add a composed exp(errorgen) to computational SPAM
lndtype = ideal_povm_type[len('lindblad '):]
err_gateNQ = _create_nq_noise(lndtype)
povm_ops_to_compose.append(err_gateNQ.copy()) # .copy() => POVM errors independent
# Add noise
_add_povm_noise(povm_ops_to_compose)
#Add final ops to returned dictionaries (Note: None -> ComputationalBasisPOVM within ComposedPOVM)
effective_ideal_povm = None if len(povm_ops_to_compose) > 0 else ideal_povm
_add_to_povm_layers(effective_ideal_povm, povm_ops_to_compose)
elif ideal_povm_type.startswith('tensor product '):
#Note: with "tensor product <X>" types, e.g. "tensor product static", we assume modelnoise specifies just
# a 1Q noise operation, even when `local_noise=False`
vectype = ideal_povm_type[len('tensor product '):]
v0, v1 = _np.array([1, 0], 'd'), _np.array([0, 1], 'd')
ideal_povm1Q = _povm.create_from_pure_vectors([('0', v0), ('1', v1)], vectype, 'pp',
evotype, state_space=None)
povm_factors = [ideal_povm1Q.copy() for i in range(num_qubits)]
# Add noise
povm_noiseop1Q = modelnoise.create_errormap('povm', evotype, singleQ_state_space, target_labels=None)
if povm_noiseop1Q is not None:
povm_factors = [_povm.ComposedPOVM(
(povm_noiseop1Q.copy() if independent_spam else povm_noiseop1Q), factor, 'pp')
for factor in povm_factors]
povm_layers['Mdefault'] = _povm.TensorProductPOVM(povm_factors, evotype, state_space)
else: # assume ideal_spam_type is a valid 'vectype' for creating n-qubit state vectors & POVMs
vectype = ideal_povm_type
vecs = [] # all the basis vectors for num_qubits
for i in range(2**num_qubits):
v = _np.zeros(2**num_qubits, 'd'); v[i] = 1.0
vecs.append(v)
ideal_povm = _povm.create_from_pure_vectors(
[(format(i, 'b').zfill(num_qubits), v) for i, v in enumerate(vecs)],
vectype, 'pp', evotype, state_space=state_space)
# Add noise
povm_ops_to_compose = []
_add_povm_noise(povm_ops_to_compose)
# Add final ops to returned dictionaries
_add_to_povm_layers(ideal_povm, povm_ops_to_compose)
return prep_layers, povm_layers
def _setup_local_gates(processor_spec, evotype, modelnoise=None, custom_gates=None,
ideal_gate_type=('static standard', 'static clifford', 'static unitary')):
"""
Construct a dictionary of potentially noisy gates that act only on their target qubits.
These gates are "local" because they act only on their intended target qubits. The gates
consist of an ideal gate (obviously local, and crosstalk free) of the type given by
`ideal_gate_type` composed with a noise operation given by `modelnoise`, if one exists.
The returned dictionary contains keys for all the gate names in `processor_spec`. Custom
gate objects can be given by `custom_gates`, which override the normal gate construction.
Parameters
----------
processor_spec : ProcessorSpec
The processor to create gate operations for. This object specifies the
gate names and unitaries for the processor, among other things.
evotype : Evotype
Create gate objects with this evolution type.
modelnoise : ModelNoise, optional
Noise that should be applied after the ideal gates. This noise must
be *local* to each gate (i.e. acting on its target qubits). See the
:class:`ModelNoise` object documentation for details regarding how
to specify different types of noise. If `None`, then no noise is added.
custom_gates : dict, optional
A dictionary of gate objects that should be placed in the returned
dictionary in lieu of objects that would normally be constructed.
Keys are gate names and values are gates.
ideal_gate_type : str or tuple, optional
A gate type or tuple of gate types (listed in order of priority) which
is used to construct the ideal gates. A gate type usually specifies the
Python class that will be created, which determines 1) the parameterization
of the gate and 2) the class/category of the gate (e.g. a :class:`StaticClifford`
operation has no parameters and is a Clifford operation).
Returns
-------
gatedict : dict
A dictionary mapping gate names to local gate operations.
"""
std_gate_unitaries = _itgs.standard_gatename_unitaries()
if custom_gates is None: custom_gates = {}
if modelnoise is None: modelnoise = _OpModelPerOpNoise({})
# All possible entries into the upcoming gate dictionary
# Not just gatenames as it is possible to override in qubit-specific operations
all_keys = _lt.remove_duplicates(list(processor_spec.gate_names)
+ list(custom_gates.keys())
+ list(modelnoise.keys()))
# Cache ideal ops to ensure only one copy for each name
ideal_gates = {}
ideal_factories = {}
gatedict = _collections.OrderedDict()
for key in all_keys:
# Use custom gate directly as error gate
if key in custom_gates:
gatedict[key] = custom_gates[key]
continue
# Skip prep, and povm here, just do gates
if key in ['prep', 'povm']:
continue
# If key has qubits, get base name for lookup
label = _label.Label(key)
name = label.name
U = processor_spec.gate_unitaries[name] # all gate names must be in the processorspec
if ((name not in processor_spec.nonstd_gate_unitaries)
or (not callable(processor_spec.nonstd_gate_unitaries[name]) and (name in std_gate_unitaries)
and processor_spec.nonstd_gate_unitaries[name].shape == std_gate_unitaries[name].shape
and _np.allclose(processor_spec.nonstd_gate_unitaries[name], std_gate_unitaries[name]))):
stdname = name # setting `stdname` != None means we can try to create a StaticStandardOp below
else:
stdname = None
if isinstance(U, (int, _np.int64)): # signals that the gate is an identity on `U` qubits
ideal_gate_state_space = _statespace.default_space_for_num_qubits(U)
noiseop = modelnoise.create_errormap(key, evotype, ideal_gate_state_space, target_labels=None)
if noiseop is not None:
gatedict[key] = noiseop
else:
gatedict[key] = _op.ComposedOp([], evotype, ideal_gate_state_space) # (identity gate on N qubits)
elif not callable(U): # normal operation (not a factory)
ideal_gate = ideal_gates.get(name, None)
if ideal_gate is None:
ideal_gate = _op.create_from_unitary_mx(U, ideal_gate_type, 'pp', stdname, evotype, state_space=None)
ideal_gates[name] = ideal_gate
noiseop = modelnoise.create_errormap(key, evotype, ideal_gate.state_space, target_labels=None)
# Note: above line creates a *local* noise op, working entirely in the ideal gate's target space.
# This means it will fail to create error maps with a given (non-local/stencil) set of sslbls, as desired
if noiseop is None:
gatedict[key] = ideal_gate
else:
if isinstance(noiseop, _op.ComposedOp): # avoid additional nested ComposedOp if we already have one
noiseop.insert(0, ideal_gate)
gatedict[key] = noiseop
else:
gatedict[key] = _op.ComposedOp([ideal_gate, noiseop])
else: # a factory, given by the unitary-valued function U: args -> unitary
ideal_factory = ideal_factories.get(name, None)
if ideal_factory is None:
local_state_space = _statespace.default_space_for_udim(U.shape[0]) # factory *function* SHAPE
ideal_factory = _opfactory.UnitaryOpFactory(U, local_state_space, 'pp', evotype)
ideal_factories[name] = ideal_factory
noiseop = modelnoise.create_errormap(key, evotype, ideal_factory.state_space, target_labels=None)
gatedict[key] = _opfactory.ComposedOpFactory([ideal_factory, noiseop]) \
if (noiseop is not None) else ideal_factory
return gatedict
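# Usage note (sketch): this helper is invoked internally, e.g.
#   local_gates = _setup_local_gates(processor_spec, evotype, None, {}, ideal_gate_type)
# in _create_explicit_model, and the returned dict maps gate names (or qubit-specific
# labels) to local operations/factories acting only on each gate's target qubits.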
def create_crosstalk_free_model(processor_spec, custom_gates=None,
depolarization_strengths=None, stochastic_error_probs=None, lindblad_error_coeffs=None,
depolarization_parameterization='depolarize', stochastic_parameterization='stochastic',
lindblad_parameterization='auto',
evotype="default", simulator="auto", on_construction_error='raise',
independent_gates=False, independent_spam=True, ensure_composed_gates=False,
ideal_gate_type='auto', ideal_spam_type='computational', implicit_idle_mode='none'):
"""
Create a n-qubit "crosstalk-free" model.
By virtue of being crosstalk-free, this model's operations only
act nontrivially on their target qubits. Gates consist of an ideal gate
operation possibly followed by an error operation.
Errors can be specified using any combination of the 3 error rate/coeff arguments,
but each gate name must be provided exclusively to one type of specification.
Each specification results in a different type of operation, depending on the parameterization:
- `depolarization_strengths` -> DepolarizeOp, StochasticNoiseOp, or exp(LindbladErrorgen)
- `stochastic_error_probs` -> StochasticNoiseOp or exp(LindbladErrorgen)
- `lindblad_error_coeffs` -> exp(LindbladErrorgen)
In addition to the gate names, the special values `"prep"` and `"povm"` may be
used as keys to specify the error on the state preparation and measurement, respectively.
Parameters
----------
processor_spec : ProcessorSpec
The processor specification to create a model for. This object specifies the
gate names and unitaries for the processor, and their availability on the
processor.
custom_gates : dict, optional
A dictionary that associates with gate labels
:class:`LinearOperator`, :class:`OpFactory`, or `numpy.ndarray`
objects. These objects override any other behavior for constructing
their designated operations. Keys of this dictionary may
be string-type gate *names* or labels that include target qubits.
depolarization_strengths : dict, optional
A dictionary whose keys are gate names (e.g. `"Gx"`) and whose values
are floats that specify the strength of uniform depolarization.
stochastic_error_probs : dict, optional
A dictionary whose keys are gate names (e.g. `"Gx"`) and whose values
are tuples that specify Pauli-stochastic rates for each of the non-trivial
Paulis (so a 3-tuple would be expected for a 1Q gate and a 15-tuple for a 2Q gate).
lindblad_error_coeffs : dict, optional
A dictionary whose keys are gate names (e.g. `"Gx"`) and whose values
are dictionaries corresponding to the `lindblad_term_dict` kwarg taken
by `LindbladErrorgen`. Keys are `(termType, basisLabel1, <basisLabel2>)`
tuples, where `termType` can be `"H"` (Hamiltonian), `"S"`
(Stochastic), or `"A"` (Affine). Hamiltonian and Affine terms always
have a single basis label (so key is a 2-tuple) whereas Stochastic
tuples with 1 basis label indicate a *diagonal* term, and are the
only types of terms allowed when `nonham_mode != "all"`. Otherwise,
Stochastic term tuples can include 2 basis labels to specify
"off-diagonal" non-Hamiltonian Lindblad terms. Basis labels can be
strings or integers. Values are complex coefficients.
depolarization_parameterization : str of {"depolarize", "stochastic", or "lindblad"}
Determines whether a DepolarizeOp, StochasticNoiseOp, or LindbladErrorgen
is used to parameterize the depolarization noise, respectively.
When "depolarize" (the default), a DepolarizeOp is created with the strength given
in `depolarization_strengths`. When "stochastic", the depolarization strength is split
evenly among the stochastic channels of a StochasticOp. When "lindblad", the depolarization
strength is split evenly among the coefficients of the stochastic error generators
(which are exponentiated to form a LindbladErrorgen with the "depol" parameterization).
stochastic_parameterization : str of {"stochastic", or "lindblad"}
Determines whether a StochasticNoiseOp or LindbladErrorgen is used to parameterize the
stochastic noise, respectively. When "stochastic", elements of `stochastic_error_probs`
are used as coefficients in a linear combination of stochastic channels (the default).
When "lindblad", the elements of `stochastic_error_probs` are coefficients of
stochastic error generators (which are exponentiated to form a LindbladErrorgen with the
"cptp" parameterization).
lindblad_parameterization : "auto" or a LindbladErrorgen paramtype
Determines the parameterization of the LindbladErrorgen. When "auto" (the default), the parameterization
is inferred from the types of error generators specified in the `lindblad_error_coeffs` dictionaries.
When not "auto", the parameterization type is passed through to the LindbladErrorgen.
evotype : Evotype or str, optional
The evolution type. The special value `"default"` is equivalent
to specifying the value of `pygsti.evotypes.Evotype.default_evotype`.
simulator : ForwardSimulator or {"auto", "matrix", "map"}
The simulator used to compute predicted probabilities for the
resulting :class:`Model`. Using `"auto"` selects `"matrix"` when there
are 2 qubits or less, and otherwise selects `"map"`.
on_construction_error : {'raise','warn','ignore'}
What to do when the creation of a gate with the given
`parameterization` fails. Usually you'll want to `"raise"` the error.
In some cases, for example when converting as many gates as you can
into `parameterization="clifford"` gates, `"warn"` or even `"ignore"`
may be useful.
independent_gates : bool, optional
Whether gates are allowed independent local noise or not. If False,
then all gates with the same name (e.g. "Gx") will have the *same*
(local) noise (e.g. an overrotation by 1 degree), and the
`operation_bks['gates']` dictionary contains a single key per gate
name. If True, then gates with the same name acting on different
qubits may have different local noise, and so the
`operation_bks['gates']` dictionary contains a key for each
available gate placement.
ensure_composed_gates : bool, optional
If True then the elements of the `operation_bks['gates']` will always
be :class:`ComposedOp` objects. The purpose of this is to
facilitate modifying the gate operations after the model is created.
If False, then the appropriately parameterized gate objects (often
dense gates) are used directly.
ideal_gate_type : str or tuple, optional
A gate type or tuple of gate types (listed in order of priority) which
is used to construct the ideal gates. A gate type usually specifies the
Python class that will be created, which determines 1) the parameterization
of the gate and 2) the class/category of the gate (e.g. a :class:`StaticClifford`
operation has no parameters and is a Clifford operation).
ideal_spam_type : str or tuple, optional
Similar to `ideal_gate_type` but for SPAM elements (state preparations
and POVMs).
implicit_idle_mode : {'none', 'add_global'}
The way idle operations are added implicitly within the created model. `"none"`
doesn't add any "extra" idle operations when there is a layer that contains some
gates but not gates on all the qubits. `"add_global"` adds the global idle operation,
i.e., the operation for a global idle layer (zero gates - a completely empty layer),
to every layer that is simulated, using the global idle as a background idle that always
occurs regardless of the operation.
Returns
-------
LocalNoiseModel
A model with `"rho0"` prep, `"Mdefault"` POVM, and gates labeled by
the gate names and qubit labels (as specified by `processor_spec`).
For instance, the operation label for the `"Gx"` gate on the second
qubit might be `Label("Gx",1)`.
"""
modelnoise = _build_modelnoise_from_args(depolarization_strengths, stochastic_error_probs, lindblad_error_coeffs,
depolarization_parameterization, stochastic_parameterization,
lindblad_parameterization, allow_nonlocal=False)
return _create_crosstalk_free_model(processor_spec, modelnoise, custom_gates, evotype,
simulator, on_construction_error, independent_gates, independent_spam,
ensure_composed_gates, ideal_gate_type, ideal_spam_type, ideal_spam_type,
implicit_idle_mode)
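# Example usage (a hedged sketch; the processor-spec construction and gate names are
# illustrative assumptions rather than requirements of this function):
#
#   pspec = QubitProcessorSpec(num_qubits=2, gate_names=['Gxpi2', 'Gypi2', 'Gcnot'])
#   mdl = create_crosstalk_free_model(
#       pspec,
#       depolarization_strengths={'Gxpi2': 0.01},             # DepolarizeOp on Gxpi2
#       lindblad_error_coeffs={'Gcnot': {('H', 'ZZ'): 0.02}})  # Hamiltonian ZZ error on Gcnot
#
# Each gate then acts only on its target qubits: an ideal gate composed with its error.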
def _create_crosstalk_free_model(processor_spec, modelnoise, custom_gates=None, evotype="default", simulator="auto",
on_construction_error='raise', independent_gates=False, independent_spam=True,
ensure_composed_gates=False, ideal_gate_type='auto', ideal_prep_type='auto',
ideal_povm_type='auto', implicit_idle_mode='none'):
"""
Create a n-qubit "crosstalk-free" model.
Similar to :method:`create_crosstalk_free_model` but the noise is input more generally,
as a :class:`ModelNoise` object. Arguments are the same as this function except that
`modelnoise` is given instead of several more specific noise-describing arguments.
Returns
-------
LocalNoiseModel
"""
qubit_labels = processor_spec.qubit_labels
state_space = _statespace.QubitSpace(qubit_labels)
evotype = _Evotype.cast(evotype)
modelnoise = _OpModelNoise.cast(modelnoise)
modelnoise.reset_access_counters()
if ideal_gate_type == "auto":
ideal_gate_type = ('static standard', 'static clifford', 'static unitary')
if ideal_prep_type == "auto":
ideal_prep_type = _state.state_type_from_op_type(ideal_gate_type)
if ideal_povm_type == "auto":
ideal_povm_type = _povm.povm_type_from_op_type(ideal_gate_type)
gatedict = _setup_local_gates(processor_spec, evotype, modelnoise, custom_gates, ideal_gate_type)
# (Note: global idle is now handled through processor-spec processing)
# SPAM:
local_noise = True
prep_layers, povm_layers = _create_spam_layers(processor_spec, modelnoise, local_noise,
ideal_prep_type, ideal_povm_type, evotype,
state_space, independent_gates, independent_spam)
modelnoise.warn_about_zero_counters()
return _LocalNoiseModel(processor_spec, gatedict, prep_layers, povm_layers,
evotype, simulator, on_construction_error,
independent_gates, ensure_composed_gates,
implicit_idle_mode)
def create_cloud_crosstalk_model(processor_spec, custom_gates=None,
depolarization_strengths=None, stochastic_error_probs=None, lindblad_error_coeffs=None,
depolarization_parameterization='depolarize', stochastic_parameterization='stochastic',
lindblad_parameterization='auto', evotype="default", simulator="auto",
independent_gates=False, independent_spam=True, errcomp_type="gates",
implicit_idle_mode="none", verbosity=0):
"""
Create a n-qubit "cloud-crosstalk" model.
In a cloud crosstalk model, gates consist of a (local) ideal gates followed
by an error operation that can act nontrivially on *any* of the processor's qubits
(not just a gate's target qubits). Typically a gate's errors are specified
relative to the gate's target qubits, forming a "cloud" of errors around the
target qubits using some notion of locality (that may not be spatial, e.g.
local in frequency). Currently, the "ideal" portion of each gate can only be
created as a *static* (parameterless) object -- all gate parameters come from
the error operation.
Errors can be specified using any combination of the 3 error rate/coeff arguments,
but each gate name must be provided exclusively to one type of specification.
Each specification results in a different type of operation, depending on the parameterization:
- `depolarization_strengths` -> DepolarizeOp, StochasticNoiseOp, or exp(LindbladErrorgen)
- `stochastic_error_probs` -> StochasticNoiseOp or exp(LindbladErrorgen)
- `lindblad_error_coeffs` -> exp(LindbladErrorgen)
In addition to the gate names, the special values `"prep"` and `"povm"` may be
used as keys to specify the error on the state preparation and measurement, respectively.
Parameters
----------
processor_spec : ProcessorSpec
The processor specification to create a model for. This object specifies the
gate names and unitaries for the processor, and their availability on the
processor.
custom_gates : dict, optional
A dictionary that associates with gate labels
:class:`LinearOperator`, :class:`OpFactory`, or `numpy.ndarray`
objects. These objects override any other behavior for constructing
their designated operations. Keys of this dictionary may
be string-type gate *names* or labels that include target qubits.
depolarization_strengths : dict, optional
A dictionary whose keys are gate names (e.g. `"Gx"`) and whose values
are floats that specify the strength of uniform depolarization.
stochastic_error_probs : dict, optional
A dictionary whose keys are gate names (e.g. `"Gx"`) and whose values
are tuples that specify Pauli-stochastic rates for each of the non-trivial
Paulis (so a 3-tuple would be expected for a 1Q gate and a 15-tuple for a 2Q gate).
lindblad_error_coeffs : dict, optional
A dictionary whose keys are gate names (e.g. `"Gx"`) and whose values
are dictionaries corresponding to the `lindblad_term_dict` kwarg taken
by `LindbladErrorgen`. Keys are `(termType, basisLabel1, <basisLabel2>)`
tuples, where `termType` can be `"H"` (Hamiltonian), `"S"`
(Stochastic), or `"A"` (Affine). Hamiltonian and Affine terms always
have a single basis label (so key is a 2-tuple) whereas Stochastic
tuples with 1 basis label indicate a *diagonal* term, and are the
only types of terms allowed when `nonham_mode != "all"`. Otherwise,
Stochastic term tuples can include 2 basis labels to specify
"off-diagonal" non-Hamiltonian Lindblad terms. Basis labels can be
strings or integers. Values are complex coefficients.
depolarization_parameterization : str of {"depolarize", "stochastic", or "lindblad"}
Determines whether a DepolarizeOp, StochasticNoiseOp, or LindbladErrorgen
is used to parameterize the depolarization noise, respectively.
When "depolarize" (the default), a DepolarizeOp is created with the strength given
in `depolarization_strengths`. When "stochastic", the depolarization strength is split
evenly among the stochastic channels of a StochasticOp. When "lindblad", the depolarization
strength is split evenly among the coefficients of the stochastic error generators
(which are exponentiated to form a LindbladErrorgen with the "depol" parameterization).
stochastic_parameterization : str of {"stochastic", or "lindblad"}
Determines whether a StochasticNoiseOp or LindbladErrorgen is used to parameterize the
stochastic noise, respectively. When "stochastic", elements of `stochastic_error_probs`
are used as coefficients in a linear combination of stochastic channels (the default).
When "lindblad", the elements of `stochastic_error_probs` are coefficients of
stochastic error generators (which are exponentiated to form a LindbladErrorgen with the
"cptp" parameterization).
lindblad_parameterization : "auto" or a LindbladErrorgen paramtype
Determines the parameterization of the LindbladErrorgen. When "auto" (the default), the parameterization
is inferred from the types of error generators specified in the `lindblad_error_coeffs` dictionaries.
When not "auto", the parameterization type is passed through to the LindbladErrorgen.
evotype : Evotype or str, optional
The evolution type. The special value `"default"` is equivalent
to specifying the value of `pygsti.evotypes.Evotype.default_evotype`.
simulator : ForwardSimulator or {"auto", "matrix", "map"}
The simulator used to compute predicted probabilities for the
resulting :class:`Model`. Using `"auto"` selects `"matrix"` when there
are 2 qubits or less, and otherwise selects `"map"`.
independent_gates : bool, optional
Whether gates are allowed independent noise or not. If False,
then all gates with the same name (e.g. "Gx") will have the *same*
noise (e.g. an overrotation by 1 degree), and the
`operation_bks['cloudnoise']` dictionary will contain a single key per gate
name. If True, then gates with the same name acting on different
qubits may have different local noise, and so the
`operation_bks['cloudnoise']` dictionary contains a key for each
available gate placement.
independent_spam : bool, optional
Similar to `independent_gates` but for SPAM operations.
errcomp_type : {'gates', 'errorgens'}
Whether errors should be combined by composing error maps (`gates`) or by
exponentiating the sum of error generators (composing the error generators,
`errorgens`). The latter is only an option when the noise is given solely
in terms of Lindblad error coefficients.
implicit_idle_mode : {'none', 'add_global'}
The way idle operations are added implicitly within the created model. `"none"`
doesn't add any "extra" idle operations when there is a layer that contains some
gates but not gates on all the qubits. `"add_global"` adds the global idle operation,
i.e., the operation for a global idle layer (zero gates - a completely empty layer),
to every layer that is simulated, using the global idle as a background idle that always
occurs regardless of the operation.
verbosity : int or VerbosityPrinter, optional
Amount of detail to print to stdout.
Returns
-------
CloudNoiseModel
"""
modelnoise = _build_modelnoise_from_args(depolarization_strengths, stochastic_error_probs, lindblad_error_coeffs,
depolarization_parameterization, stochastic_parameterization,
lindblad_parameterization, allow_nonlocal=True)
return _create_cloud_crosstalk_model(processor_spec, modelnoise, custom_gates, evotype,
simulator, independent_gates, independent_spam, errcomp_type,
implicit_idle_mode, verbosity)
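# Example usage (hedged sketch): given a ProcessorSpec `pspec` (e.g. a QubitProcessorSpec),
# attach a stochastic error to 'Gxpi2' and compose errors as error generators. The gate
# name and coefficient value are illustrative assumptions only.
#
#   mdl = create_cloud_crosstalk_model(
#       pspec,
#       lindblad_error_coeffs={'Gxpi2': {('S', 'X'): 1e-3}},  # stochastic X error
#       errcomp_type='errorgens')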
def _create_cloud_crosstalk_model(processor_spec, modelnoise, custom_gates=None,
evotype="default", simulator="auto", independent_gates=False,
independent_spam=True, errcomp_type="errorgens",
implicit_idle_mode="none", verbosity=0):
"""
Create a n-qubit "cloud-crosstalk" model.
Similar to :method:`create_cloud_crosstalk_model` but the noise is input more generally,
as a :class:`ModelNoise` object. Arguments are the same as this function except that
`modelnoise` is given instead of several more specific noise-describing arguments.
Returns
-------
CloudNoiseModel
"""
qubit_labels = processor_spec.qubit_labels
state_space = _statespace.QubitSpace(qubit_labels) # FUTURE: allow other types of state spaces somehow?
evotype = _Evotype.cast(evotype)
modelnoise = _OpModelNoise.cast(modelnoise)
modelnoise.reset_access_counters()
printer = _VerbosityPrinter.create_printer(verbosity)
#Create static ideal gates without any noise (we use `modelnoise` further down)
gatedict = _setup_local_gates(processor_spec, evotype, None, custom_gates,
ideal_gate_type=('static standard', 'static clifford', 'static unitary'))
stencils = _collections.OrderedDict()
# (Note: global idle is now processed with other processorspec gates)
# SPAM
local_noise = False
prep_layers, povm_layers = _create_spam_layers(processor_spec, modelnoise, local_noise,
'computational', 'computational', evotype, state_space,
independent_gates, independent_spam)
if errcomp_type == 'gates':
create_stencil_fn = modelnoise.create_errormap_stencil
apply_stencil_fn = modelnoise.apply_errormap_stencil
elif errcomp_type == 'errorgens':
create_stencil_fn = modelnoise.create_errorgen_stencil
apply_stencil_fn = modelnoise.apply_errorgen_stencil
else:
raise ValueError("Invalid `errcomp_type` value: %s" % str(errcomp_type))
def build_cloudnoise_fn(lbl):
# lbl will be for a particular gate and target qubits. If we have error rates for this specific gate
# and target qubits (i.e this primitive layer op) then we should build it directly (and independently,
# regardless of the value of `independent_gates`) using these rates. Otherwise, if we have a stencil
# for this gate, then we should use it to construct the output, using a copy when gates are independent
# and a reference to the *same* stencil operations when `independent_gates==False`.
num_sslbls = len(lbl.sslbls) if (lbl.sslbls is not None) else None
if lbl in modelnoise:
stencil = create_stencil_fn(lbl, evotype, state_space, num_target_labels=num_sslbls)
elif lbl.name in stencils:
stencil = stencils[lbl.name]
elif lbl.name in modelnoise:
stencils[lbl.name] = create_stencil_fn(lbl.name, evotype, state_space, num_target_labels=num_sslbls)
stencil = stencils[lbl.name]
else:
return None # no cloudnoise error for this label
return apply_stencil_fn(stencil, evotype, state_space, target_labels=lbl.sslbls,
qubit_graph=processor_spec.qubit_graph,
copy=independent_gates and (lbl not in modelnoise)) # no need to copy if first case
def build_cloudkey_fn(lbl):
num_sslbls = len(lbl.sslbls) if (lbl.sslbls is not None) else None
if lbl in modelnoise:
stencil = create_stencil_fn(lbl, evotype, state_space, num_target_labels=num_sslbls)
elif lbl.name in stencils:
stencil = stencils[lbl.name]
elif lbl.name in modelnoise:
stencils[lbl.name] = create_stencil_fn(lbl.name, evotype, state_space, num_target_labels=num_sslbls)
stencil = stencils[lbl.name]
else:
# simple cloud-key when there is no cloud noise
return tuple(lbl.sslbls) if (lbl.sslbls is not None) else qubit_labels
#Otherwise, process stencil to get a list of all the qubit labels `lbl`'s cloudnoise error
# touches and form this into a key
cloud_sslbls = modelnoise.compute_stencil_absolute_sslbls(stencil, state_space, lbl.sslbls,
processor_spec.qubit_graph)
hashable_sslbls = tuple(lbl.sslbls) if (lbl.sslbls is not None) else qubit_labels
cloud_key = (hashable_sslbls, tuple(sorted(cloud_sslbls))) # (sets are unhashable)
return cloud_key
ret = _CloudNoiseModel(processor_spec, gatedict, prep_layers, povm_layers,
build_cloudnoise_fn, build_cloudkey_fn,
simulator, evotype, errcomp_type,
implicit_idle_mode, printer)
modelnoise.warn_about_zero_counters() # must do this after model creation so build_ fns have been run
return ret
def create_cloud_crosstalk_model_from_hops_and_weights(
processor_spec, custom_gates=None,
max_idle_weight=1, max_spam_weight=1,
maxhops=0, extra_weight_1_hops=0, extra_gate_weight=0,
simulator="auto", evotype='default',
gate_type="H+S", spam_type="H+S",
implicit_idle_mode="none", errcomp_type="gates",
independent_gates=True, independent_spam=True,
connected_highweight_errors=True,
verbosity=0):
"""
Create a "cloud crosstalk" model based on maximum error weights and hops along the processor's qubit graph.
This function provides a convenient way to construct cloud crosstalk models whose gate errors
consist of Pauli elementary error generators (i.e. that correspond to Lindblad error coefficients)
that are limited in weight (number of non-identity Paulis) and support (which qubits have non-trivial
Paulis on them). Errors are taken to be approximately local, meaning they are concentrated near the
target qubits of a gate, with the notion of locality taken from the processor specification's qubit graph.
The caller provides maximum-weight, maximum-hop (a "hop" is the movement along a single graph edge), and
gate type arguments to specify the set of possible errors on a gate.
- The global idle gate (corresponding to an empty circuit layer) has errors that are limited only by
a maximum weight, `max_idle_weight`.
- State preparation and POVM errors are constructed similarly, with a global-idle-like error following
or preceding the preparation or measurement, respectively.
- Gate errors are placed on all the qubits that can be reached with at most `maxhops` hops from (any of)
the gate's target qubits. Elementary error generators up to weight `W`, where `W` equals the number
of target qubits (e.g., 2 for a CNOT gate) plus `extra_gate_weight` are allowed. Weight-1 terms
are a special case, and the `extra_weight_1_hops` argument adds to the usual `maxhops` in this case
to allow weight-1 errors on a possibly larger region of qubits around the target qubits.
Parameters
----------
processor_spec : ProcessorSpec
The processor specification to create a model for. This object specifies the
gate names and unitaries for the processor, and their availability on the
processor.
custom_gates : dict
A dictionary that associates gate labels with
:class:`LinearOperator`, :class:`OpFactory`, or `numpy.ndarray`
objects. These objects describe the full action of the gate or
primitive-layer they're labeled by (so if the model represents
states by density matrices these objects are superoperators, not
unitaries), and override any standard construction based on builtin
gate names or `nonstd_gate_unitaries`. Keys of this dictionary must
be string-type gate *names* -- they cannot include state space labels
-- and they must be *static* (have zero parameters) because they
represent only the ideal behavior of each gate -- the cloudnoise
operations represent the parameterized noise. To fine-tune how this
noise is parameterized, call the :class:`CloudNoiseModel` constructor
directly.
max_idle_weight : int, optional
The maximum-weight for errors on the global idle gate.
max_spam_weight : int, optional
The maximum-weight for state preparation and measurement (SPAM) errors.
maxhops : int
The locality constraint: for a gate, errors (of weight up to the
maximum weight for the gate) are allowed to occur on the gate's
target qubits and those reachable by hopping at most `maxhops` times
from a target qubit along nearest-neighbor links (defined by the
`geometry`).
extra_weight_1_hops : int, optional
Additional hops (adds to `maxhops`) for weight-1 errors. A value > 0
can be useful for allowing just weight-1 errors (of which there are
relatively few) to be dispersed farther from a gate's target qubits.
For example, a crosstalk-detecting model might use this.
extra_gate_weight : int, optional
Additional weight, beyond the number of target qubits (taken as a "base
weight" - i.e. weight 2 for a 2Q gate), allowed for gate errors. If
this equals 1, for instance, then 1-qubit gates can have up to weight-2
errors and 2-qubit gates can have up to weight-3 errors.
simulator : ForwardSimulator or {"auto", "matrix", "map"}
The circuit simulator used to compute any
requested probabilities, e.g. from :method:`probs` or
:method:`bulk_probs`. Using `"auto"` selects `"matrix"` when there
are 2 qubits or less, and otherwise selects `"map"`.
evotype : Evotype or str, optional
The evolution type of this model, describing how states are
represented. The special value `"default"` is equivalent
to specifying the value of `pygsti.evotypes.Evotype.default_evotype`.
gate_type : str, optional
The Lindblad-error parameterization type used for gate operations. This
may be expanded in the future, but currently the gate errors *must* be of
the Lindblad error-generator coefficients type, and this argument specifies
what elementary error-generator coefficients are initially allowed (and linked to
model parameters), before maximum-weight and locality constraints are imposed.
In addition to the usual Lindblad error types (e.g. `"H"`, `"H+S"`), the special
value `"none"` is allowed to indicate that there should be no errors on the gates
(useful if you only want errors on the SPAM, for instance).
spam_type : str, optional
Similar to `gate_type` but for SPAM elements (state preparations
and POVMs). This specifies the Lindblad-error parameterization for the
state preparation and POVM.
implicit_idle_mode : {'none', 'add_global'}
The way idel operations are added implicitly within the created model. `"nonw"`
doesn't add any "extra" idle operations when there is a layer that contains some
gates but not gates on all the qubits. `"add_global"` adds the global idle operation,
i.e., the operation for a global idle layer (zero gates - a completely empty layer),
to every layer that is simulated, using the global idle as a background idle that always
occurs regardless of the operation.
errcomp_type : {"gates","errorgens"}
How errors are composed when creating layer operations in the created
model. `"gates"` means that the errors on multiple gates in a single
layer are composed as separate and subsequent processes. Specifically,
the layer operation has the form `Composed(target,idleErr,cloudErr)`
where `target` is a composition of all the ideal gate operations in the
layer, `idleErr` is the global idle error if `implicit_idle_mode == 'add_global'`,
and `cloudErr` is the composition (ordered as layer-label) of cloud-
noise contributions, i.e. a map that acts as the product of exponentiated
error-generator matrices. `"errorgens"` means that layer operations
have the form `Composed(target, error)` where `target` is as above and
`error` results from composing (summing) the idle and cloud-noise error
*generators*, i.e. a map that acts as the exponentiated sum of error
generators (ordering is irrelevant in this case).
independent_gates : bool, optional
Whether the noise added to a gate when it acts on one set of target
qubits is independent of its noise on a different set of target qubits.
If False, then all gates with the same name (e.g. "Gx") will be constrained
to having the *same* noise on the cloud around the target qubits (even though
the target qubits and cloud are different). If True, then gate noise operations
for different sets of target qubits are independent.
independent_spam : bool, optional
Similar to `independent_gates` but for state preparation and measurement operations.
When `False`, the noise applied to each set (individual or pair or triple etc.) of
qubits must be the same, e.g., if the state preparation is a perfect preparation followed
by a single-qubit rotation then this rotation must be by the *same* angle on all of
the qubits.
connected_highweight_errors : bool, optional
An additional constraint regarding high-weight errors. When `True`, only high weight
(weight 2+) elementary error generators whose non-trivial Paulis occupy a *connected*
portion of the qubit graph are allowed. For example, if the qubit graph is a 1D chain
of 4 qubits, 1-2-3-4, and weight-2 errors are allowed on a single-qubit gate with
target = qubit-2, then weight-2 errors on 1-2 and 2-3 would be allowed, but errors on
1-3 would be forbidden. When `False`, no constraint is imposed.
verbosity : int or VerbosityPrinter, optional
An integer >= 0 dictating how much output to send to stdout.
Returns
-------
CloudNoiseModel
"""
# construct noise specifications for the cloudnoise model
modelnoise = {}
all_qubit_labels = processor_spec.qubit_labels
conn = connected_highweight_errors # shorthand: whether high-weight errors must be connected on the graph
global_idle_name = processor_spec.global_idle_gate_name
# Global Idle
if max_idle_weight > 0:
assert(global_idle_name is not None), \
"`max_idle_weight` must equal 0 for processor specs without a global idle gate!"
#printer.log("Creating Idle:")
wt_maxhop_tuples = [(i, None) for i in range(1, max_idle_weight + 1)]
modelnoise[global_idle_name] = _build_weight_maxhops_modelnoise(all_qubit_labels, wt_maxhop_tuples,
gate_type, conn)
# SPAM
if max_spam_weight > 0:
wt_maxhop_tuples = [(i, None) for i in range(1, max_spam_weight + 1)]
modelnoise['prep'] = _build_weight_maxhops_modelnoise(all_qubit_labels, wt_maxhop_tuples, spam_type, conn)
modelnoise['povm'] = _build_weight_maxhops_modelnoise(all_qubit_labels, wt_maxhop_tuples, spam_type, conn)
# Gates
weight_maxhops_tuples_1Q = [(1, maxhops + extra_weight_1_hops)] + \
[(1 + x, maxhops) for x in range(1, extra_gate_weight + 1)]
weight_maxhops_tuples_2Q = [(1, maxhops + extra_weight_1_hops), (2, maxhops)] + \
[(2 + x, maxhops) for x in range(1, extra_gate_weight + 1)]
for gatenm, gate_unitary in processor_spec.gate_unitaries.items():
if gatenm == global_idle_name: continue # processed above
gate_nQubits = int(gate_unitary) if isinstance(gate_unitary, (int, _np.int64)) \
else int(round(_np.log2(gate_unitary.shape[0]))) # NOTE: integer gate_unitary => idle on n qubits
if gate_nQubits not in (1, 2):
raise ValueError("Only 1- and 2-qubit gates are supported. %s acts on %d qubits!"
% (str(gatenm), gate_nQubits))
weight_maxhops_tuples = weight_maxhops_tuples_1Q if gate_nQubits == 1 else weight_maxhops_tuples_2Q
target_sslbls = ('@0',) if gate_nQubits == 1 else ('@0', '@1')
modelnoise[gatenm] = _build_weight_maxhops_modelnoise(target_sslbls, weight_maxhops_tuples,
gate_type, conn)
return _create_cloud_crosstalk_model(processor_spec, modelnoise, custom_gates,
evotype, simulator, independent_gates, independent_spam,
errcomp_type, implicit_idle_mode, verbosity)
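# Illustrative usage sketch (assumptions noted): `pspec` stands for an assumed
# ProcessorSpec with a global idle gate, and the keyword values below merely
# exercise the weight/hop arguments documented above; they are not recommended
# defaults.
#
#   mdl = create_cloud_crosstalk_model_from_hops_and_weights(
#       pspec, max_idle_weight=1, maxhops=1, extra_weight_1_hops=1,
#       extra_gate_weight=0, gate_type="H+S", spam_type="H+S")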
def _iter_basis_inds(weight):
""" Iterate over product of `weight` non-identity Pauli 1Q basis indices """
basisIndList = [[1, 2, 3]] * weight # assume pauli 1Q basis, and only iterate over non-identity els
for basisInds in _itertools.product(*basisIndList):
yield basisInds
def _pauli_product_matrix(sigma_inds):
"""
Construct the Pauli product matrix from the given `sigma_inds`
Parameters
----------
sigma_inds : iterable
A sequence of integers in the range [0,3] corresponding to the
I, X, Y, Z Pauli basis matrices.
Returns
-------
numpy.ndarray or scipy.sparse.csr_matrix
"""
sigmaVec = (id2x2 / sqrt2, sigmax / sqrt2, sigmay / sqrt2, sigmaz / sqrt2)
M = _np.identity(1, 'complex')
for i in sigma_inds:
M = _np.kron(M, sigmaVec[i])
return M
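# Worked example (illustrative): _pauli_product_matrix([1, 3]) returns the
# normalized two-qubit product (X tensor Z) / 2, since each single-qubit factor
# in `sigmaVec` carries a 1/sqrt(2) normalization.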
def _construct_restricted_weight_pauli_basis(wt, sparse=False):
basisEl_Id = _pauli_product_matrix(_np.zeros(wt, _np.int64))
errbasis = [basisEl_Id]
errbasis_lbls = ['I']
for err_basis_inds in _iter_basis_inds(wt):
error = _np.array(err_basis_inds, _np.int64) # length == wt
basisEl = _pauli_product_matrix(error)
errbasis.append(basisEl)
errbasis_lbls.append(''.join(["IXYZ"[i] for i in err_basis_inds]))
#printer.log("Error on qubits %s -> error basis of length %d" % (err_qubit_inds, len(errbasis)), 3)
return _ExplicitBasis(errbasis, errbasis_lbls, real=True, sparse=sparse)
def _build_weight_maxhops_modelnoise(target_sslbls, weight_maxhops_tuples, lnd_parameterization, connected=True):
# This function:
# loop over all size-`wt` *connected* combinations, `err_qubit_inds`, of the qubit indices in
# `possible_err_qubit_inds`
# - construct a local weight-`wt` Pauli basis & corresponding LindbladErrorgen on `wt` qubits
# => replace with: opnoise.create_errorgen(evotype, state_space=None) where opnoise is for a wt-qubit op
# - embed this constructed local error onto `err_qubit_inds`
# - append embedded error onto running list
#
# Noise object structure:
# OpModelPerOpNoise( { op_key/'idle': { sslbls : opnoise } } )
# where sslbls can be absolute labels or stencil labels
# -- could have a fn that spreads a single opnoise onto all the sslbls
# given by size-`wt` connected combos of `possible_err_qubit_inds` - this would work for independent clouds
# -- have LindbladNoiseDict and another LindbladPauliAtWeight (?) noise objects,
# since we want to specify a lindblad noise by giving a weight and an initial basis (Pauli here)
# To build a cloudnoise model from hops & weights:
modelnoise_dict = {}
if lnd_parameterization == 'none' or lnd_parameterization is None:
return {} # special case when we don't want any error parameterization
for wt, max_hops in weight_maxhops_tuples:
if max_hops is None or max_hops == 0: # Note: maxHops not used in this case
stencil_lbl = _stencil.StencilLabelAllCombos(target_sslbls, wt, connected)
else:
stencil_lbl = _stencil.StencilLabelRadiusCombos(target_sslbls, max_hops, wt, connected)
local_state_space = _statespace.default_space_for_num_qubits(wt)
modelnoise_dict[stencil_lbl] = _LindbladNoise.from_basis_coefficients(
lnd_parameterization, _construct_restricted_weight_pauli_basis(wt),
local_state_space)
return modelnoise_dict
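# Worked example (illustrative): for a 2-qubit gate with maxhops=1,
# extra_weight_1_hops=1 and extra_gate_weight=0, the caller above passes
# weight_maxhops_tuples = [(1, 2), (2, 1)], so this function builds one
# radius-2 weight-1 stencil and one radius-1 weight-2 stencil, each mapped to
# Lindblad noise over the corresponding restricted-weight Pauli basis.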
def _build_modelnoise_from_args(depolarization_strengths, stochastic_error_probs, lindblad_error_coeffs,
depolarization_parameterization, stochastic_parameterization, lindblad_parameterization,
allow_nonlocal):
modelnoises = []
if depolarization_strengths is not None:
noise_dict = {}
for lbl, val in depolarization_strengths.items():
if isinstance(val, dict): # then value is actually a dictionary of sslbls -> noise specifications
if not allow_nonlocal: raise ValueError("Nonlocal depolarization strengths not allowed!")
noise_dict[lbl] = {k: _DepolarizationNoise(v, depolarization_parameterization) for k, v in val.items()}
else:
noise_dict[lbl] = _DepolarizationNoise(val, depolarization_parameterization)
modelnoises.append(_OpModelPerOpNoise(noise_dict))
if stochastic_error_probs is not None:
noise_dict = {}
for lbl, val in stochastic_error_probs.items():
if isinstance(val, dict): # then value is actually a dictionary of sslbls -> noise specifications
if not allow_nonlocal: raise ValueError("Nonlocal stochastic error probs not allowed!")
noise_dict[lbl] = {k: _StochasticNoise(v, stochastic_parameterization) for k, v in val.items()}
else:
noise_dict[lbl] = _StochasticNoise(val, stochastic_parameterization)
modelnoises.append(_OpModelPerOpNoise(noise_dict))
if lindblad_error_coeffs is not None:
if not allow_nonlocal: # the easy case
modelnoises.append(_OpModelPerOpNoise({lbl: _LindbladNoise(val, lindblad_parameterization)
for lbl, val in lindblad_error_coeffs.items()}))
else: # then need to process labels like ('H', 'XX:0,1') or 'HXX:0,1'
def process_stencil_labels(flat_lindblad_errs):
nonlocal_errors = _collections.OrderedDict()
local_errors = _collections.OrderedDict()
for nm, val in flat_lindblad_errs.items():
if isinstance(nm, str): nm = (nm[0], nm[1:]) # e.g. "HXX" => ('H','XX')
err_typ, basisEls = nm[0], nm[1:]
sslbls = None
local_nm = [err_typ]
for bel in basisEls: # e.g. bel could be "X:Q0" or "XX:Q0,Q1"
# OR "X:<n>" where n indexes a target qubit or "X:<dir>" where dir indicates
# a graph *direction*, e.g. "up"
if ':' in bel:
bel_name, bel_sslbls = bel.split(':') # should have form <name>:<comma-separated-sslbls>
bel_sslbls = bel_sslbls.split(',') # e.g. ('Q0','Q1')
integerized_sslbls = []
for ssl in bel_sslbls:
try: integerized_sslbls.append(int(ssl))
except: integerized_sslbls.append(ssl)
bel_sslbls = tuple(integerized_sslbls)
else:
bel_name = bel
bel_sslbls = None
if sslbls is None:
sslbls = bel_sslbls
else:
#Note: sslbls should always be the same if there are multiple basisEls,
# i.e for nm == ('S',bel1,bel2)
assert(sslbls is bel_sslbls or sslbls == bel_sslbls), \
"All basis elements of the same error term must operate on the *same* state!"
local_nm.append(bel_name) # drop the state space labels, e.g. "XY:Q0,Q1" => "XY"
# keep track of errors by the qubits they act on, as each such
# set will have its own LindbladErrorgen
local_nm = tuple(local_nm) # so it's hashable
if sslbls is not None:
sslbls = tuple(sorted(sslbls))
if sslbls not in nonlocal_errors:
nonlocal_errors[sslbls] = _collections.OrderedDict()
if local_nm in nonlocal_errors[sslbls]:
nonlocal_errors[sslbls][local_nm] += val
else:
nonlocal_errors[sslbls][local_nm] = val
else:
if local_nm in local_errors:
local_errors[local_nm] += val
else:
local_errors[local_nm] = val
if len(nonlocal_errors) == 0:
return _LindbladNoise(local_errors, lindblad_parameterization)
else:
all_errors = []
if len(local_errors) > 0:
all_errors.append((None, _LindbladNoise(local_errors, lindblad_parameterization)))
for sslbls, errdict in nonlocal_errors.items():
all_errors.append((sslbls, _LindbladNoise(errdict, lindblad_parameterization)))
return _collections.OrderedDict(all_errors)
modelnoises.append(_OpModelPerOpNoise({lbl: process_stencil_labels(val)
for lbl, val in lindblad_error_coeffs.items()}))
return _ComposedOpModelNoise(modelnoises)
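# Illustrative input shapes (gate names and value formats are placeholders, not
# taken from the original source): the per-op noise dictionaries handled above
# can be as simple as
#   depolarization_strengths = {'Gx': 0.01}
#   stochastic_error_probs   = {'Gx': (0.01, 0.01, 0.01)}
#   lindblad_error_coeffs    = {'Gx': {('H', 'X'): 0.01}}
# with nested {state-space-labels: value} dictionaries also accepted when
# `allow_nonlocal` is True.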
@_deprecated_fn("This function is overly specific and will be removed soon.")
def _nparams_xycnot_cloudnoise_model(num_qubits, geometry="line", max_idle_weight=1, maxhops=0,
extra_weight_1_hops=0, extra_gate_weight=0, require_connected=False,
independent_1q_gates=True, zz_only=False, bidirectional_cnots=True, verbosity=0):
"""
Compute the number of parameters in a particular :class:`CloudNoiseModel`.
Returns the number of parameters in the :class:`CloudNoiseModel` containing
X(pi/2), Y(pi/2) and CNOT gates using the specified arguments without
actually constructing the model (useful for considering parameter-count
scaling).
Parameters
----------
num_qubits : int
The total number of qubits.
geometry : {"line","ring","grid","torus"} or QubitGraph
The type of connectivity among the qubits, specifying a
graph used to define neighbor relationships. Alternatively,
a :class:`QubitGraph` object may be passed directly.
max_idle_weight : int, optional
The maximum-weight for errors on the global idle gate.
maxhops : int
The locality constraint: for a gate, errors (of weight up to the
maximum weight for the gate) are allowed to occur on the gate's
target qubits and those reachable by hopping at most `maxhops` times
from a target qubit along nearest-neighbor links (defined by the
`geometry`).
extra_weight_1_hops : int, optional
Additional hops (adds to `maxhops`) for weight-1 errors. A value > 0
can be useful for allowing just weight-1 errors (of which there are
relatively few) to be dispersed farther from a gate's target qubits.
For example, a crosstalk-detecting model might use this.
extra_gate_weight : int, optional
Additional weight, beyond the number of target qubits (taken as a "base
weight" - i.e. weight 2 for a 2Q gate), allowed for gate errors. If
this equals 1, for instance, then 1-qubit gates can have up to weight-2
errors and 2-qubit gates can have up to weight-3 errors.
require_connected : bool, optional
If True, then high-weight errors only occur on connected (via `geometry`) qubits.
For example in a line of qubits there would not be weight-2 errors on qubits 1 and 3.
independent_1q_gates : bool, optional
If True, 1Q gates on different qubits have separate (distinct) parameters. If
False, the 1Q gates of each type (e.g. a pi/2 X gate) for different qubits share
the same set of parameters.
zz_only : bool, optional
If True, the only high-weight errors allowed are of "Z^n" type.
bidirectional_cnots : bool
Whether CNOT gates can be performed in either direction (and each direction should
be treated as an independent gate).
verbosity : int, optional
An integer >= 0 dictating how much output to send to stdout.
Returns
-------
int
"""
# noise can be either a seed or a random array that is long enough to use
printer = _VerbosityPrinter.create_printer(verbosity)
printer.log("Computing parameters for a %d-qubit %s model" % (num_qubits, geometry))
qubitGraph = _QubitGraph.common_graph(num_qubits, geometry, directed=True, all_directions=True)
#printer.log("Created qubit graph:\n"+str(qubitGraph))
def idle_count_nparams(max_weight):
"""Parameter count of a `build_nqn_global_idle`-constructed gate"""
ret = 0
possible_err_qubit_inds = _np.arange(num_qubits)
for wt in range(1, max_weight + 1):
nErrTargetLocations = qubitGraph.connected_combos(possible_err_qubit_inds, wt)
if zz_only and wt > 1: basisSizeWoutId = 1**wt # ( == 1)
else: basisSizeWoutId = 3**wt # (X,Y,Z)^wt
nErrParams = 2 * basisSizeWoutId # H+S terms
ret += nErrTargetLocations * nErrParams
return ret
def op_count_nparams(target_qubit_inds, weight_maxhops_tuples, debug=False):
"""Parameter count of a `build_nqn_composed_gate`-constructed gate"""
ret = 0
#Note: no contrib from idle noise (already parameterized)
for wt, maxHops in weight_maxhops_tuples:
possible_err_qubit_inds = _np.array(qubitGraph.radius(target_qubit_inds, maxHops), _np.int64)
if require_connected:
nErrTargetLocations = qubitGraph.connected_combos(possible_err_qubit_inds, wt)
else:
nErrTargetLocations = _scipy.special.comb(len(possible_err_qubit_inds), wt)
if zz_only and wt > 1: basisSizeWoutId = 1**wt # ( == 1)
else: basisSizeWoutId = 3**wt # (X,Y,Z)^wt
nErrParams = 2 * basisSizeWoutId # H+S terms
if debug:
print(" -- wt%d, hops%d: inds=%s locs = %d, eparams=%d, total contrib = %d" %
(wt, maxHops, str(possible_err_qubit_inds), nErrTargetLocations,
nErrParams, nErrTargetLocations * nErrParams))
ret += nErrTargetLocations * nErrParams
return ret
nParams = _collections.OrderedDict()
printer.log("Creating Idle:")
nParams[_label.Label('Gi')] = idle_count_nparams(max_idle_weight)
#1Q gates: X(pi/2) & Y(pi/2) on each qubit
weight_maxhops_tuples_1Q = [(1, maxhops + extra_weight_1_hops)] + \
[(1 + x, maxhops) for x in range(1, extra_gate_weight + 1)]
if independent_1q_gates:
for i in range(num_qubits):
printer.log("Creating 1Q X(pi/2) and Y(pi/2) gates on qubit %d!!" % i)
nParams[_label.Label("Gx", i)] = op_count_nparams((i,), weight_maxhops_tuples_1Q)
nParams[_label.Label("Gy", i)] = op_count_nparams((i,), weight_maxhops_tuples_1Q)
else:
printer.log("Creating common 1Q X(pi/2) and Y(pi/2) gates")
rep = int(num_qubits / 2)
nParams[_label.Label("Gxrep")] = op_count_nparams((rep,), weight_maxhops_tuples_1Q)
nParams[_label.Label("Gyrep")] = op_count_nparams((rep,), weight_maxhops_tuples_1Q)
#2Q gates: CNOT gates along each graph edge
weight_maxhops_tuples_2Q = [(1, maxhops + extra_weight_1_hops), (2, maxhops)] + \
[(2 + x, maxhops) for x in range(1, extra_gate_weight + 1)]
seen_pairs = set()
for i, j in qubitGraph.edges(): # note: all edges have i<j so "control" of CNOT is always lower index (arbitrary)
if bidirectional_cnots is False:
ordered_tup = (i, j) if i <= j else (j, i)
if ordered_tup in seen_pairs: continue
else: seen_pairs.add(ordered_tup)
printer.log("Creating CNOT gate between qubits %d and %d!!" % (i, j))
nParams[_label.Label("Gcnot", (i, j))] = op_count_nparams((i, j), weight_maxhops_tuples_2Q)
#SPAM
nPOVM_1Q = 4 # params for a single 1Q POVM
nParams[_label.Label('rho0')] = 3 * num_qubits # 3 b/c each component is TP
nParams[_label.Label('Mdefault')] = nPOVM_1Q * num_qubits # num_qubits 1Q-POVMs
return nParams, sum(nParams.values())
| to_label |
status.py | import xbmc
import xbmcgui
import sys
import urllib
import utils
from stream_api import app_state
class | (object):
installed = False
percent = 0
title1 = ""
def update(self, app):
if app["state"] == 0 or app["state"] == 2 or app["state"] == 258 or app["state"] == 1282 or app["state"] == 260 or app["state"] == 1048576 or app["state"] == 1286:
self.title1 = ""
if app["estimated_seconds_remaining"] != -1:
self.title1 = utils.translation(32021)+" "+str(int(app["estimated_seconds_remaining"] / 60 + 1))+" "+utils.translation(32022)
self.percent = int((float(app["bytes_downloaded"]) / float(app["bytes_to_download"])) * 100.0)
elif app["state"] == 4:
self.installed = True
else:
self.percent = 0
self.title1 = utils.translation(32023)+": "+utils.translation(app_state.state(app["state"]))
print "Unknown State: "+str(app["state"])
def status(service, params):
app_id = int(params.get('id'))
username = urllib.unquote_plus(params.get('username'))
hostname = urllib.unquote_plus(params.get('hostname'))
progress = xbmcgui.DialogProgress()
progress.create(utils.translation(32020), " ", " ", " ")
update = Updater()
#get app updates
while not progress.iscanceled():
xbmc.sleep(100)
update.update(service.update_app((hostname, username), app_id))
if update.installed:
break
else:
progress.update(update.percent, update.title1, "", "")
progress.close()
| Updater |
index.ts | import { rename, unlink } from "fs/promises";
import type { NextApiRequest, NextApiResponse } from "next";
import { join } from "path";
import { FILE_DATA_DIR } from "../../../lib";
import { getUser } from "../../../lib/utils";
export default async function | (req: NextApiRequest, res: NextApiResponse) {
const user = await getUser(req);
if (!user) return res.status(401).send(null);
if (req.method === "DELETE") {
if (!req.body) return res.status(400).send({ message: "Missing name in request body" });
const { name } = req.body;
if (typeof name !== "string" || !name.length) return res.status(400).send({ message: "Name is not a valid string" });
const path = join(FILE_DATA_DIR, name);
if (!path.startsWith(FILE_DATA_DIR)) return res.status(403).send({ message: "Forbidden" });
try {
await unlink(path);
res.status(204).send(null);
} catch (err) {
res.status(500).json({ message: "Something went wrong on our side, please try again later." });
}
return;
} else if (req.method === "PATCH") {
if (!req.body) return res.status(400).send({ message: "Missing name in request body" });
const { oldName, newName } = req.body;
if (typeof oldName !== "string" || !oldName.length) return res.status(400).send({ message: "oldName is not a valid string" });
if (typeof newName !== "string" || !newName.length) return res.status(400).send({ message: "newName is not a valid string" });
const path = join(FILE_DATA_DIR, oldName);
if (!path.startsWith(FILE_DATA_DIR)) return res.status(403).send({ message: "Forbidden" });
const newPath = join(FILE_DATA_DIR, newName);
if (!newPath.startsWith(FILE_DATA_DIR)) return res.status(403).send({ message: "Forbidden" });
try {
await rename(path, newPath);
res.status(204).send(null);
} catch (err) {
res.status(500).json({ message: "Something went wrong on our side, please try again later." });
}
return;
}
res.status(403).json({ message: "Method forbidden on this route" });
}
| handler |
input_csv.py | from __future__ import unicode_literals, division, absolute_import
import logging
import csv
from requests import RequestException
from flexget.entry import Entry
from flexget.plugin import register_plugin, PluginError
from flexget.utils.cached_input import cached
log = logging.getLogger('csv')
class InputCSV(object):
"""
Adds support for CSV format. Configuration may seem a bit complex,
but this has the advantage of being a universal solution regardless of the CSV
and internal entry fields.
Configuration format:
csv:
url: <url>
values:
<field>: <number>
Example DB-fansubs:
csv:
url: http://www.dattebayo.com/t/dump
values:
title: 3 # title is in 3rd field
url: 1 # download url is in 1st field
Fields title and url are mandatory. First field is 1.
List of other common (optional) fields can be found from wiki.
"""
schema = {
'type': 'object',
'properties': {
'url': {'type': 'string', 'format': 'url'},
'values': {'type': 'object', 'additionalProperties': {'type': 'integer'}}
},
'required': ['values'],
'additionalProperties': False
}
@cached('csv')
def | (self, task, config):
entries = []
try:
r = task.requests.get(config['url'])
except RequestException as e:
raise PluginError('Error fetching `%s`: %s' % (config['url'], e))
# CSV module needs byte strings, we'll convert back to unicode later
page = r.text.encode('utf-8').splitlines()
for row in csv.reader(page):
if not row:
continue
entry = Entry()
for name, index in config.get('values', {}).items():
try:
# Convert the value back to unicode
entry[name] = row[index - 1].decode('utf-8').strip()
except IndexError:
raise PluginError('Field `%s` index is out of range' % name)
entries.append(entry)
return entries
register_plugin(InputCSV, 'csv', api_ver=2)
| on_task_input |
forms.py | from django.utils.safestring import mark_safe
from django import forms
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from oauth2_provider.forms import AllowForm as DotAllowForm
from oauth2_provider.models import get_application_model
from oauth2_provider.scopes import get_scopes_backend
from oauth2_provider.settings import oauth2_settings
from oauth2_provider.validators import urlsplit
class CustomRegisterApplicationForm(forms.ModelForm):
def __init__(self, user, *args, **kwargs):
agree_label = u'Yes I have read and agree to the <a target="_blank" href="%s">API Terms of Service Agreement</a>*' % (
settings.TOS_URI)
super(CustomRegisterApplicationForm, self).__init__(*args, **kwargs)
self.fields['authorization_grant_type'].choices = settings.GRANT_TYPES
self.fields['client_type'].initial = 'confidential'
self.fields['agree'].label = mark_safe(agree_label)
self.fields['name'].label = "Name*"
self.fields['name'].required = True
self.fields['client_type'].label = "Client Type*"
self.fields[
'authorization_grant_type'].label = "Authorization Grant Type*"
self.fields['redirect_uris'].label = "Redirect URIs*"
class Meta:
model = get_application_model()
fields = ('name',
'client_type',
'authorization_grant_type', 'redirect_uris',
'logo_uri', 'policy_uri', 'tos_uri', 'contacts',
'agree')
required_css_class = 'required'
def clean(self):
client_type = self.cleaned_data.get('client_type')
authorization_grant_type = self.cleaned_data.get(
'authorization_grant_type')
redirect_uris = self.cleaned_data.get('redirect_uris')
msg = ""
validate_error = False
# Public clients don't use authorization-code flow
if client_type == 'public' and authorization_grant_type == 'authorization-code':
validate_error = True
msg += 'A public client may not request ' \
'an authorization-code grant type.'
# Confidential clients cannot use implicit authorization_grant_type
if client_type == 'confidential' and authorization_grant_type == 'implicit':
validate_error = True
msg += 'A confidential client may not ' \
'request an implicit grant type.'
# Native mobile applications using RFC 8252 must supply https or
# another allowed redirect URI scheme (see get_allowed_schemes below)
for uri in redirect_uris.split():
scheme, netloc, path, query, fragment = urlsplit(uri)
valid_schemes = get_allowed_schemes()
if scheme in valid_schemes:
validate_error = False
else:
validate_error = True
if validate_error:
msg += '%s is an invalid scheme. Redirect URIs must use %s ' \
% (scheme, ' or '.join(valid_schemes))
if validate_error:
msg_output = _(msg)
raise forms.ValidationError(msg_output)
else:
pass
return self.cleaned_data
def clean_client_type(self):
client_type = self.cleaned_data.get('client_type')
authorization_grant_type = self.cleaned_data.get(
'authorization_grant_type')
if client_type == 'public' and authorization_grant_type == 'authorization-code':
msg = _(
'A public client may not request an '
'authorization-code grant type.')
raise forms.ValidationError(msg)
return client_type
def clean_agree(self):
agree = self.cleaned_data.get('agree')
if not agree:
msg = _('You must agree to the API Terms of Service Agreement')
raise forms.ValidationError(msg)
return agree
def clean_redirect_uris(self):
redirect_uris = self.cleaned_data.get('redirect_uris')
if getattr(settings, 'BLOCK_HTTP_REDIRECT_URIS', True):
if redirect_uris:
for u in redirect_uris.split():
if u.startswith("http://"):
msg = _('Redirect URIs must not use http.')
raise forms.ValidationError(msg)
return redirect_uris
class SimpleAllowForm(DotAllowForm):
pass
class AllowForm(DotAllowForm):
scope = forms.MultipleChoiceField(widget=forms.CheckboxSelectMultiple)
expires_in = forms.TypedChoiceField(choices=settings.DOT_EXPIRES_IN, coerce=int,
empty_value=None,
label="Access to this application expires in")
def | (self, *args, **kwargs):
application = kwargs.pop('application', None)
if application is None:
super(AllowForm, self).__init__(*args, **kwargs)
else:
# we use the application instance to get the list of available scopes
# because it is needed to create the choices list for the `scope`
# field.
available_scopes = get_scopes_backend().get_available_scopes(application)
# set the available_scopes as the initial value so that
# all checkboxes are checked
kwargs['initial']['scope'] = available_scopes
# init the form to create self.fields
super(AllowForm, self).__init__(*args, **kwargs)
# get the list of all the scopes available in the system
# to get the description of each available scope.
all_scopes = get_scopes_backend().get_all_scopes()
choices = [(scope, all_scopes[scope])
for scope in available_scopes]
self.fields['scope'].choices = choices
def get_allowed_schemes():
"""
get allowed_schemes set in OAUTH2_PROVIDER.ALLOWED_REDIRECT_URI_SCHEMES
:return: list
"""
if oauth2_settings.ALLOWED_REDIRECT_URI_SCHEMES:
valid_list = oauth2_settings.ALLOWED_REDIRECT_URI_SCHEMES
else:
valid_list = ['https', ]
return valid_list
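# Illustrative configuration sketch (the settings values here are assumptions):
# with
#   OAUTH2_PROVIDER = {'ALLOWED_REDIRECT_URI_SCHEMES': ['https', 'com.example.app']}
# in Django settings, get_allowed_schemes() returns that list; without the
# setting it falls back to ['https'].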
| __init__ |
clipboard.ts | /**
* --------------------------------------------------------------------- | * ---------------------------------------------------------------------
*/
export const copyToClipBoard = (text: string) => {
const textArea = document.createElement('textarea');
document.body.appendChild(textArea);
textArea.value = text;
textArea.select();
document.execCommand('copy');
document.body.removeChild(textArea);
}; | * Copyright (c) 2020 EclipseSource Munich
* Licensed under MIT
* https://github.com/eclipsesource/jsonforms-editor/blob/master/LICENSE |
karger.py | """
An implementation of Karger's Algorithm for partitioning a graph.
"""
from __future__ import annotations
import random
# Adjacency list representation of this graph:
# https://en.wikipedia.org/wiki/File:Single_run_of_Karger%E2%80%99s_Mincut_algorithm.svg
TEST_GRAPH = {
"1": ["2", "3", "4", "5"],
"2": ["1", "3", "4", "5"],
"3": ["1", "2", "4", "5", "10"],
"4": ["1", "2", "3", "5", "6"],
"5": ["1", "2", "3", "4", "7"],
"6": ["7", "8", "9", "10", "4"],
"7": ["6", "8", "9", "10", "5"],
"8": ["6", "7", "9", "10"],
"9": ["6", "7", "8", "10"],
"10": ["6", "7", "8", "9", "3"],
}
def partition_graph(graph: dict[str, list[str]]) -> set[tuple[str, str]]:
"""
Partitions a graph using Karger's Algorithm. Implemented from
pseudocode found here:
https://en.wikipedia.org/wiki/Karger%27s_algorithm.
This function involves random choices, meaning it will not give
consistent outputs.
Args:
graph: A dictionary containing adjacency lists for the graph.
Nodes must be strings.
Returns:
The cutset of the cut found by Karger's Algorithm.
>>> graph = {'0':['1'], '1':['0']}
>>> partition_graph(graph)
{('0', '1')}
"""
# Dict that maps contracted nodes to a list of all the nodes it "contains."
contracted_nodes = {node: {node} for node in graph}
graph_copy = {node: graph[node][:] for node in graph}
while len(graph_copy) > 2:
# Choose a random edge.
u = random.choice(list(graph_copy.keys()))
v = random.choice(graph_copy[u]) |
# Contract edge (u, v) to new node uv
uv = u + v
uv_neighbors = list(set(graph_copy[u] + graph_copy[v]))
uv_neighbors.remove(u)
uv_neighbors.remove(v)
graph_copy[uv] = uv_neighbors
for neighbor in uv_neighbors:
graph_copy[neighbor].append(uv)
contracted_nodes[uv] = set(contracted_nodes[u].union(contracted_nodes[v]))
# Remove nodes u and v.
del graph_copy[u]
del graph_copy[v]
for neighbor in uv_neighbors:
if u in graph_copy[neighbor]:
graph_copy[neighbor].remove(u)
if v in graph_copy[neighbor]:
graph_copy[neighbor].remove(v)
# Find cutset.
groups = [contracted_nodes[node] for node in graph_copy]
return {
(node, neighbor)
for node in groups[0]
for neighbor in graph[node]
if neighbor in groups[1]
}
if __name__ == "__main__":
print(partition_graph(TEST_GRAPH)) | |
opencl.rs | use steam_audio_sys::ffi;
use crate::prelude::*;
pub struct OpenCLDevice(ffi::IPLOpenCLDevice);
| impl OpenCLDevice {} |
|
base.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Yahoo! Finance market data downloader (+fix for Pandas Datareader)
# https://github.com/ranaroussi/yfinance
#
# Copyright 2017-2019 Ran Aroussi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import time as _time
import datetime as _datetime
import requests as _requests
import pandas as _pd
import numpy as _np
try:
from urllib.parse import quote as urlencode
except ImportError:
from urllib import quote as urlencode
from . import utils
# import json as _json
# import re as _re
# import sys as _sys
from . import shared
class | ():
def __init__(self, ticker):
self.ticker = ticker.upper()
self._history = None
self._base_url = 'https://query1.finance.yahoo.com'
self._scrape_url = 'https://finance.yahoo.com/quote'
self._fundamentals = False
self._info = None
self._sustainability = None
self._recommendations = None
self._major_holders = None
self._institutional_holders = None
self._isin = None
self._calendar = None
self._expirations = {}
self._earnings = {
"yearly": utils.empty_df(),
"quarterly": utils.empty_df()}
self._financials = {
"yearly": utils.empty_df(),
"quarterly": utils.empty_df()}
self._balancesheet = {
"yearly": utils.empty_df(),
"quarterly": utils.empty_df()}
self._cashflow = {
"yearly": utils.empty_df(),
"quarterly": utils.empty_df()}
def history(self, period="1mo", interval="1d",
start=None, end=None, prepost=False, actions=True,
auto_adjust=True, back_adjust=False,
proxy=None, rounding=True, tz=None, **kwargs):
"""
:Parameters:
period : str
Valid periods: 1d,5d,1mo,3mo,6mo,1y,2y,5y,10y,ytd,max
Either Use period parameter or use start and end
interval : str
Valid intervals: 1m,2m,5m,15m,30m,60m,90m,1h,1d,5d,1wk,1mo,3mo
Intraday data cannot extend beyond the last 60 days
start: str
Download start date string (YYYY-MM-DD) or _datetime.
Default is 1900-01-01
end: str
Download end date string (YYYY-MM-DD) or _datetime.
Default is now
prepost : bool
Include Pre and Post market data in results?
Default is False
auto_adjust: bool
Adjust all OHLC automatically? Default is True
back_adjust: bool
Back-adjusted data to mimic true historical prices
proxy: str
Optional. Proxy server URL scheme. Default is None
rounding: bool
Round values to the precision suggested by Yahoo!?
Optional. Default is True
tz: str
Optional timezone locale for dates.
(default data is returned as non-localized dates)
**kwargs: dict
debug: bool
Optional. If passed as False, will suppress
error message printing to console.
"""
if start or period is None or period.lower() == "max":
if start is None:
start = -2208988800
elif isinstance(start, _datetime.datetime):
start = int(_time.mktime(start.timetuple()))
else:
start = int(_time.mktime(
_time.strptime(str(start), '%Y-%m-%d')))
if end is None:
end = int(_time.time())
elif isinstance(end, _datetime.datetime):
end = int(_time.mktime(end.timetuple()))
else:
end = int(_time.mktime(_time.strptime(str(end), '%Y-%m-%d')))
params = {"period1": start, "period2": end}
else:
period = period.lower()
params = {"range": period}
params["interval"] = interval.lower()
params["includePrePost"] = prepost
params["events"] = "div,splits"
# 1) fix weird bug with Yahoo! - returning 60m for 30m bars
if params["interval"] == "30m":
params["interval"] = "15m"
# setup proxy in requests format
if proxy is not None:
if isinstance(proxy, dict) and "https" in proxy:
proxy = proxy["https"]
proxy = {"https": proxy}
# Getting data from json
url = "{}/v8/finance/chart/{}".format(self._base_url, self.ticker)
data = _requests.get(url=url, params=params, proxies=proxy)
if "Will be right back" in data.text:
raise RuntimeError("*** YAHOO! FINANCE IS CURRENTLY DOWN! ***\n"
"Our engineers are working quickly to resolve "
"the issue. Thank you for your patience.")
data = data.json()
# Work with errors
debug_mode = True
if "debug" in kwargs and isinstance(kwargs["debug"], bool):
debug_mode = kwargs["debug"]
err_msg = "No data found for this date range, symbol may be delisted"
if "chart" in data and data["chart"]["error"]:
err_msg = data["chart"]["error"]["description"]
shared._DFS[self.ticker] = utils.empty_df()
shared._ERRORS[self.ticker] = err_msg
if "many" not in kwargs and debug_mode:
print('- %s: %s' % (self.ticker, err_msg))
return shared._DFS[self.ticker]
elif "chart" not in data or data["chart"]["result"] is None or \
not data["chart"]["result"]:
shared._DFS[self.ticker] = utils.empty_df()
shared._ERRORS[self.ticker] = err_msg
if "many" not in kwargs and debug_mode:
print('- %s: %s' % (self.ticker, err_msg))
return shared._DFS[self.ticker]
# parse quotes
try:
quotes = utils.parse_quotes(data["chart"]["result"][0], tz)
except Exception:
shared._DFS[self.ticker] = utils.empty_df()
shared._ERRORS[self.ticker] = err_msg
if "many" not in kwargs and debug_mode:
print('- %s: %s' % (self.ticker, err_msg))
return shared._DFS[self.ticker]
# 2) fix weird bug with Yahoo! - returning 60m for 30m bars
if interval.lower() == "30m":
quotes2 = quotes.resample('30T')
quotes = _pd.DataFrame(index=quotes2.last().index, data={
'Open': quotes2['Open'].first(),
'High': quotes2['High'].max(),
'Low': quotes2['Low'].min(),
'Close': quotes2['Close'].last(),
'Adj Close': quotes2['Adj Close'].last(),
'Volume': quotes2['Volume'].sum()
})
try:
quotes['Dividends'] = quotes2['Dividends'].max()
except Exception:
pass
try:
quotes['Stock Splits'] = quotes2['Stock Splits'].max()
except Exception:
pass
if auto_adjust:
quotes = utils.auto_adjust(quotes)
elif back_adjust:
quotes = utils.back_adjust(quotes)
if rounding:
quotes = _np.round(quotes, data[
"chart"]["result"][0]["meta"]["priceHint"])
quotes['Volume'] = quotes['Volume'].fillna(0).astype(_np.int64)
quotes.dropna(inplace=True)
# actions
dividends, splits = utils.parse_actions(data["chart"]["result"][0], tz)
# combine
df = _pd.concat([quotes, dividends, splits], axis=1, sort=True)
df["Dividends"].fillna(0, inplace=True)
df["Stock Splits"].fillna(0, inplace=True)
# index eod/intraday
df.index = df.index.tz_localize("UTC").tz_convert(
data["chart"]["result"][0]["meta"]["exchangeTimezoneName"])
if params["interval"][-1] in {"m", "h"}:
df.index.name = "Datetime"
else:
df.index = _pd.to_datetime(df.index.date)
if tz is not None:
df.index = df.index.tz_localize(tz)
df.index.name = "Date"
self._history = df.copy()
if not actions:
df.drop(columns=["Dividends", "Stock Splits"], inplace=True)
return df
# ------------------------
def _get_fundamentals(self, kind=None, proxy=None):
def cleanup(data):
df = _pd.DataFrame(data).drop(columns=['maxAge'])
for col in df.columns:
df[col] = _np.where(
df[col].astype(str) == '-', _np.nan, df[col])
df.set_index('endDate', inplace=True)
try:
df.index = _pd.to_datetime(df.index, unit='s')
except ValueError:
df.index = _pd.to_datetime(df.index)
df = df.T
df.columns.name = ''
df.index.name = 'Breakdown'
df.index = utils.camel2title(df.index)
return df
# setup proxy in requests format
if proxy is not None:
if isinstance(proxy, dict) and "https" in proxy:
proxy = proxy["https"]
proxy = {"https": proxy}
if self._fundamentals:
return
# get info and sustainability
url = '%s/%s' % (self._scrape_url, self.ticker)
data = utils.get_json(url, proxy)
# holders
url = "{}/{}/holders".format(self._scrape_url, self.ticker)
holders = _pd.read_html(url)
self._major_holders = holders[0]
self._institutional_holders = holders[1]
if 'Date Reported' in self._institutional_holders:
self._institutional_holders['Date Reported'] = _pd.to_datetime(
self._institutional_holders['Date Reported'])
if '% Out' in self._institutional_holders:
self._institutional_holders['% Out'] = self._institutional_holders[
'% Out'].str.replace('%', '').astype(float)/100
# sustainability
d = {}
if isinstance(data.get('esgScores'), dict):
for item in data['esgScores']:
if not isinstance(data['esgScores'][item], (dict, list)):
d[item] = data['esgScores'][item]
s = _pd.DataFrame(index=[0], data=d)[-1:].T
s.columns = ['Value']
s.index.name = '%.f-%.f' % (
s[s.index == 'ratingYear']['Value'].values[0],
s[s.index == 'ratingMonth']['Value'].values[0])
self._sustainability = s[~s.index.isin(
['maxAge', 'ratingYear', 'ratingMonth'])]
# info (be nice to python 2)
self._info = {}
items = ['summaryProfile', 'summaryDetail', 'quoteType',
'defaultKeyStatistics', 'assetProfile', 'summaryDetail']
for item in items:
if isinstance(data.get(item), dict):
self._info.update(data[item])
self._info['regularMarketPrice'] = self._info['regularMarketOpen']
self._info['logo_url'] = ""
try:
domain = self._info['website'].split(
'://')[1].split('/')[0].replace('www.', '')
self._info['logo_url'] = 'https://logo.clearbit.com/%s' % domain
except Exception:
pass
# events
try:
cal = _pd.DataFrame(
data['calendarEvents']['earnings'])
cal['earningsDate'] = _pd.to_datetime(
cal['earningsDate'], unit='s')
self._calendar = cal.T
self._calendar.index = utils.camel2title(self._calendar.index)
self._calendar.columns = ['Value']
except Exception:
pass
# analyst recommendations
try:
rec = _pd.DataFrame(
data['upgradeDowngradeHistory']['history'])
rec['earningsDate'] = _pd.to_datetime(
rec['epochGradeDate'], unit='s')
rec.set_index('earningsDate', inplace=True)
rec.index.name = 'Date'
rec.columns = utils.camel2title(rec.columns)
self._recommendations = rec[[
'Firm', 'To Grade', 'From Grade', 'Action']].sort_index()
except Exception:
pass
# get fundamentals
data = utils.get_json(url+'/financials', proxy)
# generic patterns
for key in (
(self._cashflow, 'cashflowStatement', 'cashflowStatements'),
(self._balancesheet, 'balanceSheet', 'balanceSheetStatements'),
(self._financials, 'incomeStatement', 'incomeStatementHistory')
):
item = key[1] + 'History'
if isinstance(data.get(item), dict):
key[0]['yearly'] = cleanup(data[item][key[2]])
item = key[1]+'HistoryQuarterly'
if isinstance(data.get(item), dict):
key[0]['quarterly'] = cleanup(data[item][key[2]])
# earnings
if isinstance(data.get('earnings'), dict):
earnings = data['earnings']['financialsChart']
df = _pd.DataFrame(earnings['yearly']).set_index('date')
df.columns = utils.camel2title(df.columns)
df.index.name = 'Year'
self._earnings['yearly'] = df
df = _pd.DataFrame(earnings['quarterly']).set_index('date')
df.columns = utils.camel2title(df.columns)
df.index.name = 'Quarter'
self._earnings['quarterly'] = df
self._fundamentals = True
def get_recommendations(self, proxy=None, as_dict=False, *args, **kwargs):
self._get_fundamentals(proxy)
data = self._recommendations
if as_dict:
return data.to_dict()
return data
def get_calendar(self, proxy=None, as_dict=False, *args, **kwargs):
self._get_fundamentals(proxy)
data = self._calendar
if as_dict:
return data.to_dict()
return data
def get_major_holders(self, proxy=None, as_dict=False, *args, **kwargs):
self._get_fundamentals(proxy)
data = self._major_holders
if as_dict:
return data.to_dict()
return data
def get_institutional_holders(self, proxy=None, as_dict=False, *args, **kwargs):
self._get_fundamentals(proxy)
data = self._institutional_holders
if as_dict:
return data.to_dict()
return data
def get_info(self, proxy=None, as_dict=False, *args, **kwargs):
self._get_fundamentals(proxy)
data = self._info
if as_dict:
return data.to_dict()
return data
def get_sustainability(self, proxy=None, as_dict=False, *args, **kwargs):
self._get_fundamentals(proxy)
data = self._sustainability
if as_dict:
return data.to_dict()
return data
def get_earnings(self, proxy=None, as_dict=False, freq="yearly"):
self._get_fundamentals(proxy)
data = self._earnings[freq]
if as_dict:
return data.to_dict()
return data
def get_financials(self, proxy=None, as_dict=False, freq="yearly"):
self._get_fundamentals(proxy)
data = self._financials[freq]
if as_dict:
return data.to_dict()
return data
def get_balancesheet(self, proxy=None, as_dict=False, freq="yearly"):
self._get_fundamentals(proxy)
data = self._balancesheet[freq]
if as_dict:
return data.to_dict()
return data
def get_balance_sheet(self, proxy=None, as_dict=False, freq="yearly"):
return self.get_balancesheet(proxy, as_dict, freq)
def get_cashflow(self, proxy=None, as_dict=False, freq="yearly"):
self._get_fundamentals(proxy)
data = self._cashflow[freq]
if as_dict:
return data.to_dict()
return data
def get_dividends(self, proxy=None):
if self._history is None:
self.history(period="max", proxy=proxy)
dividends = self._history["Dividends"]
return dividends[dividends != 0]
def get_splits(self, proxy=None):
if self._history is None:
self.history(period="max", proxy=proxy)
splits = self._history["Stock Splits"]
return splits[splits != 0]
def get_actions(self, proxy=None):
if self._history is None:
self.history(period="max", proxy=proxy)
actions = self._history[["Dividends", "Stock Splits"]]
return actions[actions != 0].dropna(how='all').fillna(0)
def get_isin(self, proxy=None):
# *** experimental ***
if self._isin is not None:
return self._isin
ticker = self.ticker.upper()
if "-" in ticker or "^" in ticker:
self._isin = '-'
return self._isin
# setup proxy in requests format
if proxy is not None:
if isinstance(proxy, dict) and "https" in proxy:
proxy = proxy["https"]
proxy = {"https": proxy}
q = ticker
self.get_info(proxy=proxy)
if "shortName" in self._info:
q = self._info['shortName']
url = 'https://markets.businessinsider.com/ajax/' \
'SearchController_Suggest?max_results=25&query=%s' \
% urlencode(q)
data = _requests.get(url=url, proxies=proxy).text
search_str = '"{}|'.format(ticker)
if search_str not in data:
if q.lower() in data.lower():
search_str = '"|'
if search_str not in data:
self._isin = '-'
return self._isin
else:
self._isin = '-'
return self._isin
self._isin = data.split(search_str)[1].split('"')[0].split('|')[0]
return self._isin
| TickerBase |
string_split.py | print(" a b ".split(None))
print(" a b ".split(None, 1))
print(" a b ".split(None, 2))
print(" a b c ".split(None, 1))
print(" a b c ".split(None, 0))
print(" a b c ".split(None, -1)) | print("a b".split()) |
|
nodep-keytool.py | #!/usr/bin/python
# pure python keytool
import os
import sys
import json
import base64
import hashlib
import datetime
assert sys.version_info < (3,0), "Python 2.7 required"
TZ = '+02:00'
b = 256
q = 2**255 - 19
l = 2**252 + 27742317777372353535851937790883648493
def H(m):
return hashlib.sha512(m).digest()
def expmod(b,e,m):
if e == 0: return 1
t = expmod(b,e/2,m)**2 % m
if e & 1: t = (t*b) % m
return t
def inv(x):
return expmod(x,q-2,q)
d = -121665 * inv(121666)
I = expmod(2,(q-1)/4,q)
def xrecover(y):
xx = (y*y-1) * inv(d*y*y+1)
x = expmod(xx,(q+3)/8,q)
if (x*x - xx) % q != 0: x = (x*I) % q
if x % 2 != 0: x = q-x
return x
By = 4 * inv(5)
Bx = xrecover(By)
B = [Bx % q,By % q]
def edwards(P,Q):
x1 = P[0]
y1 = P[1]
x2 = Q[0]
y2 = Q[1]
x3 = (x1*y2+x2*y1) * inv(1+d*x1*x2*y1*y2)
y3 = (y1*y2+x1*x2) * inv(1-d*x1*x2*y1*y2)
return [x3 % q,y3 % q]
def scalarmult(P,e):
if e == 0: return [0,1]
Q = scalarmult(P,e/2)
Q = edwards(Q,Q)
if e & 1: Q = edwards(Q,P)
return Q
def encodeint(y):
bits = [(y >> i) & 1 for i in range(b)]
return ''.join([chr(sum([bits[i * 8 + j] << j for j in range(8)])) for i in range(b/8)])
def encodepoint(P):
x = P[0]
y = P[1]
bits = [(y >> i) & 1 for i in range(b - 1)] + [x & 1]
return ''.join([chr(sum([bits[i * 8 + j] << j for j in range(8)])) for i in range(b/8)])
def bit(h,i):
return (ord(h[i/8]) >> (i%8)) & 1
def publickey(sk):
h = H(sk)
a = 2**(b-2) + sum(2**i * bit(h,i) for i in range(3,b-2))
A = scalarmult(B,a)
return encodepoint(A)
def Hint(m):
h = H(m)
return sum(2**i * bit(h,i) for i in range(2*b))
def signature(m,sk,pk):
h = H(sk)
a = 2**(b-2) + sum(2**i * bit(h,i) for i in range(3,b-2))
r = Hint(''.join([h[i] for i in range(b/8,b/4)]) + m)
R = scalarmult(B,r)
S = (r + Hint(encodepoint(R) + pk + m) * a) % l
return encodepoint(R) + encodeint(S)
def isoncurve(P):
x = P[0]
y = P[1]
return (-x*x + y*y - 1 - d*x*x*y*y) % q == 0
def decodeint(s):
return sum(2**i * bit(s,i) for i in range(0,b))
def decodepoint(s):
y = sum(2**i * bit(s,i) for i in range(0,b-1))
x = xrecover(y)
if x & 1 != bit(s,b-1): x = q-x
P = [x,y]
if not isoncurve(P):
raise Exception("decoding point that is not on curve")
return P
def checkvalid(s,m,pk):
if len(s) != b/4:
raise Exception("signature length is wrong")
if len(pk) != b/8:
raise Exception("public-key length is wrong")
R = decodepoint(s[0:b/8])
A = decodepoint(pk)
S = decodeint(s[b/8:b/4])
h = Hint(encodepoint(R) + pk + m)
if scalarmult(B,S) != edwards(R,scalarmult(A,h)):
raise Exception("signature does not pass verification")
def generate(seckey='keyseed.dat', entropy=os.urandom):
if os.path.exists(seckey):
print('Error: file already exists {}'.format(seckey))
return 1
sk = entropy(32)
with open(seckey, 'wb') as fp:
fp.write(sk)
os.chmod(seckey, 0o600)
print('Private signing key saved to {}'.format(seckey))
def hash_id(datas):
h1 = hashlib.sha256(datas).digest()
h2 = hashlib.sha256(h1).hexdigest()
return h2[:32]
def dump_binstr(data):
datas = json.dumps(data,
skipkeys=False,
ensure_ascii=False,
sort_keys=True,
separators=(',',':'))
return datas.encode('utf-8')
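# Sign the envelope: 'id' is a truncated double-SHA256 of the canonical JSON, 'sign' a base64-encoded Ed25519 signature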
def data_sign(data, sk, pk):
datas = dump_binstr(data['envelope'])
data['id'] = hash_id(datas)
sign = signature(datas, sk, pk)
data['sign'] = base64.b64encode(sign).rstrip('=')
def data_verify(data, vk):
sign = base64.b64decode(data['sign'])
data_bin = dump_binstr(data['envelope'])
data_hid = hash_id(data_bin)
assert data_hid == data['id'], 'Bad hash ID'
checkvalid(sign, data_bin, vk)
def dump_pretty(data):
datas = json.dumps(data, | ensure_ascii=False,
sort_keys=True)
return datas.encode('utf-8')
def export_pubkey(seckey='keyseed.dat', pubkey='pubkey.json', owner_name='root'):
sk = open(seckey, 'rb').read()
vk = publickey(sk)
vk_s = vk.encode('hex')
dt_now = datetime.datetime.now()
dt_exp = dt_now + datetime.timedelta(days=365)
data = {
'envelope': {
'date': dt_now.isoformat()+TZ,
'model': 'admin/pubkey',
'owner': owner_name,
'payload': {
'algorithm': 'Ed25519',
'owner': owner_name,
'publicKey': vk_s,
'validSince': dt_now.isoformat()+TZ,
'validTill': dt_exp.isoformat()+TZ
}
}
}
data_sign(data, sk, vk)
datas = dump_pretty(data)
with open(pubkey, 'wb') as fp:
        fp.write(datas)  # dump_pretty() already returns UTF-8 bytes
print('Public verifying key saved to {}'.format(pubkey))
def verify_file(pubkey='pubkey.json', datafile=None):
if len(sys.argv) > 2:
datafile = sys.argv[2]
if datafile and datafile != pubkey:
print('Load public key data from {}'.format(pubkey))
print('Verify any data json from {}'.format(datafile))
else:
print('Verify public key data from {}'.format(pubkey))
with open(pubkey) as fp:
data = json.loads(fp.read())
vkey_hex = data['envelope']['payload']['publicKey']
vk = vkey_hex.decode('hex')
if datafile and datafile != pubkey:
with open(datafile) as fp:
data = json.loads(fp.read())
try:
data_verify(data, vk)
print("Result OK")
except Exception as e:
print("Can't verify: {}".format(e))
return 1
return 0
def sign_file(seckey='keyseed.dat', pubkey='pubkey.json', datafile='data.json'):
sk = open(seckey, 'rb').read()
vk = publickey(sk)
vk_s = vk.encode('hex')
with open(pubkey) as fp:
pubkey_data = json.loads(fp.read())
pubkey_payload = pubkey_data['envelope']['payload']
vkey_hex = pubkey_payload['publicKey'].encode()
assert vk_s == vkey_hex, 'Keys mismatch'
print('Load data json from {}'.format(datafile))
with open(datafile) as fp:
data = json.loads(fp.read())
if '--set-date' in sys.argv:
dt_now = datetime.datetime.now()
data['envelope']['date'] = dt_now.isoformat()+TZ
data['envelope']['owner'] = pubkey_payload['owner']
data_sign(data, sk, vk)
if '--compact' in sys.argv:
datas = dump_binstr(data)
else:
datas = dump_pretty(data)
print('Save signed json to {}'.format(datafile))
with open(datafile, 'w') as fp:
fp.write(datas)
def usage():
print('Usage: admin-keytool.py command [args]\n')
print(' admin-keytool.py generate\n')
print(' admin-keytool.py export owner_name\n')
print(' admin-keytool.py verify [pubkey.json]\n')
print(' admin-keytool.py sign anydata.json\n')
return 2
def main():
if len(sys.argv) < 2:
return usage()
cmd = sys.argv[1]
if cmd == 'generate':
return generate()
elif cmd == 'export' and len(sys.argv) > 2:
return export_pubkey(owner_name=sys.argv[2])
elif cmd == 'verify':
return verify_file()
elif cmd == 'sign' and len(sys.argv) > 2:
return sign_file(datafile=sys.argv[2])
else:
return usage()
if __name__ == '__main__':
sys.exit(main()) | indent=2, |
serde.rs | // modified by Bevy contributors
use crate::Entity;
use serde::{de::Visitor, Deserialize, Serialize, Serializer};
impl Serialize for Entity {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
serializer.serialize_u32(self.id())
}
}
impl<'de> Deserialize<'de> for Entity {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
|
}
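// Serde visitor that reconstructs an Entity from its serialized u32 id.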
struct EntityVisitor;
impl<'de> Visitor<'de> for EntityVisitor {
type Value = Entity;
fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
formatter.write_str("expected Entity")
}
fn visit_u32<E>(self, v: u32) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
Ok(Entity::new(v))
}
}
| {
deserializer.deserialize_u32(EntityVisitor)
} |
molecule.rs | use atom::Atom;
use ion::Ion;
use namings::*;
use trait_element::Element;
use trait_properties::Properties;
use types::*;
#[derive(Debug, Eq, PartialEq, Clone, Hash)]
/// A molecule
pub struct Molecule {
/// The compounds it contains
pub compounds: Vec<MoleculeCompound>,
}
#[derive(Debug, Eq, PartialEq, Clone, Hash)]
/// A compound of a molecule
pub struct MoleculeCompound {
/// The atom it uses
pub atom: Atom,
/// The amount
pub amount: u8,
}
impl Molecule {
/// Convert a string representation of a molecule into one
/// TODO: Parse parentheses, e.g. Ca3(PO4)2
pub fn from_string(string: &str) -> Option<Molecule> {
let mut compounds = vec![];
let mut token = String::new();
for c in string.chars() {
// Ignore whitespace
if is_whitespace!(c) {
continue;
}
if is_upper!(c) && !token.is_empty() {
let compound = MoleculeCompound::from_string(&token).unwrap();
compounds.push(compound);
token = String::new();
}
token.push(c);
}
// If some tokens remain, convert it into a compound
if !token.is_empty() {
if let Some(compound) = MoleculeCompound::from_string(&token) {
compounds.push(compound);
}
}
if !compounds.is_empty() {
Some(Molecule { compounds })
} else {
None
}
}
}
impl MoleculeCompound {
/// Takes a symbol string representing a MoleculeCompound, and turns it into one
pub fn from_string(string: &str) -> Option<MoleculeCompound> {
let mut amount = 0;
let mut token = String::new();
for c in string.chars() {
if is_letter!(c) {
token.push(c);
} else if is_number!(c) {
amount *= 10;
amount += to_number!(c);
} else {
panic!(
"Invalid character '{}' in string \"{}\" for MoleculeCompound",
c,
string
);
}
}
// If no amount given, assume 1
if amount == 0 {
amount = 1;
}
if let Some(atom) = Atom::from_string(&token) {
Some(MoleculeCompound { atom, amount })
} else {
panic!("Failed to find Atom for {}", &token);
}
}
/// Converts an Atom into a MoleculeCompound, taking care of diatomic ones
pub fn from_atom(atom: Atom) -> MoleculeCompound {
let amount = if atom.diatomic { 2 } else { 1 };
MoleculeCompound { atom, amount }
}
}
impl Properties for Molecule {
fn symbol(&self) -> String {
let mut symbol = String::new();
for compound in &self.compounds {
symbol += &compound.symbol();
}
symbol
}
fn name(&self) -> String {
let mut name = String::new();
// TODO: Add special cases
// NOTE: https://www.youtube.com/watch?v=mlRhLicNo8Q
for compound in &self.compounds {
name += &compound.name();
}
name
}
fn mass(&self) -> AtomMass {
let mut mass = AtomMass::from(0.0);
for compound in &self.compounds {
mass += compound.mass();
}
mass
}
fn is_diatomic(&self) -> bool {
self.compounds.len() == 1 && self.compounds[0].amount == 2 &&
self.compounds[0].atom.diatomic
}
}
impl Properties for MoleculeCompound {
fn symbol(&self) -> String {
let mut symbol = String::new();
symbol += &self.atom.symbol();
if self.amount > 1 {
symbol += &subscript(self.amount);
}
symbol
}
fn | (&self) -> String {
let mut name = String::new();
if self.amount > 1 {
name += &number_to_greek(self.amount);
}
name += &self.atom.name();
name
}
fn mass(&self) -> AtomMass {
self.atom.mass.clone() * (AtomMass_type::from(self.amount))
}
fn is_diatomic(&self) -> bool {
false
}
}
impl Element for Molecule {
fn get_charge(&self) -> Option<AtomCharge> {
Some(AtomCharge::from(0))
}
fn get_molecule(self) -> Option<Molecule> {
Some(self)
}
fn get_ion(self) -> Option<Ion> {
Some(Ion::from_molecule(self.clone()))
}
}
| name |
option1.rs | // option1.rs
// Make me compile! Execute `rustlings hint option1` for hints
// you can modify anything EXCEPT for this function's sig
fn print_number(maybe_number: Option<u16>) {
println!("printing: {}", maybe_number.unwrap());
}
fn main() | {
print_number(Some (13));
print_number(Some (99));
let mut numbers: [Option<u16>; 5] = [None;5];
for iter in 0..5 {
let number_to_add: u16 = {
((iter * 1235) + 2) / (4 * 16)
};
numbers[iter as usize] = Some(number_to_add);
}
} |
|
meter_test.go | // Copyright (c) 2018 The MATRIX Authors
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php
package metrics
import (
"testing"
"time"
)
func BenchmarkMeter(b *testing.B) {
m := NewMeter()
b.ResetTimer()
for i := 0; i < b.N; i++ {
m.Mark(1)
}
}
func TestGetOrRegisterMeter(t *testing.T) {
r := NewRegistry()
NewRegisteredMeter("foo", r).Mark(47)
if m := GetOrRegisterMeter("foo", r); 47 != m.Count() {
t.Fatal(m)
}
}
func TestMeterDecay(t *testing.T) {
ma := meterArbiter{
ticker: time.NewTicker(time.Millisecond),
meters: make(map[*StandardMeter]struct{}),
}
m := newStandardMeter()
ma.meters[m] = struct{}{}
go ma.tick()
m.Mark(1)
rateMean := m.RateMean()
time.Sleep(100 * time.Millisecond)
if m.RateMean() >= rateMean {
t.Error("m.RateMean() didn't decrease")
}
}
func TestMeterNonzero(t *testing.T) {
m := NewMeter()
m.Mark(3)
if count := m.Count(); 3 != count {
t.Errorf("m.Count(): 3 != %v\n", count)
}
}
func TestMeterStop(t *testing.T) {
l := len(arbiter.meters)
m := NewMeter()
if len(arbiter.meters) != l+1 {
t.Errorf("arbiter.meters: %d != %d\n", l+1, len(arbiter.meters))
}
m.Stop()
if len(arbiter.meters) != l {
t.Errorf("arbiter.meters: %d != %d\n", l, len(arbiter.meters))
}
}
func TestMeterSnapshot(t *testing.T) { |
func TestMeterZero(t *testing.T) {
m := NewMeter()
if count := m.Count(); 0 != count {
t.Errorf("m.Count(): 0 != %v\n", count)
}
}
|
m := NewMeter()
m.Mark(1)
if snapshot := m.Snapshot(); m.RateMean() != snapshot.RateMean() {
t.Fatal(snapshot)
}
}
|
SUP_INS_ANCH.py | # Created by Andrzej Lach @ 2021
# https://github.com/AndrzejLach89
from aqa.math import *
from varmain.primitiv import *
from varmain.custom import *
import math
@activate(Group="Support", Ports=1, TooltipShort="Support - insulated, anchor", TooltipLong="Support - insulated, anchor", LengthUnit="mm")
@group("MainDimensions")
@param(D=LENGTH, TooltipShort="Pipe diameter")
@param(H=LENGTH, TooltipShort="Height", Ask4Dist=True)
@param(CL=LENGTH, TooltipShort="Clamp length")
@param(CT=LENGTH, TooltipShort="Clamp thickness")
@param(CW=LENGTH, TooltipShort="Clamp width")
@param(CO=LENGTH, TooltipShort="Clamp offset")
@param(W=LENGTH, TooltipShort="Bottom plate width")
@param(L=LENGTH, TooltipShort="Bottom plate length")
@param(T=LENGTH, TooltipShort="Plate thickness")
@param(NUT=LENGTH, TooltipShort="Nut size (Mxx)")
@param(PA=LENGTH, TooltipShort="Front/back plate width")
@param(PT=LENGTH, TooltipShort="Front/back plate thickness")
@param(LT=LENGTH, TooltipShort="Total length")
def SUP_INS_ANCH(s, D=114.3, H=192, CL=50, CT=8, W=100, L=200, T=11, CW=226, CO=10, NUT=16, PA=60, PT=8, LT=230, ID='SUP_INS_ANCH', **kw):
nutSizes = {
8: {'h': 6.500, 'd': 13.000, 'x': 7.5056},
12: {'h': 10.000, 'd': 18.000, 'x': 10.3923},
16: {'h': 13.000, 'd': 24.000, 'x': 13.8564},
20: {'h': 16.000, 'd': 30.000, 'x': 17.3205},
24: {'h': 19.000, 'd': 36.000, 'x': 20.7846}
}
if NUT not in nutSizes:
NUT = min(nutSizes, key=lambda x:abs(x-NUT))
nutType = nutSizes[NUT]
if D <= 0 or H <=0 or CL <= 0 or CT <= 0 or T<=0 or PA<=0 or PT<=0:
return
if LT < L + 2*PT:
LT = L + 2*PT
if W < T:
W = T
body = BOX(s, L=T, W=H-D/2-T/2, H=L).translate((0, 0, (H-D/2-T/2)/2-H))
hPlate = BOX(s, L=W, W=T, H=L).translate((0, 0, T/2 - H))
body.uniteWith(hPlate)
hPlate.erase()
endPlateTranslations = ((L/2+PT/2, 0, -H/2+T/4), (-(L/2+PT/2), 0, -H/2+T/4))
endPlates = []
for i in endPlateTranslations:
endPlates.append(BOX(s,L=PA, W=H, H=PT).translate(i))
for i in endPlates:
endPlateCut = CYLINDER(s, R=D/2 + CT, H=LT, O=0).rotateY(90).translate((-LT/2, 0, 0))
i.subtractFrom(endPlateCut)
endPlateCut.erase()
for i in endPlates:
body.uniteWith(i)
i.erase()
endPlates.clear()
clamps = []
cnt = 0
clampOffset = ((CL/2 - LT/2, 0, 0), (-CL/2 + LT/2, 0, 0))
# bolts
nutHeight = nutType['h']
nutLength = nutType['d']
nutWidth = nutType['x']
cutRadius = math.sqrt(math.pow(nutHeight, 2) + math.pow(nutLength/2, 2))
for off in clampOffset:
clamps.append(CYLINDER(s, R=D/2+CT, H=CL, O=D/2).rotateY(90).translate((-CL/2, 0, 0)))
clampH = BOX(s, L=CW, W=2*CT+CO, H=CL)
vPlateCut = CYLINDER(s, R=D/2, H=CL, O=0).rotateY(90).translate((-CL/2, 0, 0))
clampH.subtractFrom(vPlateCut)
clamps[cnt].uniteWith(clampH)
if CO > 0:
clampCut = BOX(s, L=CW, W=CO, H=CL)
clamps[cnt].subtractFrom(clampCut)
clamps[cnt].translate(clampOffset[cnt])
| mainOffsets = ((0, CW/2-(CW/2 - D/2 - CT)/2, 0), (0, -CW/2+(CW/2 - D/2 - CT)/2, 0))
boltH = 2*nutHeight + 2*CT + CO + 5
boltR = NUT/2
boltOffset = (0, 0, -boltH + CO/2 + CT + nutHeight)
bolts = []
nut1offset = (0, 0, nutHeight/2 + CO/2 + CT)
nut2offset = (0, 0, -nutHeight/2 - CO/2 - CT)
nutOffsets = (nut1offset, nut2offset)
for x in mainOffsets:
bolt = CYLINDER(s, R=boltR, H=boltH, O=0).translate(boltOffset)
boltHole = CYLINDER(s, R=boltR+0.5, H=boltH, O=0).translate(boltOffset)
nutParts = []
nc = 0
for i in nutOffsets:
nutParts.append(BOX(s, L=nutWidth, W=nutHeight, H=nutLength).translate(i))
p1 = BOX(s, L=nutWidth, W=nutHeight, H=nutLength).rotateZ(60).translate(i)
p2 = BOX(s, L=nutWidth, W=nutHeight, H=nutLength).rotateZ(120).translate(i)
#nutParts.append(BOX(s, L=nutWidth, W=nutHeight, H=nutLength).rotateZ(60).translate(i))
#nutParts.append(BOX(s, L=nutWidth, W=nutHeight, H=nutLength).rotateZ(120).translate(i))
c1 = HALFSPHERE(s, R=cutRadius).translate(i).translate((0, 0, -nutHeight/2))
nutParts[nc].uniteWith(p1)
nutParts[nc].uniteWith(p2)
nutParts[nc].intersectWith(c1)
p1.erase()
p2.erase()
c1.erase()
if nc == 1:
c2 = HALFSPHERE(s, R=cutRadius).rotateX(180).translate(i).translate((0, 0, nutHeight/2))
nutParts[nc].intersectWith(c2)
c2.erase()
nc += 1
for i in nutParts:
bolt.uniteWith(i)
bolt.translate(x)
boltHole.translate(x)
bolt.translate(clampOffset[cnt])
boltHole.translate(clampOffset[cnt])
clamps[cnt].subtractFrom(boltHole)
clamps[cnt].uniteWith(bolt)
bolt.erase()
boltHole.erase()
body.uniteWith(clamps[cnt])
cnt += 1
clamps.clear()
s.setPoint((0.000, 0.000, 0.000), (1.000, 0.000, 0.000))
s.setLinearDimension('H',(0, 0, 0), (0, 0, -H)) | |
svgbuilder.js | /*
Copyright (c) 2020 Trashbots - SDG
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
| The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
/** @module svgBuilder */
module.exports = function () {
/** @class svgBuilder */
var svgBuilder = {};
svgBuilder.ns = 'http://www.w3.org/2000/svg';
svgBuilder.xlinkns = 'http://www.w3.org/1999/xlink';
/**
* A class for building SVG path descriptions
*/
/** @summary pathBuilder */
svgBuilder.pathBuilder = {
//* relative pen relocation with no drawing
move: function (dx, dy) {
return 'm' + dx + ' ' + dy + ' ';
},
    //* relative horizontal line
hline: function (dx) {
return 'h' + dx + ' ';
},
//* relative vertical line
vline: function (dy) {
return 'v' + dy + ' ';
},
//* relative straight line
line: function (dx, dy) {
return 'l' + dx + ' ' + dy + ' ';
},
//* arc path element
arc: function (radius, degrees, large, sweep, dx, dy) {
var text = 'a' + radius + ' ' + radius + ' ' + degrees;
text += ' ' + large + ' ' + sweep + ' ' + dx + ' ' + dy + ' ';
return text;
},
//* path closing
close: function () {
return 'z ';
}
};
/** @function */
svgBuilder.createUse = function createSymbolUse(elementClass, symbolName) {
var elt = document.createElementNS(svgBuilder.ns, 'use');
elt.setAttribute('class', elementClass);
elt.setAttributeNS(svgBuilder.xlinkns, 'xlink:href', symbolName);
return elt;
};
svgBuilder.resizeRect = function resizeRect(elt, w, h) {
elt.setAttribute('width', String(w) + 'px');
elt.setAttribute('height', String(h) + 'px');
};
svgBuilder.translateXY = function translateXY(elt, x, y) {
elt.setAttribute('transform', 'translate (' + String(x) + ' ' + String(y) + ')');
};
svgBuilder.createRect = function createRect(elementClass, x, y, w, h, rxy) {
var elt = document.createElementNS(svgBuilder.ns, 'rect');
elt.setAttribute('class', elementClass);
elt.setAttribute('x', x);
elt.setAttribute('y', y);
this.resizeRect(elt, w, h);
if (rxy !== undefined) {
elt.setAttribute('rx', rxy);
elt.setAttribute('ry', rxy);
}
return elt;
};
  svgBuilder.createCircle = function createCircle(elementClass, cx, cy, r) {
var elt = document.createElementNS(svgBuilder.ns, 'circle');
elt.setAttribute('class', elementClass);
elt.setAttribute('cx', cx);
elt.setAttribute('cy', cy);
elt.setAttribute('r', r);
return elt;
};
svgBuilder.createGroup = function createGroup(elementClass, x, y) {
var elt = document.createElementNS(svgBuilder.ns, 'g');
elt.setAttribute('class', elementClass);
elt.setAttribute ('transform', 'translate (' + x + ' ' + y + ')');
return elt;
};
svgBuilder.createText = function createText(elementClass, x, y, text) {
var elt = document.createElementNS(svgBuilder.ns, 'text');
elt.setAttribute('class', elementClass);
elt.setAttribute('x', x);
elt.setAttribute('y', y);
elt.textContent = text;
return elt;
};
  svgBuilder.createPath = function createPath(elementClass, pathData) {
var elt = document.createElementNS(svgBuilder.ns, 'path');
elt.setAttribute('class', elementClass);
elt.setAttribute('d', pathData);
return elt;
};
return svgBuilder;
}(); | |
primitive_reuse_peer.rs | extern crate futures;
extern crate tokio_io;
use futures::future::ok;
use std::cell::RefCell;
use std::rc::Rc;
use super::{BoxedNewPeerFuture, Peer};
use std::io::{Error as IoError, Read, Write};
use tokio_io::{AsyncRead, AsyncWrite};
use super::{once, ConstructParams, PeerConstructor, Specifier};
use futures::Future;
use std::ops::DerefMut;
#[derive(Debug)]
pub struct Reuser(pub Rc<dyn Specifier>);
impl Specifier for Reuser {
fn construct(&self, p: ConstructParams) -> PeerConstructor {
let send_zero_msg_on_disconnect = p.program_options.reuser_send_zero_msg_on_disconnect;
let reuser = p.global(GlobalState::default).clone();
let mut reuser = reuser.clone();
let l2r = p.left_to_right.clone();
let inner = || self.0.construct(p).get_only_first_conn(l2r);
once(connection_reuser(
&mut reuser,
inner,
send_zero_msg_on_disconnect,
))
}
specifier_boilerplate!(singleconnect has_subspec globalstate);
self_0_is_subspecifier!(...);
}
specifier_class!(
name = ReuserClass,
target = Reuser,
prefixes = ["reuse-raw:", "raw-reuse:"],
arg_handling = subspec,
overlay = true,
MessageBoundaryStatusDependsOnInnerType,
SingleConnect,
help = r#"
Reuse subspecifier for serving multiple clients: unpredictable mode. [A]
Better used with --unidirectional, otherwise replies get directed to
random connected client.
Example: Forward multiple parallel WebSocket connections to a single persistent TCP connection
websocat -u ws-l:0.0.0.0:8800 reuse:tcp:127.0.0.1:4567
Example (unreliable): don't disconnect SSH when websocket reconnects
websocat ws-l:[::]:8088 reuse:tcp:127.0.0.1:22 |
#[derive(Default, Clone)]
pub struct GlobalState(PeerSlot);
#[derive(Clone)]
struct PeerHandle(PeerSlot, bool);
impl Read for PeerHandle {
fn read(&mut self, b: &mut [u8]) -> Result<usize, IoError> {
if let Some(ref mut x) = *self.0.borrow_mut().deref_mut() {
x.0.read(b)
} else {
unreachable!()
}
}
}
impl AsyncRead for PeerHandle {}
impl Write for PeerHandle {
fn write(&mut self, b: &[u8]) -> Result<usize, IoError> {
if let Some(ref mut x) = *self.0.borrow_mut().deref_mut() {
x.1.write(b)
} else {
unreachable!()
}
}
fn flush(&mut self) -> Result<(), IoError> {
if let Some(ref mut x) = *self.0.borrow_mut().deref_mut() {
x.1.flush()
} else {
unreachable!()
}
}
}
impl AsyncWrite for PeerHandle {
fn shutdown(&mut self) -> futures::Poll<(), IoError> {
if self.1 {
let _ = self.write(b"");
}
if let Some(ref mut _x) = *self.0.borrow_mut().deref_mut() {
// Ignore shutdown attempts
Ok(futures::Async::Ready(()))
//_x.1.shutdown()
} else {
unreachable!()
}
}
}
pub fn connection_reuser<F: FnOnce() -> BoxedNewPeerFuture>(
s: &mut GlobalState,
inner_peer: F,
send_zero_msg_on_disconnect: bool,
) -> BoxedNewPeerFuture {
let need_init = s.0.borrow().is_none();
let rc = s.0.clone();
if need_init {
info!("Initializing");
Box::new(inner_peer().and_then(move |inner| {
{
let mut b = rc.borrow_mut();
let x: &mut Option<Peer> = b.deref_mut();
*x = Some(inner);
}
let ps: PeerSlot = rc.clone();
let ph1 = PeerHandle(ps, send_zero_msg_on_disconnect);
let ph2 = ph1.clone();
let peer = Peer::new(ph1, ph2, None /* TODO */);
ok(peer)
})) as BoxedNewPeerFuture
} else {
info!("Reusing");
let ps: PeerSlot = rc.clone();
let ph1 = PeerHandle(ps, send_zero_msg_on_disconnect);
let ph2 = ph1.clone();
let peer = Peer::new(ph1, ph2, None /* TODO */);
Box::new(ok(peer)) as BoxedNewPeerFuture
}
} | "#
);
type PeerSlot = Rc<RefCell<Option<Peer>>>; |
asset_group_listing_group_filter_service.pb.go | // Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. | // Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.25.0
// protoc v3.18.1
// source: google/ads/googleads/v9/services/asset_group_listing_group_filter_service.proto
package services
import (
context "context"
proto "github.com/golang/protobuf/proto"
enums "github.com/scotthenley/go-googleads/pb/v9/enums"
resources "github.com/scotthenley/go-googleads/pb/v9/resources"
_ "google.golang.org/genproto/googleapis/api/annotations"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb"
reflect "reflect"
sync "sync"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// This is a compile-time assertion that a sufficiently up-to-date version
// of the legacy proto package is being used.
const _ = proto.ProtoPackageIsVersion4
// Request message for
// [AssetGroupListingGroupFilterService.MutateAssetGroupListingGroupFilters][google.ads.googleads.v9.services.AssetGroupListingGroupFilterService.MutateAssetGroupListingGroupFilters].
// partial_failure is not supported because the tree needs to be validated
// together.
type MutateAssetGroupListingGroupFiltersRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Required. The ID of the customer whose asset group listing group filters are being
// modified.
CustomerId string `protobuf:"bytes,1,opt,name=customer_id,json=customerId,proto3" json:"customer_id,omitempty"`
// Required. The list of operations to perform on individual asset group listing group
// filters.
Operations []*AssetGroupListingGroupFilterOperation `protobuf:"bytes,2,rep,name=operations,proto3" json:"operations,omitempty"`
// If true, the request is validated but not executed. Only errors are
// returned, not results.
ValidateOnly bool `protobuf:"varint,3,opt,name=validate_only,json=validateOnly,proto3" json:"validate_only,omitempty"`
// The response content type setting. Determines whether the mutable resource
// or just the resource name should be returned post mutation.
ResponseContentType enums.ResponseContentTypeEnum_ResponseContentType `protobuf:"varint,4,opt,name=response_content_type,json=responseContentType,proto3,enum=google.ads.googleads.v9.enums.ResponseContentTypeEnum_ResponseContentType" json:"response_content_type,omitempty"`
}
func (x *MutateAssetGroupListingGroupFiltersRequest) Reset() {
*x = MutateAssetGroupListingGroupFiltersRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_google_ads_googleads_v9_services_asset_group_listing_group_filter_service_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *MutateAssetGroupListingGroupFiltersRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*MutateAssetGroupListingGroupFiltersRequest) ProtoMessage() {}
func (x *MutateAssetGroupListingGroupFiltersRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_ads_googleads_v9_services_asset_group_listing_group_filter_service_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use MutateAssetGroupListingGroupFiltersRequest.ProtoReflect.Descriptor instead.
func (*MutateAssetGroupListingGroupFiltersRequest) Descriptor() ([]byte, []int) {
return file_google_ads_googleads_v9_services_asset_group_listing_group_filter_service_proto_rawDescGZIP(), []int{0}
}
func (x *MutateAssetGroupListingGroupFiltersRequest) GetCustomerId() string {
if x != nil {
return x.CustomerId
}
return ""
}
func (x *MutateAssetGroupListingGroupFiltersRequest) GetOperations() []*AssetGroupListingGroupFilterOperation {
if x != nil {
return x.Operations
}
return nil
}
func (x *MutateAssetGroupListingGroupFiltersRequest) GetValidateOnly() bool {
if x != nil {
return x.ValidateOnly
}
return false
}
func (x *MutateAssetGroupListingGroupFiltersRequest) GetResponseContentType() enums.ResponseContentTypeEnum_ResponseContentType {
if x != nil {
return x.ResponseContentType
}
return enums.ResponseContentTypeEnum_UNSPECIFIED
}
// A single operation (create, remove) on an asset group listing group filter.
type AssetGroupListingGroupFilterOperation struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// FieldMask that determines which resource fields are modified in an update.
UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,4,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"`
// The mutate operation.
//
// Types that are assignable to Operation:
// *AssetGroupListingGroupFilterOperation_Create
// *AssetGroupListingGroupFilterOperation_Update
// *AssetGroupListingGroupFilterOperation_Remove
Operation isAssetGroupListingGroupFilterOperation_Operation `protobuf_oneof:"operation"`
}
func (x *AssetGroupListingGroupFilterOperation) Reset() {
*x = AssetGroupListingGroupFilterOperation{}
if protoimpl.UnsafeEnabled {
mi := &file_google_ads_googleads_v9_services_asset_group_listing_group_filter_service_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *AssetGroupListingGroupFilterOperation) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*AssetGroupListingGroupFilterOperation) ProtoMessage() {}
func (x *AssetGroupListingGroupFilterOperation) ProtoReflect() protoreflect.Message {
mi := &file_google_ads_googleads_v9_services_asset_group_listing_group_filter_service_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use AssetGroupListingGroupFilterOperation.ProtoReflect.Descriptor instead.
func (*AssetGroupListingGroupFilterOperation) Descriptor() ([]byte, []int) {
return file_google_ads_googleads_v9_services_asset_group_listing_group_filter_service_proto_rawDescGZIP(), []int{1}
}
func (x *AssetGroupListingGroupFilterOperation) GetUpdateMask() *fieldmaskpb.FieldMask {
if x != nil {
return x.UpdateMask
}
return nil
}
func (m *AssetGroupListingGroupFilterOperation) GetOperation() isAssetGroupListingGroupFilterOperation_Operation {
if m != nil {
return m.Operation
}
return nil
}
func (x *AssetGroupListingGroupFilterOperation) GetCreate() *resources.AssetGroupListingGroupFilter {
if x, ok := x.GetOperation().(*AssetGroupListingGroupFilterOperation_Create); ok {
return x.Create
}
return nil
}
func (x *AssetGroupListingGroupFilterOperation) GetUpdate() *resources.AssetGroupListingGroupFilter {
if x, ok := x.GetOperation().(*AssetGroupListingGroupFilterOperation_Update); ok {
return x.Update
}
return nil
}
func (x *AssetGroupListingGroupFilterOperation) GetRemove() string {
if x, ok := x.GetOperation().(*AssetGroupListingGroupFilterOperation_Remove); ok {
return x.Remove
}
return ""
}
type isAssetGroupListingGroupFilterOperation_Operation interface {
isAssetGroupListingGroupFilterOperation_Operation()
}
type AssetGroupListingGroupFilterOperation_Create struct {
// Create operation: No resource name is expected for the new asset group
// listing group filter.
Create *resources.AssetGroupListingGroupFilter `protobuf:"bytes,1,opt,name=create,proto3,oneof"`
}
type AssetGroupListingGroupFilterOperation_Update struct {
// Update operation: The asset group listing group filter is expected to
// have a valid resource name.
Update *resources.AssetGroupListingGroupFilter `protobuf:"bytes,2,opt,name=update,proto3,oneof"`
}
type AssetGroupListingGroupFilterOperation_Remove struct {
// Remove operation: A resource name for the removed asset group listing
// group filter is expected, in this format:
// `customers/{customer_id}/assetGroupListingGroupFilters/{asset_group_id}~{listing_group_filter_id}`
// An entity can be removed only if it's not referenced by other
// parent_listing_group_id. If multiple entities are being deleted, the
// mutates must be in the correct order.
Remove string `protobuf:"bytes,3,opt,name=remove,proto3,oneof"`
}
func (*AssetGroupListingGroupFilterOperation_Create) isAssetGroupListingGroupFilterOperation_Operation() {
}
func (*AssetGroupListingGroupFilterOperation_Update) isAssetGroupListingGroupFilterOperation_Operation() {
}
func (*AssetGroupListingGroupFilterOperation_Remove) isAssetGroupListingGroupFilterOperation_Operation() {
}
// Response message for an asset group listing group filter mutate.
type MutateAssetGroupListingGroupFiltersResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// All results for the mutate.
Results []*MutateAssetGroupListingGroupFilterResult `protobuf:"bytes,1,rep,name=results,proto3" json:"results,omitempty"`
}
func (x *MutateAssetGroupListingGroupFiltersResponse) Reset() {
*x = MutateAssetGroupListingGroupFiltersResponse{}
if protoimpl.UnsafeEnabled {
mi := &file_google_ads_googleads_v9_services_asset_group_listing_group_filter_service_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *MutateAssetGroupListingGroupFiltersResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*MutateAssetGroupListingGroupFiltersResponse) ProtoMessage() {}
func (x *MutateAssetGroupListingGroupFiltersResponse) ProtoReflect() protoreflect.Message {
mi := &file_google_ads_googleads_v9_services_asset_group_listing_group_filter_service_proto_msgTypes[2]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use MutateAssetGroupListingGroupFiltersResponse.ProtoReflect.Descriptor instead.
func (*MutateAssetGroupListingGroupFiltersResponse) Descriptor() ([]byte, []int) {
return file_google_ads_googleads_v9_services_asset_group_listing_group_filter_service_proto_rawDescGZIP(), []int{2}
}
func (x *MutateAssetGroupListingGroupFiltersResponse) GetResults() []*MutateAssetGroupListingGroupFilterResult {
if x != nil {
return x.Results
}
return nil
}
// The result for the asset group listing group filter mutate.
type MutateAssetGroupListingGroupFilterResult struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Returned for successful operations.
ResourceName string `protobuf:"bytes,1,opt,name=resource_name,json=resourceName,proto3" json:"resource_name,omitempty"`
// The mutated AssetGroupListingGroupFilter with only mutable fields after
// mutate. The field will only be returned when response_content_type is set
// to "MUTABLE_RESOURCE".
AssetGroupListingGroupFilter *resources.AssetGroupListingGroupFilter `protobuf:"bytes,2,opt,name=asset_group_listing_group_filter,json=assetGroupListingGroupFilter,proto3" json:"asset_group_listing_group_filter,omitempty"`
}
func (x *MutateAssetGroupListingGroupFilterResult) Reset() {
*x = MutateAssetGroupListingGroupFilterResult{}
if protoimpl.UnsafeEnabled {
mi := &file_google_ads_googleads_v9_services_asset_group_listing_group_filter_service_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *MutateAssetGroupListingGroupFilterResult) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*MutateAssetGroupListingGroupFilterResult) ProtoMessage() {}
func (x *MutateAssetGroupListingGroupFilterResult) ProtoReflect() protoreflect.Message {
mi := &file_google_ads_googleads_v9_services_asset_group_listing_group_filter_service_proto_msgTypes[3]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use MutateAssetGroupListingGroupFilterResult.ProtoReflect.Descriptor instead.
func (*MutateAssetGroupListingGroupFilterResult) Descriptor() ([]byte, []int) {
return file_google_ads_googleads_v9_services_asset_group_listing_group_filter_service_proto_rawDescGZIP(), []int{3}
}
func (x *MutateAssetGroupListingGroupFilterResult) GetResourceName() string {
if x != nil {
return x.ResourceName
}
return ""
}
func (x *MutateAssetGroupListingGroupFilterResult) GetAssetGroupListingGroupFilter() *resources.AssetGroupListingGroupFilter {
if x != nil {
return x.AssetGroupListingGroupFilter
}
return nil
}
var File_google_ads_googleads_v9_services_asset_group_listing_group_filter_service_proto protoreflect.FileDescriptor
var file_google_ads_googleads_v9_services_asset_group_listing_group_filter_service_proto_rawDesc = []byte{
0x0a, 0x4f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x61, 0x64, 0x73, 0x2f, 0x76, 0x39, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63,
0x65, 0x73, 0x2f, 0x61, 0x73, 0x73, 0x65, 0x74, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x6c,
0x69, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x66, 0x69, 0x6c,
0x74, 0x65, 0x72, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x12, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x73, 0x2e, 0x67, 0x6f,
0x6f, 0x67, 0x6c, 0x65, 0x61, 0x64, 0x73, 0x2e, 0x76, 0x39, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69,
0x63, 0x65, 0x73, 0x1a, 0x39, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x64, 0x73, 0x2f,
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x64, 0x73, 0x2f, 0x76, 0x39, 0x2f, 0x65, 0x6e, 0x75,
0x6d, 0x73, 0x2f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74,
0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x48,
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
0x65, 0x61, 0x64, 0x73, 0x2f, 0x76, 0x39, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
0x73, 0x2f, 0x61, 0x73, 0x73, 0x65, 0x74, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x6c, 0x69,
0x73, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x66, 0x69, 0x6c, 0x74,
0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73,
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61,
0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c,
0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x22, 0xe5, 0x02, 0x0a, 0x2a, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x41, 0x73, 0x73,
0x65, 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x4c, 0x69, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x47, 0x72,
0x6f, 0x75, 0x70, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
0x74, 0x12, 0x24, 0x0a, 0x0b, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x5f, 0x69, 0x64,
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x63, 0x75, 0x73,
0x74, 0x6f, 0x6d, 0x65, 0x72, 0x49, 0x64, 0x12, 0x6c, 0x0a, 0x0a, 0x6f, 0x70, 0x65, 0x72, 0x61,
0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x47, 0x2e, 0x67, 0x6f,
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61,
0x64, 0x73, 0x2e, 0x76, 0x39, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x41,
0x73, 0x73, 0x65, 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x4c, 0x69, 0x73, 0x74, 0x69, 0x6e, 0x67,
0x47, 0x72, 0x6f, 0x75, 0x70, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x4f, 0x70, 0x65, 0x72, 0x61,
0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x6f, 0x70, 0x65, 0x72, 0x61,
0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74,
0x65, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x76, 0x61,
0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x7e, 0x0a, 0x15, 0x72, 0x65,
0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x74,
0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x4a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
0x6c, 0x65, 0x2e, 0x61, 0x64, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x64, 0x73,
0x2e, 0x76, 0x39, 0x2e, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
0x73, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x45, 0x6e, 0x75,
0x6d, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e,
0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x13, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x43,
0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x22, 0xc1, 0x02, 0x0a, 0x25, 0x41,
0x73, 0x73, 0x65, 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x4c, 0x69, 0x73, 0x74, 0x69, 0x6e, 0x67,
0x47, 0x72, 0x6f, 0x75, 0x70, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x4f, 0x70, 0x65, 0x72, 0x61,
0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3b, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d,
0x61, 0x73, 0x6b, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c,
0x64, 0x4d, 0x61, 0x73, 0x6b, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73,
0x6b, 0x12, 0x59, 0x0a, 0x06, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
0x0b, 0x32, 0x3f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x73, 0x2e, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x64, 0x73, 0x2e, 0x76, 0x39, 0x2e, 0x72, 0x65, 0x73, 0x6f,
0x75, 0x72, 0x63, 0x65, 0x73, 0x2e, 0x41, 0x73, 0x73, 0x65, 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70,
0x4c, 0x69, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x46, 0x69, 0x6c, 0x74,
0x65, 0x72, 0x48, 0x00, 0x52, 0x06, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x59, 0x0a, 0x06,
0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3f, 0x2e, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
0x61, 0x64, 0x73, 0x2e, 0x76, 0x39, 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73,
0x2e, 0x41, 0x73, 0x73, 0x65, 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x4c, 0x69, 0x73, 0x74, 0x69,
0x6e, 0x67, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52,
0x06, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x06, 0x72, 0x65, 0x6d, 0x6f, 0x76,
0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x06, 0x72, 0x65, 0x6d, 0x6f, 0x76,
0x65, 0x42, 0x0b, 0x0a, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x93,
0x01, 0x0a, 0x2b, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x41, 0x73, 0x73, 0x65, 0x74, 0x47, 0x72,
0x6f, 0x75, 0x70, 0x4c, 0x69, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x46,
0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x64,
0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32,
0x4a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x73, 0x2e, 0x67, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x61, 0x64, 0x73, 0x2e, 0x76, 0x39, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63,
0x65, 0x73, 0x2e, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x41, 0x73, 0x73, 0x65, 0x74, 0x47, 0x72,
0x6f, 0x75, 0x70, 0x4c, 0x69, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x46,
0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, 0x65, 0x73,
0x75, 0x6c, 0x74, 0x73, 0x22, 0xd9, 0x01, 0x0a, 0x28, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x41,
0x73, 0x73, 0x65, 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x4c, 0x69, 0x73, 0x74, 0x69, 0x6e, 0x67,
0x47, 0x72, 0x6f, 0x75, 0x70, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x75, 0x6c,
0x74, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, 0x61,
0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72,
0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x87, 0x01, 0x0a, 0x20, 0x61, 0x73, 0x73, 0x65, 0x74,
0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x67,
0x72, 0x6f, 0x75, 0x70, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28,
0x0b, 0x32, 0x3f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x73, 0x2e, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x64, 0x73, 0x2e, 0x76, 0x39, 0x2e, 0x72, 0x65, 0x73, 0x6f,
0x75, 0x72, 0x63, 0x65, 0x73, 0x2e, 0x41, 0x73, 0x73, 0x65, 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70,
0x4c, 0x69, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x46, 0x69, 0x6c, 0x74,
0x65, 0x72, 0x52, 0x1c, 0x61, 0x73, 0x73, 0x65, 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x4c, 0x69,
0x73, 0x74, 0x69, 0x6e, 0x67, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72,
0x32, 0x99, 0x03, 0x0a, 0x23, 0x41, 0x73, 0x73, 0x65, 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x4c,
0x69, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x46, 0x69, 0x6c, 0x74, 0x65,
0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0xaa, 0x02, 0x0a, 0x23, 0x4d, 0x75, 0x74,
0x61, 0x74, 0x65, 0x41, 0x73, 0x73, 0x65, 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x4c, 0x69, 0x73,
0x74, 0x69, 0x6e, 0x67, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73,
0x12, 0x4c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x73, 0x2e, 0x67, 0x6f,
0x6f, 0x67, 0x6c, 0x65, 0x61, 0x64, 0x73, 0x2e, 0x76, 0x39, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69,
0x63, 0x65, 0x73, 0x2e, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x41, 0x73, 0x73, 0x65, 0x74, 0x47,
0x72, 0x6f, 0x75, 0x70, 0x4c, 0x69, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x47, 0x72, 0x6f, 0x75, 0x70,
0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x4d,
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
0x6c, 0x65, 0x61, 0x64, 0x73, 0x2e, 0x76, 0x39, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
0x73, 0x2e, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x41, 0x73, 0x73, 0x65, 0x74, 0x47, 0x72, 0x6f,
0x75, 0x70, 0x4c, 0x69, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x46, 0x69,
0x6c, 0x74, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x66, 0x82,
0xd3, 0xe4, 0x93, 0x02, 0x47, 0x22, 0x42, 0x2f, 0x76, 0x39, 0x2f, 0x63, 0x75, 0x73, 0x74, 0x6f,
0x6d, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x5f, 0x69,
0x64, 0x3d, 0x2a, 0x7d, 0x2f, 0x61, 0x73, 0x73, 0x65, 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x4c,
0x69, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x46, 0x69, 0x6c, 0x74, 0x65,
0x72, 0x73, 0x3a, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x16, 0x63,
0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x2c, 0x6f, 0x70, 0x65, 0x72, 0x61,
0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x45, 0xca, 0x41, 0x18, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
0x61, 0x64, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63,
0x6f, 0x6d, 0xd2, 0x41, 0x27, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77,
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
0x61, 0x75, 0x74, 0x68, 0x2f, 0x61, 0x64, 0x77, 0x6f, 0x72, 0x64, 0x73, 0x42, 0x8f, 0x02, 0x0a,
0x24, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x73, 0x2e,
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x64, 0x73, 0x2e, 0x76, 0x39, 0x2e, 0x73, 0x65, 0x72,
0x76, 0x69, 0x63, 0x65, 0x73, 0x42, 0x28, 0x41, 0x73, 0x73, 0x65, 0x74, 0x47, 0x72, 0x6f, 0x75,
0x70, 0x4c, 0x69, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x46, 0x69, 0x6c,
0x74, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50,
0x01, 0x5a, 0x48, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67,
0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f,
0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x61, 0x64, 0x73, 0x2f, 0x76, 0x39, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63,
0x65, 0x73, 0x3b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0xa2, 0x02, 0x03, 0x47, 0x41,
0x41, 0xaa, 0x02, 0x20, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x41, 0x64, 0x73, 0x2e, 0x47,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x41, 0x64, 0x73, 0x2e, 0x56, 0x39, 0x2e, 0x53, 0x65, 0x72, 0x76,
0x69, 0x63, 0x65, 0x73, 0xca, 0x02, 0x20, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x41, 0x64,
0x73, 0x5c, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x41, 0x64, 0x73, 0x5c, 0x56, 0x39, 0x5c, 0x53,
0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0xea, 0x02, 0x24, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
0x3a, 0x3a, 0x41, 0x64, 0x73, 0x3a, 0x3a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x41, 0x64, 0x73,
0x3a, 0x3a, 0x56, 0x39, 0x3a, 0x3a, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x62, 0x06,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
file_google_ads_googleads_v9_services_asset_group_listing_group_filter_service_proto_rawDescOnce sync.Once
file_google_ads_googleads_v9_services_asset_group_listing_group_filter_service_proto_rawDescData = file_google_ads_googleads_v9_services_asset_group_listing_group_filter_service_proto_rawDesc
)
func file_google_ads_googleads_v9_services_asset_group_listing_group_filter_service_proto_rawDescGZIP() []byte {
file_google_ads_googleads_v9_services_asset_group_listing_group_filter_service_proto_rawDescOnce.Do(func() {
file_google_ads_googleads_v9_services_asset_group_listing_group_filter_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_ads_googleads_v9_services_asset_group_listing_group_filter_service_proto_rawDescData)
})
return file_google_ads_googleads_v9_services_asset_group_listing_group_filter_service_proto_rawDescData
}
var file_google_ads_googleads_v9_services_asset_group_listing_group_filter_service_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
var file_google_ads_googleads_v9_services_asset_group_listing_group_filter_service_proto_goTypes = []interface{}{
(*MutateAssetGroupListingGroupFiltersRequest)(nil), // 0: google.ads.googleads.v9.services.MutateAssetGroupListingGroupFiltersRequest
(*AssetGroupListingGroupFilterOperation)(nil), // 1: google.ads.googleads.v9.services.AssetGroupListingGroupFilterOperation
(*MutateAssetGroupListingGroupFiltersResponse)(nil), // 2: google.ads.googleads.v9.services.MutateAssetGroupListingGroupFiltersResponse
(*MutateAssetGroupListingGroupFilterResult)(nil), // 3: google.ads.googleads.v9.services.MutateAssetGroupListingGroupFilterResult
(enums.ResponseContentTypeEnum_ResponseContentType)(0), // 4: google.ads.googleads.v9.enums.ResponseContentTypeEnum.ResponseContentType
(*fieldmaskpb.FieldMask)(nil), // 5: google.protobuf.FieldMask
(*resources.AssetGroupListingGroupFilter)(nil), // 6: google.ads.googleads.v9.resources.AssetGroupListingGroupFilter
}
var file_google_ads_googleads_v9_services_asset_group_listing_group_filter_service_proto_depIdxs = []int32{
1, // 0: google.ads.googleads.v9.services.MutateAssetGroupListingGroupFiltersRequest.operations:type_name -> google.ads.googleads.v9.services.AssetGroupListingGroupFilterOperation
4, // 1: google.ads.googleads.v9.services.MutateAssetGroupListingGroupFiltersRequest.response_content_type:type_name -> google.ads.googleads.v9.enums.ResponseContentTypeEnum.ResponseContentType
5, // 2: google.ads.googleads.v9.services.AssetGroupListingGroupFilterOperation.update_mask:type_name -> google.protobuf.FieldMask
6, // 3: google.ads.googleads.v9.services.AssetGroupListingGroupFilterOperation.create:type_name -> google.ads.googleads.v9.resources.AssetGroupListingGroupFilter
6, // 4: google.ads.googleads.v9.services.AssetGroupListingGroupFilterOperation.update:type_name -> google.ads.googleads.v9.resources.AssetGroupListingGroupFilter
3, // 5: google.ads.googleads.v9.services.MutateAssetGroupListingGroupFiltersResponse.results:type_name -> google.ads.googleads.v9.services.MutateAssetGroupListingGroupFilterResult
6, // 6: google.ads.googleads.v9.services.MutateAssetGroupListingGroupFilterResult.asset_group_listing_group_filter:type_name -> google.ads.googleads.v9.resources.AssetGroupListingGroupFilter
0, // 7: google.ads.googleads.v9.services.AssetGroupListingGroupFilterService.MutateAssetGroupListingGroupFilters:input_type -> google.ads.googleads.v9.services.MutateAssetGroupListingGroupFiltersRequest
2, // 8: google.ads.googleads.v9.services.AssetGroupListingGroupFilterService.MutateAssetGroupListingGroupFilters:output_type -> google.ads.googleads.v9.services.MutateAssetGroupListingGroupFiltersResponse
8, // [8:9] is the sub-list for method output_type
7, // [7:8] is the sub-list for method input_type
7, // [7:7] is the sub-list for extension type_name
7, // [7:7] is the sub-list for extension extendee
0, // [0:7] is the sub-list for field type_name
}
func init() {
file_google_ads_googleads_v9_services_asset_group_listing_group_filter_service_proto_init()
}
func file_google_ads_googleads_v9_services_asset_group_listing_group_filter_service_proto_init() {
if File_google_ads_googleads_v9_services_asset_group_listing_group_filter_service_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_google_ads_googleads_v9_services_asset_group_listing_group_filter_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*MutateAssetGroupListingGroupFiltersRequest); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_google_ads_googleads_v9_services_asset_group_listing_group_filter_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*AssetGroupListingGroupFilterOperation); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_google_ads_googleads_v9_services_asset_group_listing_group_filter_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*MutateAssetGroupListingGroupFiltersResponse); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_google_ads_googleads_v9_services_asset_group_listing_group_filter_service_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*MutateAssetGroupListingGroupFilterResult); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
file_google_ads_googleads_v9_services_asset_group_listing_group_filter_service_proto_msgTypes[1].OneofWrappers = []interface{}{
(*AssetGroupListingGroupFilterOperation_Create)(nil),
(*AssetGroupListingGroupFilterOperation_Update)(nil),
(*AssetGroupListingGroupFilterOperation_Remove)(nil),
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_google_ads_googleads_v9_services_asset_group_listing_group_filter_service_proto_rawDesc,
NumEnums: 0,
NumMessages: 4,
NumExtensions: 0,
NumServices: 1,
},
GoTypes: file_google_ads_googleads_v9_services_asset_group_listing_group_filter_service_proto_goTypes,
DependencyIndexes: file_google_ads_googleads_v9_services_asset_group_listing_group_filter_service_proto_depIdxs,
MessageInfos: file_google_ads_googleads_v9_services_asset_group_listing_group_filter_service_proto_msgTypes,
}.Build()
File_google_ads_googleads_v9_services_asset_group_listing_group_filter_service_proto = out.File
file_google_ads_googleads_v9_services_asset_group_listing_group_filter_service_proto_rawDesc = nil
file_google_ads_googleads_v9_services_asset_group_listing_group_filter_service_proto_goTypes = nil
file_google_ads_googleads_v9_services_asset_group_listing_group_filter_service_proto_depIdxs = nil
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConnInterface
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion6
// AssetGroupListingGroupFilterServiceClient is the client API for AssetGroupListingGroupFilterService service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type AssetGroupListingGroupFilterServiceClient interface {
// Creates, updates or removes asset group listing group filters. Operation
// statuses are returned.
MutateAssetGroupListingGroupFilters(ctx context.Context, in *MutateAssetGroupListingGroupFiltersRequest, opts ...grpc.CallOption) (*MutateAssetGroupListingGroupFiltersResponse, error)
}
type assetGroupListingGroupFilterServiceClient struct {
cc grpc.ClientConnInterface
}
func NewAssetGroupListingGroupFilterServiceClient(cc grpc.ClientConnInterface) AssetGroupListingGroupFilterServiceClient {
return &assetGroupListingGroupFilterServiceClient{cc}
}
func (c *assetGroupListingGroupFilterServiceClient) MutateAssetGroupListingGroupFilters(ctx context.Context, in *MutateAssetGroupListingGroupFiltersRequest, opts ...grpc.CallOption) (*MutateAssetGroupListingGroupFiltersResponse, error) {
out := new(MutateAssetGroupListingGroupFiltersResponse)
err := c.cc.Invoke(ctx, "/google.ads.googleads.v9.services.AssetGroupListingGroupFilterService/MutateAssetGroupListingGroupFilters", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// AssetGroupListingGroupFilterServiceServer is the server API for AssetGroupListingGroupFilterService service.
type AssetGroupListingGroupFilterServiceServer interface {
// Creates, updates or removes asset group listing group filters. Operation
// statuses are returned.
MutateAssetGroupListingGroupFilters(context.Context, *MutateAssetGroupListingGroupFiltersRequest) (*MutateAssetGroupListingGroupFiltersResponse, error)
}
// UnimplementedAssetGroupListingGroupFilterServiceServer can be embedded to have forward compatible implementations.
type UnimplementedAssetGroupListingGroupFilterServiceServer struct {
}
func (*UnimplementedAssetGroupListingGroupFilterServiceServer) MutateAssetGroupListingGroupFilters(context.Context, *MutateAssetGroupListingGroupFiltersRequest) (*MutateAssetGroupListingGroupFiltersResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method MutateAssetGroupListingGroupFilters not implemented")
}
func RegisterAssetGroupListingGroupFilterServiceServer(s *grpc.Server, srv AssetGroupListingGroupFilterServiceServer) {
s.RegisterService(&_AssetGroupListingGroupFilterService_serviceDesc, srv)
}
func _AssetGroupListingGroupFilterService_MutateAssetGroupListingGroupFilters_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(MutateAssetGroupListingGroupFiltersRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(AssetGroupListingGroupFilterServiceServer).MutateAssetGroupListingGroupFilters(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.ads.googleads.v9.services.AssetGroupListingGroupFilterService/MutateAssetGroupListingGroupFilters",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(AssetGroupListingGroupFilterServiceServer).MutateAssetGroupListingGroupFilters(ctx, req.(*MutateAssetGroupListingGroupFiltersRequest))
}
return interceptor(ctx, in, info, handler)
}
var _AssetGroupListingGroupFilterService_serviceDesc = grpc.ServiceDesc{
ServiceName: "google.ads.googleads.v9.services.AssetGroupListingGroupFilterService",
HandlerType: (*AssetGroupListingGroupFilterServiceServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "MutateAssetGroupListingGroupFilters",
Handler: _AssetGroupListingGroupFilterService_MutateAssetGroupListingGroupFilters_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "google/ads/googleads/v9/services/asset_group_listing_group_filter_service.proto",
} | // You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// |
options.go | package pg
import (
"context"
"crypto/tls"
"errors"
"fmt"
"net"
"net/url"
"os"
"runtime"
"strconv"
"strings"
"time"
"github.com/oelayan/pg/v10/internal/pool"
)
// Options contains database connection options.
type Options struct {
// Network type, either tcp or unix.
// Default is tcp.
Network string
// TCP host:port or Unix socket depending on Network.
Addr string
// Dialer creates new network connection and has priority over
// Network and Addr options.
Dialer func(ctx context.Context, network, addr string) (net.Conn, error)
// Hook that is called after new connection is established
// and user is authenticated.
OnConnect func(ctx context.Context, cn *Conn) error
User string
Password string
Database string
// ApplicationName is the application name. Used in logs on Pg side.
// Only available from pg-9.0.
ApplicationName string
// TLS config for secure connections.
TLSConfig *tls.Config
// Dial timeout for establishing new connections.
// Default is 5 seconds.
DialTimeout time.Duration
// Timeout for socket reads. If reached, commands will fail
// with a timeout instead of blocking.
ReadTimeout time.Duration
// Timeout for socket writes. If reached, commands will fail
// with a timeout instead of blocking.
WriteTimeout time.Duration
// Maximum number of retries before giving up.
// Default is to not retry failed queries.
MaxRetries int
// Whether to retry queries cancelled because of statement_timeout.
RetryStatementTimeout bool
// Minimum backoff between each retry.
// Default is 250 milliseconds; -1 disables backoff.
MinRetryBackoff time.Duration
// Maximum backoff between each retry.
// Default is 4 seconds; -1 disables backoff.
MaxRetryBackoff time.Duration
// Maximum number of socket connections.
// Default is 10 connections per every CPU as reported by runtime.NumCPU.
PoolSize int
// Minimum number of idle connections which is useful when establishing
// new connection is slow.
MinIdleConns int
// Connection age at which client retires (closes) the connection.
// It is useful with proxies like PgBouncer and HAProxy.
// Default is to not close aged connections.
MaxConnAge time.Duration
// Time for which client waits for free connection if all
// connections are busy before returning an error.
// Default is 30 seconds if ReadTimeout is not defined, otherwise,
// ReadTimeout + 1 second.
PoolTimeout time.Duration
// Amount of time after which client closes idle connections.
// Should be less than server's timeout.
// Default is 5 minutes. -1 disables idle timeout check.
IdleTimeout time.Duration
// Frequency of idle checks made by idle connections reaper.
// Default is 1 minute. -1 disables idle connections reaper,
// but idle connections are still discarded by the client
// if IdleTimeout is set.
IdleCheckFrequency time.Duration
}
func (opt *Options) init() {
if opt.Network == "" {
opt.Network = "tcp"
}
if opt.Addr == "" {
switch opt.Network {
case "tcp":
host := env("PGHOST", "localhost")
port := env("PGPORT", "5432")
opt.Addr = fmt.Sprintf("%s:%s", host, port)
case "unix":
opt.Addr = "/var/run/postgresql/.s.PGSQL.5432"
}
}
if opt.DialTimeout == 0 {
opt.DialTimeout = 5 * time.Second
}
if opt.Dialer == nil {
opt.Dialer = func(ctx context.Context, network, addr string) (net.Conn, error) {
netDialer := &net.Dialer{
Timeout: opt.DialTimeout,
KeepAlive: 5 * time.Minute,
}
return netDialer.DialContext(ctx, network, addr)
}
}
if opt.User == "" {
opt.User = env("PGUSER", "postgres")
}
if opt.Database == "" {
opt.Database = env("PGDATABASE", "postgres")
}
if opt.PoolSize == 0 {
opt.PoolSize = 10 * runtime.NumCPU()
}
if opt.PoolTimeout == 0 {
if opt.ReadTimeout != 0 {
opt.PoolTimeout = opt.ReadTimeout + time.Second
} else {
opt.PoolTimeout = 30 * time.Second
}
}
if opt.IdleTimeout == 0 {
opt.IdleTimeout = 5 * time.Minute
}
if opt.IdleCheckFrequency == 0 {
opt.IdleCheckFrequency = time.Minute
}
switch opt.MinRetryBackoff {
case -1:
opt.MinRetryBackoff = 0
case 0:
opt.MinRetryBackoff = 250 * time.Millisecond
}
switch opt.MaxRetryBackoff {
case -1:
opt.MaxRetryBackoff = 0
case 0:
opt.MaxRetryBackoff = 4 * time.Second
}
}
func | (key, defValue string) string {
envValue := os.Getenv(key)
if envValue != "" {
return envValue
}
return defValue
}
// ParseURL parses a URL into options that can be used to connect to PostgreSQL.
func ParseURL(sURL string) (*Options, error) {
parsedURL, err := url.Parse(sURL)
if err != nil {
return nil, err
}
// scheme
if parsedURL.Scheme != "postgres" && parsedURL.Scheme != "postgresql" {
return nil, errors.New("pg: invalid scheme: " + parsedURL.Scheme)
}
// host and port
options := &Options{
Addr: parsedURL.Host,
}
if !strings.Contains(options.Addr, ":") {
options.Addr += ":5432"
}
// username and password
if parsedURL.User != nil {
options.User = parsedURL.User.Username()
if password, ok := parsedURL.User.Password(); ok {
options.Password = password
}
}
if options.User == "" {
options.User = "postgres"
}
// database
if len(strings.Trim(parsedURL.Path, "/")) > 0 {
options.Database = parsedURL.Path[1:]
} else {
return nil, errors.New("pg: database name not provided")
}
// ssl mode
query, err := url.ParseQuery(parsedURL.RawQuery)
if err != nil {
return nil, err
}
if sslMode, ok := query["sslmode"]; ok && len(sslMode) > 0 {
switch sslMode[0] {
case "verify-ca", "verify-full":
options.TLSConfig = &tls.Config{}
case "allow", "prefer", "require":
options.TLSConfig = &tls.Config{InsecureSkipVerify: true} //nolint
case "disable":
options.TLSConfig = nil
default:
return nil, fmt.Errorf("pg: sslmode '%v' is not supported", sslMode[0])
}
} else {
options.TLSConfig = &tls.Config{InsecureSkipVerify: true} //nolint
}
delete(query, "sslmode")
if appName, ok := query["application_name"]; ok && len(appName) > 0 {
options.ApplicationName = appName[0]
}
delete(query, "application_name")
if connTimeout, ok := query["connect_timeout"]; ok && len(connTimeout) > 0 {
ct, err := strconv.Atoi(connTimeout[0])
if err != nil {
return nil, fmt.Errorf("pg: cannot parse connect_timeout option as int")
}
options.DialTimeout = time.Second * time.Duration(ct)
}
delete(query, "connect_timeout")
if len(query) > 0 {
return nil, errors.New("pg: options other than 'sslmode', 'application_name' and 'connect_timeout' are not supported")
}
return options, nil
}
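// Added usage sketch, not part of the original file: it shows how ParseURL maps a
// connection URL onto Options. The URL, credentials and database name below are
// assumptions for illustration only.
func exampleParseURL() {
	opt, err := ParseURL("postgres://app:secret@db.example.com:5432/appdb?sslmode=require")
	if err != nil {
		panic(err)
	}
	// Addr keeps the host:port, and TLSConfig is non-nil because sslmode=require.
	fmt.Printf("addr=%s user=%s db=%s tls=%v\n", opt.Addr, opt.User, opt.Database, opt.TLSConfig != nil)
}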
func (opt *Options) getDialer() func(context.Context) (net.Conn, error) {
return func(ctx context.Context) (net.Conn, error) {
return opt.Dialer(ctx, opt.Network, opt.Addr)
}
}
func newConnPool(opt *Options) *pool.ConnPool {
return pool.NewConnPool(&pool.Options{
Dialer: opt.getDialer(),
OnClose: terminateConn,
PoolSize: opt.PoolSize,
MinIdleConns: opt.MinIdleConns,
MaxConnAge: opt.MaxConnAge,
PoolTimeout: opt.PoolTimeout,
IdleTimeout: opt.IdleTimeout,
IdleCheckFrequency: opt.IdleCheckFrequency,
})
}
| env |
tencent.go | /*
Copyright 2021 The DnsJia Authors.
WebSite: https://github.com/dnsjia/luban
Email: [email protected]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and | */
package cloudvendor | limitations under the License. |
source.py | #
# Copyright (c) 2021 Airbyte, Inc., all rights reserved.
#
import json
from datetime import datetime
from typing import Dict, Generator
import smartsheet
from airbyte_cdk import AirbyteLogger
from airbyte_cdk.models import (
AirbyteCatalog,
AirbyteConnectionStatus,
AirbyteMessage,
AirbyteRecordMessage,
AirbyteStream,
ConfiguredAirbyteCatalog,
Status,
Type,
)
# helpers
from airbyte_cdk.sources import Source
def get_prop(col_type: str) -> Dict[str, any]:
props = {
"TEXT_NUMBER": {"type": "string"},
"DATE": {"type": "string", "format": "date"},
"DATETIME": {"type": "string", "format": "date-time"},
}
return props.get(col_type, {"type": "string"})
def get_json_schema(sheet: Dict) -> Dict:
|
# main class definition
class SourceSmartsheets(Source):
def check(self, logger: AirbyteLogger, config: json) -> AirbyteConnectionStatus:
try:
access_token = config["access_token"]
spreadsheet_id = config["spreadsheet_id"]
smartsheet_client = smartsheet.Smartsheet(access_token)
smartsheet_client.errors_as_exceptions(True)
smartsheet_client.Sheets.get_sheet(spreadsheet_id)
return AirbyteConnectionStatus(status=Status.SUCCEEDED)
except Exception as e:
if isinstance(e, smartsheet.exceptions.ApiError):
err = e.error.result
code = 404 if err.code == 1006 else err.code
reason = f"{err.name}: {code} - {err.message} | Check your spreadsheet ID."
else:
reason = str(e)
logger.error(reason)
return AirbyteConnectionStatus(status=Status.FAILED)
def discover(self, logger: AirbyteLogger, config: json) -> AirbyteCatalog:
access_token = config["access_token"]
spreadsheet_id = config["spreadsheet_id"]
streams = []
smartsheet_client = smartsheet.Smartsheet(access_token)
try:
sheet = smartsheet_client.Sheets.get_sheet(spreadsheet_id)
sheet = json.loads(str(sheet)) # make it subscriptable
sheet_json_schema = get_json_schema(sheet)
logger.info(f"Running discovery on sheet: {sheet['name']} with {spreadsheet_id}")
stream = AirbyteStream(name=sheet["name"], json_schema=sheet_json_schema)
stream.supported_sync_modes = ["full_refresh"]
streams.append(stream)
except Exception as e:
raise Exception(f"Could not run discovery: {str(e)}")
return AirbyteCatalog(streams=streams)
def read(
self, logger: AirbyteLogger, config: json, catalog: ConfiguredAirbyteCatalog, state: Dict[str, any]
) -> Generator[AirbyteMessage, None, None]:
access_token = config["access_token"]
spreadsheet_id = config["spreadsheet_id"]
smartsheet_client = smartsheet.Smartsheet(access_token)
for configured_stream in catalog.streams:
stream = configured_stream.stream
properties = stream.json_schema["properties"]
if isinstance(properties, list):
columns = tuple(key for dct in properties for key in dct.keys())
elif isinstance(properties, dict):
columns = tuple(i for i in properties.keys())
else:
logger.error("Could not read properties from the JSONschema in this stream")
name = stream.name
try:
sheet = smartsheet_client.Sheets.get_sheet(spreadsheet_id)
sheet = json.loads(str(sheet)) # make it subscriptable
logger.info(f"Starting syncing spreadsheet {sheet['name']}")
logger.info(f"Row count: {sheet['totalRowCount']}")
for row in sheet["rows"]:
# convert all data to string as it is only expected format in schema
values = tuple(str(i["value"]) if "value" in i else "" for i in row["cells"])
try:
data = dict(zip(columns, values))
yield AirbyteMessage(
type=Type.RECORD,
record=AirbyteRecordMessage(stream=name, data=data, emitted_at=int(datetime.now().timestamp()) * 1000),
)
except Exception as e:
logger.error(f"Unable to encode row into an AirbyteMessage with the following error: {e}")
except Exception as e:
logger.error(f"Could not read smartsheet: {name}")
raise e
logger.info(f"Finished syncing spreadsheet with ID: {spreadsheet_id}")
| column_info = {i["title"]: get_prop(i["type"]) for i in sheet["columns"]}
json_schema = {
"$schema": "http://json-schema.org/draft-07/schema#",
"type": "object",
"properties": column_info,
}
return json_schema |
flag_float64.go | package cli
import (
"flag"
"fmt"
"strconv"
)
// Float64Flag is a flag with type float64
type Float64Flag struct {
Name string
Aliases []string
Usage string
EnvVars []string
FilePath string
Required bool
Hidden bool
Value float64
DefaultText string
Destination *float64
HasBeenSet bool
}
// IsSet returns whether or not the flag has been set through env or file
func (f *Float64Flag) IsSet() bool {
return f.HasBeenSet
}
// String returns a readable representation of this value
// (for usage defaults)
func (f *Float64Flag) String() string {
return FlagStringer(f)
}
// Names returns the names of the flag
func (f *Float64Flag) Names() []string {
return flagNames(f.Name, f.Aliases)
}
// IsRequired returns whether or not the flag is required
func (f *Float64Flag) IsRequired() bool {
return f.Required
}
// TakesValue returns true of the flag takes a value, otherwise false
func (f *Float64Flag) TakesValue() bool {
return true
}
// GetUsage returns the usage string for the flag
func (f *Float64Flag) GetUsage() string {
return f.Usage
}
// GetValue returns the flags value as string representation and an empty
// string if the flag takes no value at all.
func (f *Float64Flag) GetValue() string {
return fmt.Sprintf("%f", f.Value)
}
// Apply populates the flag given the flag set and environment
func (f *Float64Flag) Apply(set *flag.FlagSet) error {
if val, ok := flagFromEnvOrFile(f.EnvVars, f.FilePath); ok {
if val != "" {
valFloat, err := strconv.ParseFloat(val, 64)
if err != nil {
return fmt.Errorf("could not parse %q as float64 value for flag %s: %s", val, f.Name, err)
}
f.Value = valFloat
f.HasBeenSet = true
}
}
for _, name := range f.Names() {
if f.Destination != nil {
set.Float64Var(f.Destination, name, f.Value, f.Usage)
continue
}
set.Float64(name, f.Value, f.Usage)
}
return nil
}
// Float64 looks up the value of a local Float64Flag, returns
// 0 if not found
func (c *Context) Float64(name string) float64 {
return lookupFloat64(c.resolveFlagDeep(name))
}
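// Added usage sketch (not part of the original file): how a Float64Flag is typically
// wired into an App and read back from the Context. The flag name, default value and
// the argument list below are assumptions for illustration only.
func exampleFloat64Flag() error {
	app := &App{
		Name: "demo",
		Flags: []Flag{
			&Float64Flag{Name: "rate", Value: 1.5, EnvVars: []string{"DEMO_RATE"}},
		},
		Action: func(c *Context) error {
			fmt.Println("rate =", c.Float64("rate")) // prints 2.5 for the args below
			return nil
		},
	}
	return app.Run([]string{"demo", "--rate", "2.5"})
}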
func lookupFloat64(f *flag.Flag) float64 {
if f != nil {
parsed, err := strconv.ParseFloat(f.Value.String(), 64)
if err != nil |
return parsed
}
return 0
}
| {
return 0
} |
fast_reject.rs | use crate::mir::Mutability;
use crate::ty::subst::GenericArgKind;
use crate::ty::{self, Ty, TyCtxt, TypeFoldable};
use rustc_hir::def_id::DefId;
use std::fmt::Debug;
use std::hash::Hash;
use std::iter;
use self::SimplifiedTypeGen::*;
pub type SimplifiedType = SimplifiedTypeGen<DefId>;
/// See `simplify_type`
///
/// Note that we keep this type generic over the type of identifier it uses
/// because we sometimes need to use SimplifiedTypeGen values as stable sorting
/// keys (in which case we use a DefPathHash as id-type) but in the general case
/// the non-stable but fast to construct DefId-version is the better choice.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, TyEncodable, TyDecodable, HashStable)]
pub enum SimplifiedTypeGen<D>
where
D: Copy + Debug + Eq,
{
BoolSimplifiedType,
CharSimplifiedType,
IntSimplifiedType(ty::IntTy),
UintSimplifiedType(ty::UintTy),
FloatSimplifiedType(ty::FloatTy),
AdtSimplifiedType(D),
ForeignSimplifiedType(D),
StrSimplifiedType,
ArraySimplifiedType,
SliceSimplifiedType,
RefSimplifiedType(Mutability),
PtrSimplifiedType(Mutability),
NeverSimplifiedType,
TupleSimplifiedType(usize),
/// A trait object, all of whose components are markers
/// (e.g., `dyn Send + Sync`).
MarkerTraitObjectSimplifiedType,
TraitSimplifiedType(D),
ClosureSimplifiedType(D),
GeneratorSimplifiedType(D),
GeneratorWitnessSimplifiedType(usize),
OpaqueSimplifiedType(D),
FunctionSimplifiedType(usize),
PlaceholderSimplifiedType,
}
/// Generic parameters are pretty much just bound variables, e.g.
/// the type of `fn foo<'a, T>(x: &'a T) -> u32 { ... }` can be thought of as
/// `for<'a, T> fn(&'a T) -> u32`.
///
/// Typecheck of `foo` has to succeed for all possible generic arguments, so
/// during typeck, we have to treat its generic parameters as if they
/// were placeholders.
///
/// But when calling `foo` we only have to provide a specific generic argument.
/// In that case the generic parameters are instantiated with inference variables.
/// As we use `simplify_type` before that instantiation happens, we just treat
/// generic parameters as if they were inference variables in that case.
#[derive(PartialEq, Eq, Debug, Clone, Copy)]
pub enum TreatParams {
/// Treat parameters as placeholders in the given environment.
///
/// Note that this also causes us to treat projections as if they were
/// placeholders. This is only correct if the given projection cannot
/// be normalized in the current context. Even if normalization fails,
/// it may still succeed later if the projection contains any inference
/// variables.
AsPlaceholder,
AsInfer,
}
/// Tries to simplify a type by only returning the outermost injective¹ layer, if one exists.
///
/// **This function should only be used if you need to store or retrieve the type from some
/// hashmap. If you want to quickly decide whether two types may unify, use the [DeepRejectCtxt]
/// instead.**
///
/// The idea is to get something simple that we can use to quickly decide if two types could unify,
/// for example during method lookup. If this function returns `Some(x)` it can only unify with
/// types for which this method returns either `Some(x)` as well or `None`.
///
/// A special case here are parameters and projections, which are only injective
/// if they are treated as placeholders.
///
/// For example when storing impls based on their simplified self type, we treat
/// generic parameters as if they were inference variables. We must not simplify them here,
/// as they can unify with any other type.
///
/// With projections we have to be even more careful, as treating them as placeholders
/// is only correct if they are fully normalized.
///
/// ¹ meaning that if the outermost layers are different, then the whole types are also different.
pub fn simplify_type<'tcx>(
tcx: TyCtxt<'tcx>,
ty: Ty<'tcx>,
treat_params: TreatParams,
) -> Option<SimplifiedType> {
match *ty.kind() {
ty::Bool => Some(BoolSimplifiedType),
ty::Char => Some(CharSimplifiedType),
ty::Int(int_type) => Some(IntSimplifiedType(int_type)),
ty::Uint(uint_type) => Some(UintSimplifiedType(uint_type)),
ty::Float(float_type) => Some(FloatSimplifiedType(float_type)),
ty::Adt(def, _) => Some(AdtSimplifiedType(def.did())),
ty::Str => Some(StrSimplifiedType),
ty::Array(..) => Some(ArraySimplifiedType),
ty::Slice(..) => Some(SliceSimplifiedType),
ty::RawPtr(ptr) => Some(PtrSimplifiedType(ptr.mutbl)),
ty::Dynamic(trait_info, ..) => match trait_info.principal_def_id() {
Some(principal_def_id) if !tcx.trait_is_auto(principal_def_id) => {
Some(TraitSimplifiedType(principal_def_id))
}
_ => Some(MarkerTraitObjectSimplifiedType),
},
ty::Ref(_, _, mutbl) => Some(RefSimplifiedType(mutbl)),
ty::FnDef(def_id, _) | ty::Closure(def_id, _) => Some(ClosureSimplifiedType(def_id)),
ty::Generator(def_id, _, _) => Some(GeneratorSimplifiedType(def_id)),
ty::GeneratorWitness(tys) => Some(GeneratorWitnessSimplifiedType(tys.skip_binder().len())),
ty::Never => Some(NeverSimplifiedType),
ty::Tuple(tys) => Some(TupleSimplifiedType(tys.len())),
ty::FnPtr(f) => Some(FunctionSimplifiedType(f.skip_binder().inputs().len())),
ty::Placeholder(..) => Some(PlaceholderSimplifiedType),
ty::Param(_) => match treat_params {
TreatParams::AsPlaceholder => Some(PlaceholderSimplifiedType),
TreatParams::AsInfer => None,
},
ty::Projection(_) => match treat_params {
// When treating `ty::Param` as a placeholder, projections also
// don't unify with anything else as long as they are fully normalized.
//
// We will have to be careful with lazy normalization here.
TreatParams::AsPlaceholder if !ty.has_infer_types_or_consts() => {
debug!("treating `{}` as a placeholder", ty);
Some(PlaceholderSimplifiedType)
}
TreatParams::AsPlaceholder | TreatParams::AsInfer => None,
},
ty::Opaque(def_id, _) => Some(OpaqueSimplifiedType(def_id)),
ty::Foreign(def_id) => Some(ForeignSimplifiedType(def_id)),
ty::Bound(..) | ty::Infer(_) | ty::Error(_) => None,
}
}
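// Added illustration (standalone, not part of rustc and not its API): the same
// "outermost constructor" fingerprinting idea on a made-up toy type language. Two toy
// types can only unify if their fingerprints are equal, or if at least one side has no
// fingerprint (playing the role of an inference variable).
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum ToySimplified {
    Bool,
    Int,
    Adt(u32),     // identified by a def-id-like number
    Tuple(usize), // arity only, element types are ignored
}

#[derive(Clone, Debug)]
enum ToyTy {
    Bool,
    Int,
    Adt(u32, Vec<ToyTy>),
    Tuple(Vec<ToyTy>),
    Infer, // unifies with anything, so it gets no fingerprint
}

fn toy_simplify(ty: &ToyTy) -> Option<ToySimplified> {
    match ty {
        ToyTy::Bool => Some(ToySimplified::Bool),
        ToyTy::Int => Some(ToySimplified::Int),
        ToyTy::Adt(def_id, _) => Some(ToySimplified::Adt(*def_id)),
        ToyTy::Tuple(elems) => Some(ToySimplified::Tuple(elems.len())),
        ToyTy::Infer => None,
    }
}

fn toy_may_unify(a: &ToyTy, b: &ToyTy) -> bool {
    match (toy_simplify(a), toy_simplify(b)) {
        (Some(x), Some(y)) => x == y,
        _ => true, // at least one side could still be anything
    }
}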
impl<D: Copy + Debug + Eq> SimplifiedTypeGen<D> {
pub fn def(self) -> Option<D> {
match self {
AdtSimplifiedType(d)
| ForeignSimplifiedType(d)
| TraitSimplifiedType(d)
| ClosureSimplifiedType(d)
| GeneratorSimplifiedType(d)
| OpaqueSimplifiedType(d) => Some(d),
_ => None,
}
}
pub fn map_def<U, F>(self, map: F) -> SimplifiedTypeGen<U>
where
F: Fn(D) -> U,
U: Copy + Debug + Eq,
{
match self {
BoolSimplifiedType => BoolSimplifiedType,
CharSimplifiedType => CharSimplifiedType,
IntSimplifiedType(t) => IntSimplifiedType(t),
UintSimplifiedType(t) => UintSimplifiedType(t),
FloatSimplifiedType(t) => FloatSimplifiedType(t),
AdtSimplifiedType(d) => AdtSimplifiedType(map(d)),
ForeignSimplifiedType(d) => ForeignSimplifiedType(map(d)),
StrSimplifiedType => StrSimplifiedType,
ArraySimplifiedType => ArraySimplifiedType,
SliceSimplifiedType => SliceSimplifiedType,
RefSimplifiedType(m) => RefSimplifiedType(m),
PtrSimplifiedType(m) => PtrSimplifiedType(m),
NeverSimplifiedType => NeverSimplifiedType,
MarkerTraitObjectSimplifiedType => MarkerTraitObjectSimplifiedType,
TupleSimplifiedType(n) => TupleSimplifiedType(n),
TraitSimplifiedType(d) => TraitSimplifiedType(map(d)),
ClosureSimplifiedType(d) => ClosureSimplifiedType(map(d)),
GeneratorSimplifiedType(d) => GeneratorSimplifiedType(map(d)),
GeneratorWitnessSimplifiedType(n) => GeneratorWitnessSimplifiedType(n),
OpaqueSimplifiedType(d) => OpaqueSimplifiedType(map(d)),
FunctionSimplifiedType(n) => FunctionSimplifiedType(n),
PlaceholderSimplifiedType => PlaceholderSimplifiedType,
}
}
}
/// Given generic arguments from an obligation and an impl,
/// could these two be unified after replacing parameters in the
/// impl with inference variables.
///
/// For obligations, parameters won't be replaced by inference
/// variables and only unify with themselves. We treat them
/// the same way we treat placeholders.
///
/// We also use this function during coherence. For coherence the
/// impls only have to overlap for some value, so we treat parameters
/// on both sides like inference variables. This behavior is toggled
/// using the `treat_obligation_params` field.
#[derive(Debug, Clone, Copy)]
pub struct DeepRejectCtxt {
pub treat_obligation_params: TreatParams,
}
impl DeepRejectCtxt {
pub fn generic_args_may_unify(
self,
obligation_arg: ty::GenericArg<'_>,
impl_arg: ty::GenericArg<'_>,
) -> bool {
match (obligation_arg.unpack(), impl_arg.unpack()) {
// We don't fast reject based on regions for now.
(GenericArgKind::Lifetime(_), GenericArgKind::Lifetime(_)) => true,
(GenericArgKind::Type(obl), GenericArgKind::Type(imp)) => {
self.types_may_unify(obl, imp)
}
(GenericArgKind::Const(obl), GenericArgKind::Const(imp)) => {
self.consts_may_unify(obl, imp)
}
_ => bug!("kind mismatch: {obligation_arg} {impl_arg}"),
}
}
pub fn ty | elf, obligation_ty: Ty<'_>, impl_ty: Ty<'_>) -> bool {
match impl_ty.kind() {
// Start by checking whether the type in the impl may unify with
// pretty much everything. Just return `true` in that case.
ty::Param(_) | ty::Projection(_) | ty::Error(_) => return true,
// These types only unify with inference variables or their own
// variant.
ty::Bool
| ty::Char
| ty::Int(_)
| ty::Uint(_)
| ty::Float(_)
| ty::Adt(..)
| ty::Str
| ty::Array(..)
| ty::Slice(..)
| ty::RawPtr(..)
| ty::Dynamic(..)
| ty::Ref(..)
| ty::Never
| ty::Tuple(..)
| ty::FnPtr(..)
| ty::Foreign(..)
| ty::Opaque(..) => {}
ty::FnDef(..)
| ty::Closure(..)
| ty::Generator(..)
| ty::GeneratorWitness(..)
| ty::Placeholder(..)
| ty::Bound(..)
| ty::Infer(_) => bug!("unexpected impl_ty: {impl_ty}"),
}
let k = impl_ty.kind();
match *obligation_ty.kind() {
// Purely rigid types, use structural equivalence.
ty::Bool
| ty::Char
| ty::Int(_)
| ty::Uint(_)
| ty::Float(_)
| ty::Str
| ty::Never
| ty::Foreign(_) => obligation_ty == impl_ty,
ty::Ref(_, obl_ty, obl_mutbl) => match k {
&ty::Ref(_, impl_ty, impl_mutbl) => {
obl_mutbl == impl_mutbl && self.types_may_unify(obl_ty, impl_ty)
}
_ => false,
},
ty::Adt(obl_def, obl_substs) => match k {
&ty::Adt(impl_def, impl_substs) => {
obl_def == impl_def
&& iter::zip(obl_substs, impl_substs)
.all(|(obl, imp)| self.generic_args_may_unify(obl, imp))
}
_ => false,
},
ty::Slice(obl_ty) => {
matches!(k, &ty::Slice(impl_ty) if self.types_may_unify(obl_ty, impl_ty))
}
ty::Array(obl_ty, obl_len) => match k {
&ty::Array(impl_ty, impl_len) => {
self.types_may_unify(obl_ty, impl_ty)
&& self.consts_may_unify(obl_len, impl_len)
}
_ => false,
},
ty::Tuple(obl) => match k {
&ty::Tuple(imp) => {
obl.len() == imp.len()
&& iter::zip(obl, imp).all(|(obl, imp)| self.types_may_unify(obl, imp))
}
_ => false,
},
ty::RawPtr(obl) => match k {
ty::RawPtr(imp) => obl.mutbl == imp.mutbl && self.types_may_unify(obl.ty, imp.ty),
_ => false,
},
ty::Dynamic(obl_preds, ..) => {
// Ideally we would walk the existential predicates here or at least
// compare their length. But considering that the relevant `Relate` impl
// actually sorts and deduplicates these, that doesn't work.
matches!(k, ty::Dynamic(impl_preds, ..) if
obl_preds.principal_def_id() == impl_preds.principal_def_id()
)
}
ty::FnPtr(obl_sig) => match k {
ty::FnPtr(impl_sig) => {
let ty::FnSig { inputs_and_output, c_variadic, unsafety, abi } =
obl_sig.skip_binder();
let impl_sig = impl_sig.skip_binder();
abi == impl_sig.abi
&& c_variadic == impl_sig.c_variadic
&& unsafety == impl_sig.unsafety
&& inputs_and_output.len() == impl_sig.inputs_and_output.len()
&& iter::zip(inputs_and_output, impl_sig.inputs_and_output)
.all(|(obl, imp)| self.types_may_unify(obl, imp))
}
_ => false,
},
// Opaque types in impls should be forbidden, but that doesn't
// stop compilation. So this match arm should never return true
// if compilation succeeds.
ty::Opaque(..) => matches!(k, ty::Opaque(..)),
// Impls cannot contain these types as these cannot be named directly.
ty::FnDef(..) | ty::Closure(..) | ty::Generator(..) => false,
ty::Placeholder(..) => false,
// Depending on the value of `treat_obligation_params`, we either
// treat generic parameters like placeholders or like inference variables.
ty::Param(_) => match self.treat_obligation_params {
TreatParams::AsPlaceholder => false,
TreatParams::AsInfer => true,
},
ty::Infer(_) => true,
// As we're walking the whole type, it may encounter projections
// inside of binders and what not, so we're just going to assume that
// projections can unify with other stuff.
//
// Looking forward to lazy normalization this is the safer strategy anyways.
ty::Projection(_) => true,
ty::Error(_) => true,
ty::GeneratorWitness(..) | ty::Bound(..) => {
bug!("unexpected obligation type: {:?}", obligation_ty)
}
}
}
pub fn consts_may_unify(self, obligation_ct: ty::Const<'_>, impl_ct: ty::Const<'_>) -> bool {
match impl_ct.val() {
ty::ConstKind::Param(_) | ty::ConstKind::Unevaluated(_) | ty::ConstKind::Error(_) => {
return true;
}
ty::ConstKind::Value(_) => {}
ty::ConstKind::Infer(_) | ty::ConstKind::Bound(..) | ty::ConstKind::Placeholder(_) => {
bug!("unexpected impl arg: {:?}", impl_ct)
}
}
let k = impl_ct.val();
match obligation_ct.val() {
ty::ConstKind::Param(_) => match self.treat_obligation_params {
TreatParams::AsPlaceholder => false,
TreatParams::AsInfer => true,
},
// As we don't necessarily eagerly evaluate constants,
// they might unify with any value.
ty::ConstKind::Unevaluated(_) | ty::ConstKind::Error(_) => true,
ty::ConstKind::Value(obl) => match k {
ty::ConstKind::Value(imp) => {
// FIXME(valtrees): Once we have valtrees, we can just
// compare them directly here.
match (obl.try_to_scalar_int(), imp.try_to_scalar_int()) {
(Some(obl), Some(imp)) => obl == imp,
_ => true,
}
}
_ => true,
},
ty::ConstKind::Infer(_) => true,
ty::ConstKind::Bound(..) | ty::ConstKind::Placeholder(_) => {
bug!("unexpected obl const: {:?}", obligation_ct)
}
}
}
}
| pes_may_unify(s |
licensee.rs | use crate::{
error::{ParseError, Reason},
lexer::{Lexer, Token},
ExceptionId, LicenseItem, LicenseReq,
};
use std::fmt;
/// A convenience wrapper for a license and optional exception
/// that can be checked against a license requirement to see
/// if it satisfies the requirement placed by a license holder
///
/// ```
/// let licensee = spdx::Licensee::parse("GPL-2.0").unwrap();
///
/// assert!(licensee.satisfies(&spdx::LicenseReq::from(spdx::license_id("GPL-2.0-only").unwrap())));
/// ```
#[derive(PartialEq, Eq, PartialOrd, Ord, Debug)]
pub struct Licensee {
inner: LicenseReq,
}
impl fmt::Display for Licensee {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result |
}
impl Licensee {
/// Creates a licensee from its component parts. Note that use of SPDX's
/// `or_later` is completely ignored for licensees as it only applies
/// to the license holder(s) not the licensee
pub fn new(license: LicenseItem, exception: Option<ExceptionId>) -> Self {
if let LicenseItem::SPDX { or_later, .. } = &license {
debug_assert!(!or_later)
}
Self {
inner: LicenseReq { license, exception },
}
}
/// Parses a simplified version of an SPDX license expression that
/// can contain at most one valid SPDX license with an optional exception
/// joined by a WITH.
///
/// ```
/// use spdx::Licensee;
///
/// // Normal single license
/// Licensee::parse("MIT").unwrap();
///
/// // SPDX allows license identifiers outside of the official license list
/// // via the LicenseRef- prefix
/// Licensee::parse("LicenseRef-My-Super-Extra-Special-License").unwrap();
///
/// // License and exception
/// Licensee::parse("Apache-2.0 WITH LLVM-exception").unwrap();
///
/// // `+` is only allowed to be used by license requirements from the license holder
/// Licensee::parse("Apache-2.0+").unwrap_err();
///
/// Licensee::parse("GPL-2.0").unwrap();
///
/// // GNU suffix license (GPL, AGPL, LGPL, GFDL) must not contain the suffix
/// Licensee::parse("GPL-3.0-or-later").unwrap_err();
///
/// ```
pub fn parse(original: &str) -> Result<Self, ParseError<'_>> {
let mut lexer = Lexer::new(original);
let license = {
let lt = lexer.next().ok_or_else(|| ParseError {
original,
span: 0..original.len(),
reason: Reason::Empty,
})??;
match lt.token {
Token::SPDX(id) => {
// If we have one of the GNU licenses which use the `-only` or `-or-later` suffixes
// return an error rather than silently truncating, the `-only` and `-or-later`
// suffixes are for the license holder(s) to specify what license(s) they can be
// licensed under, not for the licensee, similarly to the `+`
if id.is_gnu() {
let is_only = original.ends_with("-only");
let or_later = original.ends_with("-or-later");
if is_only || or_later {
return Err(ParseError {
original,
span: if is_only {
original.len() - 5..original.len()
} else {
original.len() - 9..original.len()
},
reason: Reason::Unexpected(&["<bare-gnu-license>"]),
});
}
}
LicenseItem::SPDX {
id,
or_later: false,
}
}
Token::LicenseRef { doc_ref, lic_ref } => LicenseItem::Other {
doc_ref: doc_ref.map(String::from),
lic_ref: lic_ref.to_owned(),
},
_ => {
return Err(ParseError {
original,
span: lt.span,
reason: Reason::Unexpected(&["<license>"]),
})
}
}
};
let exception = match lexer.next() {
None => None,
Some(lt) => {
let lt = lt?;
match lt.token {
Token::With => {
let lt = lexer.next().ok_or(ParseError {
original,
span: lt.span,
reason: Reason::Empty,
})??;
match lt.token {
Token::Exception(exc) => Some(exc),
_ => {
return Err(ParseError {
original,
span: lt.span,
reason: Reason::Unexpected(&["<exception>"]),
})
}
}
}
_ => {
return Err(ParseError {
original,
span: lt.span,
reason: Reason::Unexpected(&["WITH"]),
})
}
}
}
};
Ok(Licensee {
inner: LicenseReq { license, exception },
})
}
/// Determines whether the specified license requirement is satisfied by
/// this license (+exception)
///
/// ```
/// let licensee = spdx::Licensee::parse("Apache-2.0 WITH LLVM-exception").unwrap();
///
/// assert!(licensee.satisfies(&spdx::LicenseReq {
/// license: spdx::LicenseItem::SPDX {
/// id: spdx::license_id("Apache-2.0").unwrap(),
/// // Means the license holder is fine with Apache-2.0 or higher
/// or_later: true,
/// },
/// exception: spdx::exception_id("LLVM-exception"),
/// }));
/// ```
pub fn satisfies(&self, req: &LicenseReq) -> bool {
match (&self.inner.license, &req.license) {
(LicenseItem::SPDX { id: a, .. }, LicenseItem::SPDX { id: b, or_later }) => {
if a.index != b.index {
if *or_later {
// Many of the SPDX identifiers end with `-<version number>`,
// so chop that off and ensure the base strings match, and if so,
// just do a lexical compare; if this "allowed license" is >,
// then we satisfy the license requirement
let a_name = &a.name[..a.name.rfind('-').unwrap_or_else(|| a.name.len())];
let b_name = &b.name[..b.name.rfind('-').unwrap_or_else(|| b.name.len())];
if a_name != b_name || a.name < b.name {
return false;
}
} else {
return false;
}
}
}
(
LicenseItem::Other {
doc_ref: doca,
lic_ref: lica,
},
LicenseItem::Other {
doc_ref: docb,
lic_ref: licb,
},
) => {
if doca != docb || lica != licb {
return false;
}
}
_ => return false,
}
req.exception == self.inner.exception
}
}
impl PartialOrd<LicenseReq> for Licensee {
fn partial_cmp(&self, o: &LicenseReq) -> Option<std::cmp::Ordering> {
self.inner.partial_cmp(o)
}
}
impl PartialEq<LicenseReq> for Licensee {
fn eq(&self, o: &LicenseReq) -> bool {
self.inner.eq(o)
}
}
#[cfg(test)]
mod test {
use crate::{exception_id, license_id, LicenseItem, LicenseReq, Licensee};
const LICENSEES: &[&str] = &[
"LicenseRef-Embark-Proprietary",
"BSD-2-Clause",
"Apache-2.0 WITH LLVM-exception",
"BSD-2-Clause-FreeBSD",
"BSL-1.0",
"Zlib",
"CC0-1.0",
"FTL",
"ISC",
"MIT",
"MPL-2.0",
"BSD-3-Clause",
"Unicode-DFS-2016",
"Unlicense",
"Apache-2.0",
];
#[test]
fn handles_or_later() {
let mut licensees: Vec<_> = LICENSEES
.iter()
.map(|l| Licensee::parse(l).unwrap())
.collect();
licensees.sort();
let mpl_id = license_id("MPL-2.0").unwrap();
let req = LicenseReq {
license: LicenseItem::SPDX {
id: mpl_id,
or_later: true,
},
exception: None,
};
// Licensees can't have the `or_later`
assert!(licensees.binary_search_by(|l| l.inner.cmp(&req)).is_err());
match &licensees[licensees
.binary_search_by(|l| l.partial_cmp(&req).unwrap())
.unwrap()]
.inner
.license
{
LicenseItem::SPDX { id, .. } => assert_eq!(*id, mpl_id),
o => panic!("unexpected {:?}", o),
}
}
#[test]
fn handles_exceptions() {
let mut licensees: Vec<_> = LICENSEES
.iter()
.map(|l| Licensee::parse(l).unwrap())
.collect();
licensees.sort();
let apache_id = license_id("Apache-2.0").unwrap();
let llvm_exc = exception_id("LLVM-exception").unwrap();
let req = LicenseReq {
license: LicenseItem::SPDX {
id: apache_id,
or_later: false,
},
exception: Some(llvm_exc),
};
assert_eq!(
&req,
&licensees[licensees
.binary_search_by(|l| l.partial_cmp(&req).unwrap())
.unwrap()]
.inner
);
}
#[test]
fn handles_license_ref() {
let mut licensees: Vec<_> = LICENSEES
.iter()
.map(|l| Licensee::parse(l).unwrap())
.collect();
licensees.sort();
let req = LicenseReq {
license: LicenseItem::Other {
doc_ref: None,
lic_ref: "Embark-Proprietary".to_owned(),
},
exception: None,
};
assert_eq!(
&req,
&licensees[licensees
.binary_search_by(|l| l.partial_cmp(&req).unwrap())
.unwrap()]
.inner
);
}
#[test]
fn handles_close() {
let mut licensees: Vec<_> = LICENSEES
.iter()
.map(|l| Licensee::parse(l).unwrap())
.collect();
licensees.sort();
for id in &["BSD-2-Clause", "BSD-2-Clause-FreeBSD"] {
let lic_id = license_id(id).unwrap();
let req = LicenseReq {
license: LicenseItem::SPDX {
id: lic_id,
or_later: true,
},
exception: None,
};
// Licensees can't have the `or_later`
assert!(licensees.binary_search_by(|l| l.inner.cmp(&req)).is_err());
match &licensees[licensees
.binary_search_by(|l| l.partial_cmp(&req).unwrap())
.unwrap()]
.inner
.license
{
LicenseItem::SPDX { id, .. } => assert_eq!(*id, lic_id),
o => panic!("unexpected {:?}", o),
}
}
}
}
| {
self.inner.fmt(f)
} |
driver.py | # Copyright 2016 - Nokia
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import namedtuple
from oslo_config import cfg
from oslo_log import log
from oslo_utils import importutils as utils
from vitrage.common.constants import DatasourceAction
from vitrage.common.constants import DatasourceOpts as DSOpts
from vitrage.common.constants import DatasourceProperties as DSProps
from vitrage.datasources.alarm_driver_base import AlarmDriverBase
from vitrage.datasources.zabbix.properties import ZabbixProperties as ZProps
from vitrage.datasources.zabbix.properties import ZabbixTriggerStatus \
as TriggerStatus
from vitrage.datasources.zabbix.properties import ZabbixTriggerValue \
as TriggerValue
from vitrage.datasources.zabbix import ZABBIX_DATASOURCE
from vitrage.utils import file as file_utils
CONF = cfg.CONF
LOG = log.getLogger(__name__)
class ZabbixDriver(AlarmDriverBase):
ServiceKey = namedtuple('ServiceKey', ['hostname', 'triggerid'])
conf_map = None
def __init__(self):
super(ZabbixDriver, self).__init__()
if not ZabbixDriver.conf_map:
ZabbixDriver.conf_map =\
ZabbixDriver._configuration_mapping()
self._client = None
def zabbix_client_login(self):
if not CONF.zabbix.user:
LOG.warning('Zabbix user is not defined')
if not CONF.zabbix.password:
LOG.warning('Zabbix password is not defined')
if not CONF.zabbix.url:
LOG.warning('Zabbix url is not defined')
try:
if not self._client:
self._client = utils.import_object(
'pyzabbix.ZabbixAPI',
CONF.zabbix.url)
self._client.login(
CONF.zabbix.user,
CONF.zabbix.password)
except Exception:
LOG.exception('pyzabbix.ZabbixAPI error occurred.')
self._client = None
def _vitrage_type(self):
return ZABBIX_DATASOURCE
def _alarm_key(self, alarm):
return self.ServiceKey(hostname=alarm[ZProps.RESOURCE_NAME],
triggerid=alarm[ZProps.TRIGGER_ID])
def _get_alarms(self):
self.zabbix_client_login()
if not self._client:
return []
alarms = []
valid_hosts = (host for host in
self._client.host.get(output=[ZProps.HOST])
if host[ZProps.HOST] in ZabbixDriver.conf_map)
for host in valid_hosts:
self._get_triggers_per_host(host, alarms)
return alarms
def _get_triggers_per_host(self, host, alarms):
host_id = host[ZProps.HOST_ID]
triggers = self._client.trigger.get(hostids=host_id,
expandDescription=True)
triggers_rawtexts = self._get_triggers_rawtexts(host_id)
for trigger in triggers:
trigger[ZProps.ZABBIX_RESOURCE_NAME] = host[ZProps.HOST]
trigger_id = trigger[ZProps.TRIGGER_ID]
trigger[ZProps.RAWTEXT] = triggers_rawtexts[trigger_id]
alarms.append(trigger)
def _get_triggers_rawtexts(self, host_id):
output = [ZProps.TRIGGER_ID, ZProps.DESCRIPTION]
triggers = self._client.trigger.get(hostids=host_id, output=output)
return {trigger[ZProps.TRIGGER_ID]: trigger[ZProps.DESCRIPTION]
for trigger in triggers}
def _enrich_alarms(self, alarms):
"""Enrich zabbix alarm using zabbix configuration file
converting Zabbix host name to Vitrage resource type and name
:param alarms: list of Zabbix alarms, enriched in place
:return: None; the alarms are modified in place
"""
for alarm in alarms:
alarm[ZProps.VALUE] = self._get_value(alarm)
zabbix_host = alarm[ZProps.ZABBIX_RESOURCE_NAME]
vitrage_host = ZabbixDriver.conf_map[zabbix_host]
alarm[ZProps.RESOURCE_TYPE] = vitrage_host[ZProps.RESOURCE_TYPE]
alarm[ZProps.RESOURCE_NAME] = vitrage_host[ZProps.RESOURCE_NAME]
def _is_erroneous(self, alarm):
return alarm and \
alarm[ZProps.VALUE] == TriggerValue.PROBLEM
def _status_changed(self, new_alarm, old_alarm):
if not (new_alarm and old_alarm):
return False
if new_alarm[ZProps.VALUE] != old_alarm[ZProps.VALUE]:
return True
if new_alarm[ZProps.VALUE] == TriggerValue.PROBLEM:
priority_changed = \
new_alarm[ZProps.PRIORITY] != old_alarm[ZProps.PRIORITY]
description_changed = \
new_alarm[ZProps.DESCRIPTION] != old_alarm[ZProps.DESCRIPTION]
return priority_changed or description_changed
def _is_valid(self, alarm):
return alarm[ZProps.RESOURCE_TYPE] is not None and \
alarm[ZProps.RESOURCE_NAME] is not None
@staticmethod
def _get_value(alarm):
if alarm[ZProps.STATUS] == TriggerStatus.DISABLED:
return TriggerValue.OK
return alarm[ZProps.VALUE]
@staticmethod
def _configuration_mapping():
try:
zabbix_config_file = CONF.zabbix[DSOpts.CONFIG_FILE]
zabbix_config = file_utils.load_yaml_file(zabbix_config_file)
zabbix_config_elements = zabbix_config[ZABBIX_DATASOURCE]
| mappings[element_config['zabbix_host']] = {
ZProps.RESOURCE_TYPE: element_config['type'],
ZProps.RESOURCE_NAME: element_config['name']
}
return mappings
except Exception:
LOG.exception('Failed in init.')
return {}
def enrich_event(self, event, event_type):
event[DSProps.EVENT_TYPE] = event_type
if ZabbixDriver.conf_map:
zabbix_host = event[ZProps.HOST]
event[ZProps.ZABBIX_RESOURCE_NAME] = zabbix_host
v_resource = ZabbixDriver.conf_map[zabbix_host]
event[ZProps.RESOURCE_NAME] = v_resource[ZProps.RESOURCE_NAME]
event[ZProps.RESOURCE_TYPE] = v_resource[ZProps.RESOURCE_TYPE]
return ZabbixDriver.make_pickleable([event], ZABBIX_DATASOURCE,
DatasourceAction.UPDATE)[0]
@staticmethod
def get_event_types():
return ['zabbix.alarm.ok', 'zabbix.alarm.problem']
@staticmethod
def should_delete_outdated_entities():
return True | mappings = {}
for element_config in zabbix_config_elements: |
index.js | import _ from 'lodash';
import React, { Component } from 'react';
import ReactDOM from 'react-dom';
import YTSearch from 'youtube-api-search';
import SearchBar from './components/search_bar';
import VideoList from './components/video_list';
import VideoDetail from './components/video_detail';
import Logo from './components/logo';
import secrets from './secrets';
class App extends Component {
constructor(props) {
super(props);
this.state = {
videos: [],
selectedVideo: null
};
}
videoSearch(term) {
YTSearch({key: secrets.API_KEY_YOUTUBE, term: term}, (videos) => {
this.setState({
videos: videos,
selectedVideo: videos[0]
});
});
}
render() {
const videoSearch = _.debounce((term) => { this.videoSearch(term) }, 300);
return (
<div>
<div className="search">
<Logo />
<SearchBar onSearchTermChange={videoSearch} />
<div className="tagline">Quick Tube... 3x faster than Youtube</div>
</div>
<VideoDetail video={this.state.selectedVideo} videos={this.state.videos} />
<VideoList
onVideoSelect={selectedVideo => this.setState({selectedVideo})}
videos={this.state.videos} />
</div>
);
}
} |
ReactDOM.render(<App />, document.querySelector(".container")); |
|
results.go | package policies
import (
"encoding/json"
"fmt"
"strconv"
"time"
"github.com/pierreprinetti/gophercloud/v3"
"github.com/pierreprinetti/gophercloud/v3/pagination"
)
// Policy represents a clustering policy in the Openstack cloud.
type Policy struct {
CreatedAt time.Time `json:"-"`
Data map[string]interface{} `json:"data"`
Domain string `json:"domain"`
ID string `json:"id"`
Name string `json:"name"`
Project string `json:"project"`
Spec Spec `json:"spec"`
Type string `json:"type"`
UpdatedAt time.Time `json:"-"`
User string `json:"user"`
}
func (r *Policy) UnmarshalJSON(b []byte) error {
type tmp Policy
var s struct {
tmp
CreatedAt string `json:"created_at,omitempty"`
UpdatedAt string `json:"updated_at,omitempty"`
}
err := json.Unmarshal(b, &s)
if err != nil {
return err
}
*r = Policy(s.tmp)
if s.CreatedAt != "" {
r.CreatedAt, err = time.Parse(gophercloud.RFC3339MilliNoZ, s.CreatedAt)
if err != nil {
r.CreatedAt, err = time.Parse(time.RFC3339, s.CreatedAt)
if err != nil {
return err
}
}
}
if s.UpdatedAt != "" {
r.UpdatedAt, err = time.Parse(gophercloud.RFC3339MilliNoZ, s.UpdatedAt)
if err != nil {
r.UpdatedAt, err = time.Parse(time.RFC3339, s.UpdatedAt)
if err != nil {
return err
}
}
}
return nil
}
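// Added sketch (not part of the original file): an example of the kind of payload
// Policy.UnmarshalJSON accepts. The ID, name and spec values are made up, and the
// created_at timestamp uses the RFC3339MilliNoZ layout handled above.
func examplePolicyJSON() (*Policy, error) {
	raw := []byte(`{
		"id": "7cf275ac-0e82-4071-a21b-442c1b4e5dd4",
		"name": "delpol",
		"type": "senlin.policy.deletion-1.0",
		"created_at": "2021-03-01T12:00:00.123456",
		"spec": {
			"type": "senlin.policy.deletion",
			"version": "1.0",
			"properties": {"criteria": "OLDEST_FIRST"}
		}
	}`)
	var p Policy
	if err := json.Unmarshal(raw, &p); err != nil {
		return nil, err
	}
	return &p, nil
}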
// Spec represents an OpenStack clustering policy spec.
type Spec struct {
Description string `json:"description"`
Properties map[string]interface{} `json:"properties"`
Type string `json:"type"`
Version string `json:"-"`
}
func (r *Spec) UnmarshalJSON(b []byte) error {
type tmp Spec
var s struct {
tmp
Version interface{} `json:"version"`
}
err := json.Unmarshal(b, &s)
if err != nil {
return err
}
*r = Spec(s.tmp)
switch t := s.Version.(type) {
case float64:
if t == 1 {
r.Version = fmt.Sprintf("%.1f", t)
} else {
r.Version = strconv.FormatFloat(t, 'f', -1, 64)
}
case string:
r.Version = t
}
return nil
}
func (r Spec) MarshalJSON() ([]byte, error) {
spec := struct {
Type string `json:"type"`
Version string `json:"version"`
Properties map[string]interface{} `json:"properties"`
}{
Type: r.Type,
Version: r.Version,
Properties: r.Properties,
}
return json.Marshal(spec)
}
// policyResult is the response of a base Policy result.
type policyResult struct {
gophercloud.Result
}
// Extract interprets any policyResult-based result as a Policy.
func (r policyResult) Extract() (*Policy, error) {
var s struct {
Policy *Policy `json:"policy"`
}
err := r.ExtractInto(&s)
return s.Policy, err
}
// CreateResult is the result of a Create operation. Call its Extract
// method to interpret it as a Policy.
type CreateResult struct {
policyResult
}
// GetResult is the result of a Get operation. Call its Extract method to
// interpret it as a Policy.
type GetResult struct {
policyResult
}
// UpdateResult is the result of an Update operation. Call its Extract
// method to interpret it as a Policy.
type UpdateResult struct {
policyResult
}
// ValidateResult is the result of a Validate operation. Call its Extract
// method to interpret it as a Policy.
type ValidateResult struct {
policyResult
}
// DeleteResult is the result of a Delete operation. Call its Extract
// method to interpret it as a DeleteHeader.
type DeleteResult struct {
gophercloud.ErrResult
}
// PolicyPage contains a list page of all policies from a List call.
type PolicyPage struct {
pagination.MarkerPageBase
}
// IsEmpty determines if a PolicyPage contains any results.
func (page PolicyPage) IsEmpty() (bool, error) {
policies, err := ExtractPolicies(page)
return len(policies) == 0, err
}
// LastMarker returns the last policy ID in a ListResult.
func (r PolicyPage) LastMarker() (string, error) {
policies, err := ExtractPolicies(r)
if err != nil {
return "", err
}
if len(policies) == 0 {
return "", nil
}
return policies[len(policies)-1].ID, nil
}
// ExtractPolicies returns a slice of Policies from the List operation.
func | (r pagination.Page) ([]Policy, error) {
var s struct {
Policies []Policy `json:"policies"`
}
err := (r.(PolicyPage)).ExtractInto(&s)
return s.Policies, err
}
| ExtractPolicies |
main.rs | #![no_std]
#![no_main]
// I2S `peripheral mode` demo
// Signal average level indicator using an RGB LED (APA102 on ItsyBitsy nRF52840)
use embedded_hal::blocking::spi::Write;
use {
core::{
panic::PanicInfo,
sync::atomic::{compiler_fence, Ordering},
},
hal::{
gpio::Level,
i2s::*,
pac::SPIM0,
spim::{Frequency, Mode as SPIMode, Phase, Pins, Polarity, Spim},
},
nrf52840_hal as hal,
rtt_target::{rprintln, rtt_init_print},
};
#[repr(align(4))]
struct Aligned<T: ?Sized>(T);
const OFF: [u8; 9] = [0x00, 0x00, 0x00, 0x00, 0xFF, 0x00, 0x00, 0x00, 0xFF];
const GREEN: [u8; 9] = [0x00, 0x00, 0x00, 0x00, 0xFF, 0x00, 0x10, 0x00, 0xFF];
const ORANGE: [u8; 9] = [0x00, 0x00, 0x00, 0x00, 0xFF, 0x00, 0x10, 0x10, 0xFF];
const RED: [u8; 9] = [0x00, 0x00, 0x00, 0x00, 0xFF, 0x00, 0x00, 0x10, 0xFF];
#[rtic::app(device = crate::hal::pac, peripherals = true)]
const APP: () = {
struct Resources {
rgb: Spim<SPIM0>,
transfer: Option<Transfer<&'static mut [i16; 128]>>,
}
#[init]
fn init(ctx: init::Context) -> init::LateResources { | // The I2S buffer address must be 4 byte aligned.
static mut RX_BUF: Aligned<[i16; 128]> = Aligned([0; 128]);
let _clocks = hal::clocks::Clocks::new(ctx.device.CLOCK).enable_ext_hfosc();
rtt_init_print!();
rprintln!("Play me some audio...");
let p0 = hal::gpio::p0::Parts::new(ctx.device.P0);
let mck_pin = p0.p0_25.into_floating_input().degrade();
let sck_pin = p0.p0_24.into_floating_input().degrade();
let lrck_pin = p0.p0_16.into_floating_input().degrade();
let sdin_pin = p0.p0_14.into_floating_input().degrade();
// Configure I2S reception
let i2s = I2S::new_peripheral(
ctx.device.I2S,
Some(&mck_pin),
&sck_pin,
&lrck_pin,
Some(&sdin_pin),
None,
);
i2s.enable_interrupt(I2SEvent::RxPtrUpdated).start();
// Configure APA102 RGB LED control
let p1 = hal::gpio::p1::Parts::new(ctx.device.P1);
let rgb_data_pin = p0.p0_08.into_push_pull_output(Level::Low).degrade();
let rgb_clk_pin = p1.p1_09.into_push_pull_output(Level::Low).degrade();
let rgb = Spim::new(
ctx.device.SPIM0,
Pins {
miso: None,
mosi: Some(rgb_data_pin),
sck: rgb_clk_pin,
},
Frequency::M4,
SPIMode {
polarity: Polarity::IdleLow,
phase: Phase::CaptureOnFirstTransition,
},
0,
);
init::LateResources {
rgb,
transfer: i2s.rx(&mut RX_BUF.0).ok(),
}
}
#[task(binds = I2S, resources = [rgb, transfer])]
fn on_i2s(ctx: on_i2s::Context) {
let (rx_buf, i2s) = ctx.resources.transfer.take().unwrap().wait();
if i2s.is_event_triggered(I2SEvent::RxPtrUpdated) {
i2s.reset_event(I2SEvent::RxPtrUpdated);
// Calculate mono summed average of received buffer
let avg = (rx_buf.iter().map(|x| (*x).abs() as u32).sum::<u32>() / rx_buf.len() as u32)
as u16;
let color = match avg {
0..=4 => &OFF,
5..=10_337 => &GREEN,
10_338..=16_383 => &ORANGE,
_ => &RED,
};
<Spim<SPIM0> as Write<u8>>::write(ctx.resources.rgb, color).ok();
}
*ctx.resources.transfer = i2s.rx(rx_buf).ok();
}
};
#[inline(never)]
#[panic_handler]
fn panic(info: &PanicInfo) -> ! {
cortex_m::interrupt::disable();
rprintln!("{}", info);
loop {
compiler_fence(Ordering::SeqCst);
}
} | |
launch-list.component.ts | import { LaunchFacadeService } from "./../services/launch-facade.service";
import { Component, ChangeDetectionStrategy } from "@angular/core";
@Component({
selector: "app-launch-list",
templateUrl: "./launch-list.component.html",
styleUrls: ["./launch-list.component.css"],
changeDetection: ChangeDetectionStrategy.OnPush
})
export class | {
constructor(private readonly launchFacade: LaunchFacadeService) {}
pastLaunches$ = this.launchFacade.pastLaunchListStoreCache();
}
| LaunchListComponent |
channel.go | package rabbit
import (
"github.com/streadway/amqp"
)
type Channel struct {
ch *amqp.Channel
}
type PublishData struct {
Exchange string
Key string
Mandatory bool // if there is no queue to route to: true returns the message to the producer, false drops it
Immediate bool // if there is no consumer: true returns the message to the producer, false leaves it in the queue
Msg *amqp.Publishing
}
// name, kind string, durable, autoDelete, internal, noWait bool, args Table
type Exchange struct {
Name string
Kind string
Durable bool
AutoDelete bool
Internal bool
NoWait bool
Args amqp.Table
}
// name string, durable, autoDelete, exclusive, noWait bool, args Table
type Queue struct {
Name string
Durable bool
AutoDelete bool
Exclusive bool
NoWait bool
Args amqp.Table
}
type ChannelService interface {
Publish(data *PublishData) error
}
func NewChannel(ch *amqp.Channel) *Channel {
return &Channel{ch: ch}
}
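// Added usage sketch (not part of the original file): how this wrapper is meant to be
// used end to end. The broker URL, exchange, queue and routing key names are made up
// for illustration only.
func exampleUsage() error {
	conn, err := amqp.Dial("amqp://guest:guest@localhost:5672/")
	if err != nil {
		return err
	}
	defer conn.Close()
	raw, err := conn.Channel()
	if err != nil {
		return err
	}
	ch := NewChannel(raw)
	if err := ch.ExchangeDeclare(&Exchange{Name: "logs", Kind: "direct", Durable: true}); err != nil {
		return err
	}
	if _, err := ch.QueueDeclare(&Queue{Name: "logs.info", Durable: true}); err != nil {
		return err
	}
	if err := ch.QueueBind("logs.info", "info", "logs", nil); err != nil {
		return err
	}
	// Mandatory/Immediate are left false: an unroutable message is dropped, and a
	// routed message is queued even when no consumer is attached.
	return ch.Publish(&PublishData{
		Exchange: "logs",
		Key:      "info",
		Msg:      &amqp.Publishing{ContentType: "text/plain", Body: []byte("hello")},
	})
}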
func (c *Channel) Publish(data *Publ | h.Publish(data.Exchange, data.Key, data.Mandatory, data.Immediate, *data.Msg)
}
func (c *Channel) ExchangeDeclare(ex *Exchange) error {
return c.ch.ExchangeDeclare(ex.Name, ex.Kind, ex.Durable, ex.AutoDelete, ex.Internal, ex.NoWait, ex.Args)
}
func (c *Channel) QueueDeclare(queue *Queue) (amqp.Queue, error) {
return c.ch.QueueDeclare(queue.Name, queue.Durable, queue.AutoDelete, queue.Exclusive, queue.NoWait, queue.Args)
}
func (c *Channel) QueueBind(queueName, key, exchangeName string, args amqp.Table) error {
return c.ch.QueueBind(queueName, key, exchangeName, true, args)
}
func (c *Channel) Consume(name string) (<-chan amqp.Delivery, error) {
return c.ch.Consume(name, "", false, false, false, false, nil)
}
| ishData) error {
return c.c |
AlipayTradeSettleReceivablesQueryModel.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.SettleEntity import SettleEntity
class AlipayTradeSettleReceivablesQueryModel(object):
def __init__(self):
self._biz_product = None
self._extend_params = None
self._merchant_info = None
self._out_request_no = None
@property
def biz_product(self):
return self._biz_product
@biz_product.setter
def biz_product(self, value):
self._biz_product = value
@property
def extend_params(self):
return self._extend_params
@extend_params.setter
def | (self, value):
self._extend_params = value
@property
def merchant_info(self):
return self._merchant_info
@merchant_info.setter
def merchant_info(self, value):
if isinstance(value, SettleEntity):
self._merchant_info = value
else:
self._merchant_info = SettleEntity.from_alipay_dict(value)
@property
def out_request_no(self):
return self._out_request_no
@out_request_no.setter
def out_request_no(self, value):
self._out_request_no = value
def to_alipay_dict(self):
params = dict()
if self.biz_product:
if hasattr(self.biz_product, 'to_alipay_dict'):
params['biz_product'] = self.biz_product.to_alipay_dict()
else:
params['biz_product'] = self.biz_product
if self.extend_params:
if hasattr(self.extend_params, 'to_alipay_dict'):
params['extend_params'] = self.extend_params.to_alipay_dict()
else:
params['extend_params'] = self.extend_params
if self.merchant_info:
if hasattr(self.merchant_info, 'to_alipay_dict'):
params['merchant_info'] = self.merchant_info.to_alipay_dict()
else:
params['merchant_info'] = self.merchant_info
if self.out_request_no:
if hasattr(self.out_request_no, 'to_alipay_dict'):
params['out_request_no'] = self.out_request_no.to_alipay_dict()
else:
params['out_request_no'] = self.out_request_no
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayTradeSettleReceivablesQueryModel()
if 'biz_product' in d:
o.biz_product = d['biz_product']
if 'extend_params' in d:
o.extend_params = d['extend_params']
if 'merchant_info' in d:
o.merchant_info = d['merchant_info']
if 'out_request_no' in d:
o.out_request_no = d['out_request_no']
return o
| extend_params |