file_name (stringlengths 3-137) | prefix (stringlengths 0-918k) | suffix (stringlengths 0-962k) | middle (stringlengths 0-812k) |
---|---|---|---|
noms_ds.go
|
// Copyright 2019 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// This file incorporates work covered by the following copyright and
// permission notice:
//
// Copyright 2016 Attic Labs, Inc. All rights reserved.
// Licensed under the Apache License, version 2.0:
// http://www.apache.org/licenses/LICENSE-2.0
package main
import (
"context"
"fmt"
"os"
flag "github.com/juju/gnuflag"
"github.com/dolthub/dolt/go/store/cmd/noms/util"
"github.com/dolthub/dolt/go/store/config"
"github.com/dolthub/dolt/go/store/d"
"github.com/dolthub/dolt/go/store/types"
"github.com/dolthub/dolt/go/store/util/verbose"
)
var toDelete string
var nomsDs = &util.Command{
Run: runDs,
UsageLine: "ds [<database> | -d <dataset>]",
Short: "Noms dataset management",
Long: "See Spelling Objects at https://github.com/attic-labs/noms/blob/master/doc/spelling.md for details on the database and dataset arguments.",
Flags: setupDsFlags,
Nargs: 0,
}
func setupDsFlags() *flag.FlagSet {
dsFlagSet := flag.NewFlagSet("ds", flag.ExitOnError)
dsFlagSet.StringVar(&toDelete, "d", "", "dataset to delete")
verbose.RegisterVerboseFlags(dsFlagSet)
|
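// runDs lists the datasets in the given database, or deletes the dataset named by -d.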
func runDs(ctx context.Context, args []string) int {
cfg := config.NewResolver()
if toDelete != "" {
db, set, err := cfg.GetDataset(ctx, toDelete)
util.CheckError(err)
defer db.Close()
oldCommitRef, errBool, err := set.MaybeHeadRef()
d.PanicIfError(err)
if !errBool {
util.CheckError(fmt.Errorf("Dataset %v not found", set.ID()))
}
_, err = set.Database().Delete(ctx, set)
util.CheckError(err)
fmt.Printf("Deleted %v (was #%v)\n", toDelete, oldCommitRef.TargetHash().String())
} else {
dbSpec := ""
if len(args) >= 1 {
dbSpec = args[0]
}
store, err := cfg.GetDatabase(ctx, dbSpec)
util.CheckError(err)
defer store.Close()
dss, err := store.Datasets(ctx)
if err != nil {
fmt.Fprintln(os.Stderr, "failed to get datasets")
return 1
}
_ = dss.IterAll(ctx, func(k, v types.Value) error {
fmt.Println(k)
return nil
})
}
return 0
}
|
return dsFlagSet
}
|
metal3remediation_manager.go
|
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package baremetal
import (
"context"
"encoding/json"
"fmt"
"time"
"github.com/go-logr/logr"
"github.com/pkg/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/client-go/tools/cache"
bmh "github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1"
capm3 "github.com/metal3-io/cluster-api-provider-metal3/api/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
capi "sigs.k8s.io/cluster-api/api/v1beta1"
"sigs.k8s.io/cluster-api/util"
"sigs.k8s.io/cluster-api/util/conditions"
"sigs.k8s.io/cluster-api/util/patch"
"sigs.k8s.io/controller-runtime/pkg/client"
)
const (
rebootAnnotation = "reboot.metal3.io"
)
// RemediationManagerInterface is an interface for a RemediationManager
type RemediationManagerInterface interface {
SetFinalizer()
UnsetFinalizer()
TimeToRemediate(timeout time.Duration) (bool, time.Duration)
SetRebootAnnotation(ctx context.Context) error
SetUnhealthyAnnotation(ctx context.Context) error
GetUnhealthyHost(ctx context.Context) (*bmh.BareMetalHost, *patch.Helper, error)
OnlineStatus(host *bmh.BareMetalHost) bool
GetRemediationType() capm3.RemediationType
RetryLimitIsSet() bool
SetRemediationPhase(phase string)
GetRemediationPhase() string
GetLastRemediatedTime() *metav1.Time
SetLastRemediationTime(remediationTime *metav1.Time)
HasReachRetryLimit() bool
GetTimeout() *metav1.Duration
IncreaseRetryCount()
SetOwnerRemediatedConditionNew(ctx context.Context) error
GetCapiMachine(ctx context.Context) (*capi.Machine, error)
}
// RemediationManager is responsible for performing remediation reconciliation
type RemediationManager struct {
Client client.Client
Metal3Remediation *capm3.Metal3Remediation
Metal3Machine *capm3.Metal3Machine
Machine *capi.Machine
Log logr.Logger
}
// NewRemediationManager returns a new helper for managing a Metal3Remediation object
func NewRemediationManager(client client.Client,
metal3remediation *capm3.Metal3Remediation, metal3Machine *capm3.Metal3Machine, machine *capi.Machine,
remediationLog logr.Logger) (*RemediationManager, error) {
return &RemediationManager{
Client: client,
Metal3Remediation: metal3remediation,
Metal3Machine: metal3Machine,
Machine: machine,
Log: remediationLog,
}, nil
}
// SetFinalizer sets finalizer
func (r *RemediationManager) SetFinalizer() {
// If the Metal3Remediation doesn't have finalizer, add it.
if !Contains(r.Metal3Remediation.Finalizers, capm3.RemediationFinalizer) {
r.Metal3Remediation.Finalizers = append(r.Metal3Remediation.Finalizers,
capm3.RemediationFinalizer,
)
}
}
// UnsetFinalizer unsets finalizer
func (r *RemediationManager) UnsetFinalizer() {
// Cluster is deleted so remove the finalizer.
r.Metal3Remediation.Finalizers = Filter(r.Metal3Remediation.Finalizers,
capm3.RemediationFinalizer,
)
}
// TimeToRemediate checks if it is time to execute the next remediation step
// and returns the time remaining until the next remediation.
func (r *RemediationManager) TimeToRemediate(timeout time.Duration) (bool, time.Duration) {
now := time.Now()
// status is not updated yet
if r.Metal3Remediation.Status.LastRemediated == nil {
return false, timeout
}
if r.Metal3Remediation.Status.LastRemediated.Add(timeout).Before(now) {
return true, time.Duration(0)
}
lastRemediated := now.Sub(r.Metal3Remediation.Status.LastRemediated.Time)
nextRemediation := timeout - lastRemediated + time.Second
return false, nextRemediation
}
// SetRebootAnnotation sets reboot annotation on unhealthy host
func (r *RemediationManager) SetRebootAnnotation(ctx context.Context) error {
host, helper, err := r.GetUnhealthyHost(ctx)
if err != nil {
return err
}
if host == nil {
return errors.New("Unable to set an Reboot Annotation, Host not found")
}
r.Log.Info("Adding Reboot annotation to host", host.Name)
rebootMode := bmh.RebootAnnotationArguments{}
rebootMode.Mode = bmh.RebootModeHard
marshalledMode, err := json.Marshal(rebootMode)
if err != nil {
return err
}
// Guard against a nil annotations map before writing the reboot annotation.
if host.Annotations == nil {
host.Annotations = make(map[string]string)
}
host.Annotations[rebootAnnotation] = string(marshalledMode)
return helper.Patch(ctx, host)
}
// SetUnhealthyAnnotation sets capm3.UnhealthyAnnotation on unhealthy host
func (r *RemediationManager) SetUnhealthyAnnotation(ctx context.Context) error {
host, helper, err := r.GetUnhealthyHost(ctx)
if err != nil {
return err
}
if host == nil {
return errors.New("Unable to set an Unhealthy Annotation, Host not found")
}
r.Log.Info("Adding Unhealthy annotation to host", host.Name)
// Guard against a nil annotations map before writing the unhealthy annotation.
if host.Annotations == nil {
host.Annotations = make(map[string]string)
}
host.Annotations[capm3.UnhealthyAnnotation] = "capm3/UnhealthyNode"
return helper.Patch(ctx, host)
}
// GetUnhealthyHost gets the associated host for the unhealthy machine. Returns nil if not found. Assumes the
// host is in the same namespace as the unhealthy machine.
func (r *RemediationManager) GetUnhealthyHost(ctx context.Context) (*bmh.BareMetalHost, *patch.Helper, error) {
host, err := getUnhealthyHost(ctx, r.Metal3Machine, r.Client, r.Log)
if err != nil || host == nil {
return host, nil, err
}
helper, err := patch.NewHelper(host, r.Client)
return host, helper, err
}
func getUnhealthyHost(ctx context.Context, m3Machine *capm3.Metal3Machine, cl client.Client,
rLog logr.Logger,
) (*bmh.BareMetalHost, error) {
annotations := m3Machine.ObjectMeta.GetAnnotations()
if annotations == nil {
err := fmt.Errorf("unable to get %s annotations", m3Machine.Name)
return nil, err
}
hostKey, ok := annotations[HostAnnotation]
if !ok
|
hostNamespace, hostName, err := cache.SplitMetaNamespaceKey(hostKey)
if err != nil {
rLog.Error(err, "Error parsing annotation value", "annotation key", hostKey)
return nil, err
}
host := bmh.BareMetalHost{}
key := client.ObjectKey{
Name: hostName,
Namespace: hostNamespace,
}
err = cl.Get(ctx, key, &host)
if apierrors.IsNotFound(err) {
rLog.Info("Annotated host not found", "host", hostKey)
return nil, err
} else if err != nil {
return nil, err
}
return &host, nil
}
// OnlineStatus returns the host's Online field value.
func (r *RemediationManager) OnlineStatus(host *bmh.BareMetalHost) bool {
return host.Spec.Online
}
// GetRemediationType returns the type of the remediation strategy.
func (r *RemediationManager) GetRemediationType() capm3.RemediationType {
return r.Metal3Remediation.Spec.Strategy.Type
}
// RetryLimitIsSet returns true if retryLimit is set, false otherwise.
func (r *RemediationManager) RetryLimitIsSet() bool {
return r.Metal3Remediation.Spec.Strategy.RetryLimit > 0
}
// HasReachRetryLimit returns true if retryLimit is reached
func (r *RemediationManager) HasReachRetryLimit() bool {
return r.Metal3Remediation.Spec.Strategy.RetryLimit == r.Metal3Remediation.Status.RetryCount
}
// SetRemediationPhase sets the phase of the remediation.
func (r *RemediationManager) SetRemediationPhase(phase string) {
r.Log.Info("Switching remediation phase", "remediationPhase", phase)
r.Metal3Remediation.Status.Phase = phase
}
// GetRemediationPhase returns the current phase of the remediation.
func (r *RemediationManager) GetRemediationPhase() string {
return r.Metal3Remediation.Status.Phase
}
// GetLastRemediatedTime returns last remediation time
func (r *RemediationManager) GetLastRemediatedTime() *metav1.Time {
return r.Metal3Remediation.Status.LastRemediated
}
// SetLastRemediationTime sets the last remediation timestamp on Status.
func (r *RemediationManager) SetLastRemediationTime(remediationTime *metav1.Time) {
r.Log.Info("Last remediation time", "remediationTime", remediationTime)
r.Metal3Remediation.Status.LastRemediated = remediationTime
}
// GetTimeout returns timeout duration from remediation request Spec
func (r *RemediationManager) GetTimeout() *metav1.Duration {
return r.Metal3Remediation.Spec.Strategy.Timeout
}
// IncreaseRetryCount increases the retry count on Status
func (r *RemediationManager) IncreaseRetryCount() {
r.Metal3Remediation.Status.RetryCount++
}
func (r *RemediationManager) SetOwnerRemediatedConditionNew(ctx context.Context) error {
capiMachine, err := r.GetCapiMachine(ctx)
if err != nil {
r.Log.Info("Unable to fetch CAPI Machine")
return err
}
machineHelper, err := patch.NewHelper(capiMachine, r.Client)
if err != nil {
r.Log.Info("Unable to create patch helper for Machine")
return err
}
conditions.MarkFalse(capiMachine, capi.MachineOwnerRemediatedCondition, capi.WaitingForRemediationReason, capi.ConditionSeverityWarning, "")
err = machineHelper.Patch(ctx, capiMachine)
if err != nil {
r.Log.Info("Unable to patch Machine %d", capiMachine)
return err
}
return nil
}
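// GetCapiMachine returns the CAPI Machine that owns this Metal3Remediation.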
func (r *RemediationManager) GetCapiMachine(ctx context.Context) (*capi.Machine, error) {
capiMachine, err := util.GetOwnerMachine(ctx, r.Client, r.Metal3Remediation.ObjectMeta)
if err != nil {
r.Log.Error(err, "metal3Remediation's owner Machine could not be retrieved")
return nil, errors.Wrapf(err, "metal3Remediation's owner Machine could not be retrieved")
}
return capiMachine, nil
}
|
{
err := fmt.Errorf("unable to get %s HostAnnotation", m3Machine.Name)
return nil, err
}
|
UpdateClusterCommand.ts
|
import * as __aws_sdk_middleware_stack from "@aws-sdk/middleware-stack";
import * as __aws_sdk_types from "@aws-sdk/types";
import * as _stream from "stream";
import { UpdateCluster } from "../model/operations/UpdateCluster";
import { InputTypesUnion } from "../types/InputTypesUnion";
import { OutputTypesUnion } from "../types/OutputTypesUnion";
import { UpdateClusterInput } from "../types/UpdateClusterInput";
import { UpdateClusterOutput } from "../types/UpdateClusterOutput";
import { SnowballResolvedConfiguration } from "../SnowballConfiguration";
export * from "../types/UpdateClusterInput";
export * from "../types/UpdateClusterOutput";
export * from "../types/UpdateClusterExceptionsUnion";
export class
|
implements
__aws_sdk_types.Command<
InputTypesUnion,
UpdateClusterInput,
OutputTypesUnion,
UpdateClusterOutput,
SnowballResolvedConfiguration,
_stream.Readable
> {
readonly model = UpdateCluster;
readonly middlewareStack = new __aws_sdk_middleware_stack.MiddlewareStack<
UpdateClusterInput,
UpdateClusterOutput,
_stream.Readable
>();
constructor(readonly input: UpdateClusterInput) {}
resolveMiddleware(
clientStack: __aws_sdk_middleware_stack.MiddlewareStack<
InputTypesUnion,
OutputTypesUnion,
_stream.Readable
>,
configuration: SnowballResolvedConfiguration
): __aws_sdk_types.Handler<UpdateClusterInput, UpdateClusterOutput> {
const { handler } = configuration;
const stack = clientStack.concat(this.middlewareStack);
const handlerExecutionContext: __aws_sdk_types.HandlerExecutionContext = {
logger: {} as any,
model: this.model
};
return stack.resolve(
handler<UpdateClusterInput, UpdateClusterOutput>(handlerExecutionContext),
handlerExecutionContext
);
}
}
|
UpdateClusterCommand
|
ctf.py
|
"""
Functions related to ctf.
Currently contains only a few functions that allow running ctffind from a console or notebook.
Work in progress.
# Author: Vladan Lucic (Max Planck Institute for Biochemistry)
# $Id$
"""
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from builtins import zip
from builtins import str
from builtins import range
from builtins import object
from past.utils import old_div
from past.builtins import basestring
__version__ = "$Revision$"
import os
import subprocess
import logging
import numpy as np
import matplotlib.pyplot as plt
import pyto.util.nested
from pyto.io.image_io import ImageIO
from pyto.grey.image import Image
class Ctf(object):
"""
Determination of CTF by external tools
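Example usage (a minimal sketch; the directory names and parameter values
below are hypothetical, not defaults of this module):

    ctf = Ctf.find(
        image_dir='images/', image_prefix='tomo_', ctf_dir='ctf/',
        params={'voltage': 300}, pixel_a=1.2, tool='ctffind')
    print(ctf.defoci)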
"""
# prefix for validation attributes obtained from gctf
validation_prefix = "validation_"
# default params ctffind 4.0.17, also 4.1
default_params_ctffind = {
"pixel_a":1, "cs":2.7, "amp":0.1, "phase":"no", 'box':512,
'min_res':30, 'max_res':5, 'min_def':5000, 'max_def':50000,
'def_step':500, 'astig':100, 'known_astig':'no', 'slow_search':'yes',
'restraint_astig':'yes', 'tolerated_astig':200,
'phase':'yes', 'min_phase':0, 'max_phase':2, 'phase_step':0.1,
'expert':'no'}
# parameter list for ctffind 4.0.17 (currently not used, left for reference)
param_names_ctffind_4_0 = [
'pixel_a', 'voltage', 'cs', 'amp', 'box', 'min_res', 'max_res',
'min_def', 'max_def', 'def_step', 'astig', 'phase',
'min_phase', 'max_phase', 'phase_step']
# default parameter list for 4.1; consistent with default_params_ctffind
param_names_ctffind_4_1 = [
'pixel_a', 'voltage', 'cs', 'amp', 'box', 'min_res', 'max_res',
'min_def', 'max_def', 'def_step', 'known_astig', 'slow_search',
'restraint_astig','tolerated_astig',
'phase', 'min_phase', 'max_phase', 'phase_step', 'expert']
def __init__(self):
"""
Initializes common attributes
"""
# attributes
self.image_path_orig = []
self.image_inds = []
self.image_path = []
self.ctf_path = []
|
self.defoci = []
self.resolution = []
self.pixel_a = []
self.angle = []
@classmethod
def find(
cls, image_dir, image_prefix, ctf_dir, params, pixel_a=None,
flatten='auto', tool='ctffind', executable=None,
param_file='ctf_params.txt', fast=False, max_images=None,
plot_ctf=True, plot_ps=True, b_plot=True, exp_f_plot=False,
show_legend=True, plot_phases=True, plot_defoci=True,
plot_resolution=True, print_results=True, print_validation=False):
"""
Determines and shows CTF fits for multiple images.
All files located in (arg) image_dir whose names start with (arg)
image_prefix and that have extension mrc, em or st are selected
for the ctf determination.
If a selected file is 3D (image stack), and arg flatten is True or
'auto', all z-slices are summed up (saved in ctf_dir) and the ctf
is determined on the resulting (flattened) image. Alternatively, if arg
flatten is False, z-slices are extracted, saved in ctf_dir and
analyzed separately.
All resulting files, as well as the extracted or flattened images
(in case of 3D files) are saved or moved to directory ctf_dir.
CTF is determined using external tools. Current options are:
- CTFFIND
- gCTF
These tools have to be installed externally.
Parameters for the ctf tools are specified as a dictionary (arg params).
Parameters used for both ctffind and gctf are:
- 'pixel_a', 'voltage', 'cs', 'amp', 'box', 'min_res', 'max_res',
'min_def', 'max_def', 'def_step', 'astig', 'phase',
'min_phase', 'max_phase', 'phase_step'
Voltage ('voltage') should always be specified. The pixel size
(pixel_a) has to be specified in case it can not be read from
the image header. All other parameters are optional, if they are
not specified, the ctffind / gctf default values are used.
The default values should be fine for single particle images.
Parameter recommendations for phase plate images are given in
the ctffind / gctf documentation.
In case of ctffind, arg params can also be a list containing the
parameter values in the same order as specified above, starting
with voltage.
Important for ctffind: Because the required arguments differ between
versions 4.0 and 4.1, as well as depend on values specified, it is
not guaranteed that the dictionary form of arg params will work.
In case of problems, specify params as a list.
In addition, all other gctf arguments can also be specified
(without '--'). It is suggested to use:
'do_EPA':'', 'do_validation':''
Parameter units are the same as in the ctf determination tools.
Intended for use in an environment such as Jupyter notebook.
Arguments:
- image_dir: directory where images reside
- image_prefix: beginning of image file name(s)
- ctf_dir: directory where the ctf determination results and
extracted images are saved
- pixel_a: pixel size in A
- params: ctf determination parameters
- flatten: indicates whether 3D images should be flattened (True or
'auto') or not (False).
- tool: name of the ctf determination tool
- executable: ctf tool executable
- param_file: name of the temporary parameter file
- fast: flag indicating whether ctffind --fast option is used
- print_results: flag indicating if phase and defoci found
are printed for each analyzed image
- plot_ctf: flag indicating whether ctf is plotted for each
analyzed image
- show_legend: flag indicating whether a legend is shown on ctf graphs
- plot_phases, plot_defoci: flags indicating whether a graph
containing phases and defoci of all images, respectively, are plotted
- max_images: max number of images analyzed, for testing
Returns an instance of this class. The following attributes are all
lists where elements correspond to individual images:
- image_path_orig: image path of the input file
- image_path: image path of the image that is actually used
to determine ctf. It differs from image_path_orig if the original
(input) image is a stack that is flattened or used to extract slices
- image_inds: index of a slice extracted for a stack
- ctf_path: path of the ctf fit image
- defocus_1, defocus_2, defocus: defoci along the two axes and the
mean defocus in um
- angle: defocus (astigmatism) angle
- phase: phase shift in multiples of pi
- resolution: resolution in nm
- ccc: correlation coefficient
- pixel_a: pixel size in A
- b_factor: b-factor (gctf only)
"""
# initialize
index = 0
new = cls()
print_head = True
if plot_ctf and fast:
print(
"Warning: CTF will not be plotted because fast execution"
+ " was chosen")
# check which ctf tool to use
if tool == 'ctffind':
if executable is None:
executable = 'ctffind'
elif tool == 'gctf':
if executable is None:
executable = 'gctf'
else:
raise ValueError(
"CTF determination tool " + str(tool) + " was not understood.")
new.tool = tool
# cftfind on all images
file_list = np.sort(os.listdir(image_dir))
for image_name in file_list:
# skip files that are not images
if not image_name.startswith(image_prefix): continue
if not (image_name.endswith('.mrc') or image_name.endswith('.st')
or image_name.endswith('.em')):
continue
if image_name.endswith('ctf.mrc'): continue
# set input image path
image_path = os.path.join(image_dir, image_name)
# figure out whether to flatten or not (just once, assume all files
# are the same)
im_io = ImageIO(file=image_path)
if image_name.endswith('.st'):
im_io.readHeader(fileFormat='mrc')
else:
im_io.readHeader()
z_dim = im_io.shape[2]
n_digits = int(np.ceil(np.log10(z_dim)))
if isinstance(flatten, bool):
pass
elif isinstance(flatten, basestring) and (flatten == 'auto'):
if z_dim > 1:
flatten = True
else:
flatten = False
else:
raise ValueError(
"Argument flatten: "+ str(flatten) +" was not understood.")
# load stack and prepare image name, if need to extract images
if (z_dim > 1) and not flatten:
image_dir, image_name = os.path.split(image_path)
image_base, image_extension = image_name.rsplit('.', 1)
image_name_new_tmplt = (
image_base + '_%0' + str(n_digits) + 'd.mrc')
if image_name.endswith('.st'):
stack = Image.read(
image_path, memmap=True, fileFormat='mrc')
else:
stack = Image.read(image_path, memmap=True)
else:
image_path_to_read = image_path
# find ctf of the current image or stack
for image_in_stack_ind in range(z_dim):
# extract and save images if needed
if (z_dim > 1) and not flatten:
if not os.path.exists(ctf_dir): os.makedirs(ctf_dir)
image_path_to_read = os.path.join(
ctf_dir, (image_name_new_tmplt % image_in_stack_ind))
one_image = Image()
one_image.data = stack.data[:,:,image_in_stack_ind]
one_image.write(
file=image_path_to_read, pixel=stack.pixelsize)
# save image path related data
new.image_path_orig.append(image_path)
new.image_inds.append(image_in_stack_ind)
new.image_path.append(image_path_to_read)
# find ctf
if tool == 'ctffind':
# ctffind
res_one = cls.ctffind(
image_path=image_path_to_read, flatten=flatten,
ctf_dir=ctf_dir, executable=executable,
pixel_a=pixel_a, params=params,
param_file=param_file, fast=fast, print_head=print_head,
print_results= print_results,
plot_ctf=plot_ctf, show_legend=show_legend)
elif tool == 'gctf':
# gctf
res_one = cls.gctf(
image_path=image_path_to_read, params=params,
pixel_a=pixel_a, flatten=flatten, ctf_dir=ctf_dir,
executable=executable,
plot_ctf=plot_ctf, plot_ps=plot_ps ,b_plot=b_plot,
exp_f_plot=exp_f_plot, show_legend=show_legend,
print_results=print_results,
print_head=print_head,
print_validation=print_validation)
# save gctf specific data
try:
new.b_factor.append(res_one['b_factor'])
except AttributeError:
new.b_factor = [res_one['b_factor']]
for name, value in list(res_one.items()):
if name.startswith(cls.validation_prefix):
try:
previous_val = getattr(new, name)
previous_val.append(value)
setattr(new, name, previous_val)
except AttributeError:
setattr(new, name, [value])
else:
raise ValueError("Sorry tool: " + tool + " was not found.")
# save data common for ctffind and gctf
new.phases.append(res_one["phase"])
new.defoci.append(res_one["defocus"])
new.defoci_1.append(res_one['defocus_1'])
new.defoci_2.append(res_one['defocus_2'])
new.resolution.append(res_one['resolution'])
new.pixel_a.append(res_one['pixel_a'])
new.angle.append(res_one['angle'])
new.ctf_path.append(res_one['ctf_path'])
# keep track of n images processed so far
print_head = False
index = index + 1
if (max_images is not None) and (index > max_images): break
if flatten: break
# plot phases
if plot_phases:
plt.figure()
plt.bar(list(range(index)), new.phases)
plt.plot([0, index], [0.5, 0.5], 'r--')
plt.ylabel(r'Phase shift [$\pi$]')
plt.xlabel('Images')
plt.title("Phase shift summary")
# plot defocus
if plot_defoci:
plt.figure()
plt.bar(list(range(index)), new.defoci)
plt.ylabel(r'Defocus [$\mu m$]')
plt.xlabel('Images')
plt.title("Defocus summary")
# plot resolution
if plot_resolution:
plt.figure()
plt.bar(list(range(index)), new.resolution)
plt.ylabel('Resolution [nm]')
plt.xlabel('Images')
plt.title("Resolution summary")
return new
@classmethod
def ctffind(
cls, image_path, ctf_dir, params, pixel_a=None, flatten=False,
executable='ctffind', param_file='ctf_params.txt', fast=False,
print_results=True, print_head=True,
plot_ctf=True, show_legend=True):
"""
Determines and shows CTF fits of one image using ctffind.
See find() for more information.
"""
# make ctf dir if doesn't exist
if not os.path.exists(ctf_dir): os.makedirs(ctf_dir)
# find pixel size
if pixel_a is None:
pixel_a = cls.read_pixel_size(image_path=image_path)
# flatten frame stack
if flatten:
image_path = cls.flatten_stack(
stack_path=image_path, flat_dir=ctf_dir)
# default params ctffind 4.0.17 (moved to top of this file anyway)
#default_params = {
# "pixel_a":1, "cs":2.7, "amp":0.1, "phase":"no", 'box':512,
# 'min_res':30, 'max_res':5, 'min_def':5000, 'max_def':50000,
# 'def_step':500, 'astig':100, 'phase':'no', 'min_phase':0,
# 'max_phase':2, 'phase_step':0.1}
#param_names = [
# 'pixel_a', 'voltage', 'cs', 'amp', 'box', 'min_res', 'max_res',
# 'min_def', 'max_def', 'def_step', 'astig', 'phase',
# 'min_phase', 'max_phase', 'phase_step']
# keep params if list, add default if dict
if isinstance(params, list):
comb_params = [pixel_a] + params
elif isinstance(params, dict):
params_dict = cls.default_params_ctffind.copy()
params_dict.update(params)
params_dict['pixel_a'] = pixel_a
param_names = cls.make_param_names_ctffind(params=params_dict)
comb_params = [params_dict[name] for name in param_names]
# set ctffind out paths
image_dir, image_name = os.path.split(image_path)
image_base, image_extension = image_name.rsplit('.', 1)
ctf_path = os.path.join(ctf_dir, image_base + '_ctf.mrc')
ctf_txt_path = os.path.join(ctf_dir, image_base + '_ctf.txt')
ctf_avrot_path = os.path.join(ctf_dir, image_base + '_ctf_avrot.txt')
# write ctf parameters to a file
param_path = os.path.join(ctf_dir, param_file)
pf = open(param_path, 'w')
pf.write(image_path + '\n')
pf.write(ctf_path + '\n')
str_params = [str(par) + '\n' for par in comb_params]
pf.writelines(str_params)
pf.flush()
# execute ctffind
# shell commands that work:
# - ctffind < param_path
# - cat params.txt | ctffind
#print(image)
if fast:
ctf_cmd = [executable, '--fast']
else:
ctf_cmd = [executable]
try:
subprocess.check_call(ctf_cmd, stdin=open(param_path))
except Exception as exc:
# workaround for ctffind command returning code 255 (4.1.8, 09.2018)
logging.debug('CalledProcessError: ' + str(exc))
# read results:
ctf_txt = np.loadtxt(ctf_txt_path)
results = {
"defocus_1":ctf_txt[1]/10000., "defocus_2":ctf_txt[2]/10000.,
"angle" : ctf_txt[3], "phase":old_div(ctf_txt[4],np.pi),
"ccc" : ctf_txt[5], "resolution" : ctf_txt[6] / 10.,
'pixel_a':pixel_a}
results['defocus'] = (results['defocus_1'] + results['defocus_2']) / 2.
results['ctf_path'] = ctf_path
# prepare header for defoci and phases
if print_head:
left_space = ' ' * old_div((len(image_name) - 5), 2)
right_space = ' ' * old_div((len(image_name) - 4), 2)
head_1 = (
left_space + "Image" + right_space +
" Defocus 1 Defocus 2 Phase Resolution")
head_2 = (
left_space + " " + right_space +
" um um [pi] nm ")
# prepare results
if print_results:
data_format = '%s %6.2f %6.2f %6.2f %6.2f '
data_vars = (
image_name, results["defocus_1"], results["defocus_2"],
results["phase"], results["resolution"])
# print
if print_head:
print(head_1)
print(head_2)
if print_results:
print(data_format % data_vars)
# plot ctf
if plot_ctf:
plt.figure()
avrot_data = np.loadtxt(ctf_avrot_path)
x_data = avrot_data[0] / pixel_a
plt.plot(x_data, avrot_data[2], 'g-', label='PS')
plt.plot(
x_data, avrot_data[3], color='orange', linewidth=2,
label='CTF fit')
plt.plot(
x_data, avrot_data[4], color='blue', linewidth=2,
label='Quality')
plt.ylim(-0.1, 1.1)
plt.xlabel("Spatial frequency [1/A])")
plt.ylabel("Amplitude")
if show_legend: plt.legend()
plt.show()
return results
@classmethod
def make_param_names_ctffind(cls, params):
"""
Makes a list of parameter names that's suitable for ctffind 4.1 and
it is in accordance with the specified params.
Argument:
- params: dict of parameters
Returns parameter list
"""
# optional parts
if params['restraint_astig'] in ['yes', 'y']:
restraint_astig_part = ['restraint_astig','tolerated_astig']
else:
restraint_astig_part = ['restraint_astig']
if (params['phase'] == 'yes') or (params['phase'] == 'y'):
phase_part = ['phase', 'min_phase', 'max_phase', 'phase_step']
else:
phase_part = ['phase']
# combine
param_names = (
cls.param_names_ctffind_4_1[:12] + restraint_astig_part
+ phase_part + ['expert'])
return param_names
@classmethod
def gctf(
cls, image_path, ctf_dir, params, pixel_a=None, flatten=False,
executable='gctf', plot_ps=True, plot_ctf=True,
b_plot=True, exp_f_plot=False, show_legend=True,
print_results=True, print_head=True, print_validation=False):
"""
Determines and shows CTF fits of one image using gctf.
See find() for more information.
"""
# make ctf dir if doesn't exist
if not os.path.exists(ctf_dir): os.makedirs(ctf_dir)
# find pixel size
if pixel_a is None:
pixel_a = cls.read_pixel_size(image_path=image_path)
# flatten frame stack if needed
if flatten:
image_path = cls.flatten_stack(
stack_path=image_path, flat_dir=ctf_dir)
# prepare parameters
gctf_names = {
'pixel_a':'apix', 'voltage':'kV', 'cs':'Cs', 'amp':'ac',
'box':'boxsize', 'min_res':'resL', 'max_res':'resH',
'min_def':'defL', 'max_def':'defH', 'def_step':'defS',
'astig':'astm', 'phase':'phase', 'min_phase':'phase_shift_L',
'max_phase':'phase_shift_H', 'phase_step':'phase_shift_S'}
params["pixel_a"] = pixel_a
params_list = [
["--" + gctf_names.get(key, key), str(val)]
for key, val in list(params.items())]
params_list = pyto.util.nested.flatten(params_list)
params_list = [par for par in params_list if len(par) > 0]
#print(params_list)
# execute ctffind
ctf_cmd = [executable] + params_list + [image_path]
call_status = subprocess.check_call(ctf_cmd)
# set gctf out paths
image_dir, image_name = os.path.split(image_path)
image_base, image_extension = image_name.rsplit('.', 1)
epa_path = os.path.join(ctf_dir, image_base + '_EPA.log')
gctf_path = os.path.join(ctf_dir, image_base + '_gctf.log')
ctf_path = os.path.join(ctf_dir, image_base + '.ctf')
tmp_epa_path = os.path.join(image_dir, image_base + '_EPA.log')
tmp_gctf_path = os.path.join(image_dir, image_base + '_gctf.log')
tmp_ctf_path = os.path.join(image_dir, image_base + '.ctf')
# move generated files to ctf_dir
if image_dir != ctf_dir:
call_status = subprocess.check_call(['mv', tmp_epa_path, epa_path])
call_status = subprocess.check_call(
['mv', tmp_gctf_path, gctf_path])
call_status = subprocess.check_call(['mv', tmp_ctf_path, ctf_path])
call_status = subprocess.check_call(
['mv', 'micrographs_all_gctf.star', ctf_dir])
# read results
in_last_cycle = False
in_last_cycle_data = False
validation_lines = []
for line in open(gctf_path):
# read defocus
if line.find('LAST CYCLE') >= 0:
in_last_cycle = True
#print line.strip('\n')
elif in_last_cycle and (line.find('Defocus_U') >= 0):
#print line.strip('\n')
head_split = line.strip().split()
in_last_cycle_data = True
elif in_last_cycle_data:
#print line.strip('\n')
data_split = line.strip().split()[:-2]
in_last_cycle_data = False
# read res limit and b factor
elif in_last_cycle and line.startswith('Resolution limit'):
resolution = float(line.split()[-1])
elif in_last_cycle and line.startswith('Estimated Bfactor'):
b_factor = float(line.split()[-1])
in_last_cycle = False
# read validation
elif line.find('VALIDATION_SCORE') >= 0:
validation_lines.append(line.strip('\n'))
# extract results
results_native = dict(
[(head, float(value))
for head, value in zip(head_split, data_split)])
results_native["Defocus_U"] = results_native["Defocus_U"] / 10000.
results_native["Defocus_V"] = results_native["Defocus_V"] / 10000.
#print(results_native)
key_dict = {
"Defocus_U":"defocus_1", "Defocus_V":"defocus_2",
"Angle":"angle", "CCC":"ccc", "Phase_shift":"phase"}
results = dict([
(key_dict[old_key], value)
for old_key, value in list(results_native.items())])
results['defocus'] = (results['defocus_1'] + results['defocus_2']) / 2.
results['phase'] = results.get('phase', 0) / 180.
results["resolution"] = resolution / 10.
results["b_factor"] = b_factor
#if results.get("phase") is None: results["phase"] = 0
results['ctf_path'] = ctf_path
results['pixel_a'] = pixel_a
for val_line in validation_lines:
val_list = val_line.strip().split()
name_suf = val_list[0].replace('-', '_')
results[cls.validation_prefix + name_suf] = int(val_list[-1])
# prepare header for defoci and phases
if print_head:
left_space = ' ' * (old_div((len(image_name) - 5), 2))
right_space = ' ' * (old_div((len(image_name) - 4), 2))
head_1 = (
left_space + "Image" + right_space +
" Defocus 1 Defocus 2 Phase Resolution")
head_2 = (
left_space + " " + right_space +
" um um [pi] nm ")
# prepare results
if print_results:
data_format = '%s %6.2f %6.2f %6.2f %6.2f '
data_vars = (
image_name, results["defocus_1"], results["defocus_2"],
results["phase"], results["resolution"])
# add validation to header and results
val_names = np.sort(
[val_nam for val_nam in results
if val_nam.startswith(cls.validation_prefix)])[::-1]
for val_nam in val_names:
if print_head:
head_1 += (" " + val_nam.split(cls.validation_prefix, 1)[1])
head_2 += " "
if print_results:
data_format += ' %2d '
data_vars += (results[val_nam],)
# print
if print_head:
print(head_1)
print(head_2)
if print_results:
print(data_format % data_vars)
# print validation
if print_validation:
for val_line in validation_lines:
print(val_line)
# plot ctf
epa = np.loadtxt(epa_path, skiprows=1)
if plot_ps:
plt.figure()
plt.plot(1./epa[:,0], epa[:,2])
plt.ylabel('ln(|F|)')
#if show_legend: plt.legend()
plt.show()
if plot_ctf:
plt.figure()
if b_plot:
exp_b = np.exp(-b_factor * 1./epa[:,0]**2 / 4.)
else:
exp_b = 1
plt.plot(1./epa[:,0], epa[:,1] * exp_b, label="CTF fit")
if exp_f_plot:
plt.plot(
1./epa[:,0], np.exp(epa[:,3]), label="$e^{ln(|F|-Bg)}$")
else:
plt.plot(1./epa[:,0], epa[:,3], label="$ln(|F|-Bg)$")
plt.xlabel('Resolution [1/A]')
if show_legend: plt.legend()
plt.show()
# return
return results
@classmethod
def read_pixel_size(cls, image_path):
"""
Reads pixel size from an image file.
Raises ValueError if pixel size can not be read from the image
Argument:
- image_path: image path
Returns: pixel size in A
"""
image_io = ImageIO()
if image_path.endswith('.st'):
image_io.readHeader(file=image_path, fileFormat='mrc')
else:
image_io.readHeader(file=image_path)
if image_io.pixel is not None:
if isinstance(image_io.pixel, (list, tuple)):
pixel_a = 10 * image_io.pixel[0]
else:
pixel_a = 10 * image_io.pixel
else:
raise ValueError(
"Pixel size could not be found from image " + image_path +
". Please specify pixel_a as an argument.")
# in case of 0 pix size
if pixel_a == 0:
raise ValueError(
"Pixel size could not be found from image " + image_path +
". Please specify pixel_a as an argument.")
return pixel_a
@classmethod
def flatten_stack(cls, stack_path, flat_dir):
"""
Flattens an image stack, that is, sums up all z-slices and writes
the resulting (flat) image.
Arguments:
- stack_path: path to the image stack
- flat_dir: directory where the resulting image is saved
Returns resulting image path
"""
# parse stack path
stack_dir, stack_name = os.path.split(stack_path)
stack_base, stack_extension = stack_name.rsplit('.', 1)
if stack_extension == 'st':
stack_extension = 'mrc'
file_format = 'mrc'
else:
file_format = None
# read, flatten and write
flat_path = os.path.join(
flat_dir, stack_base + '_flat.' + stack_extension)
frame = Image.read(file=stack_path, fileFormat=file_format)
frame.data = np.sum(frame.data, axis=2, dtype=frame.data.dtype)
frame.write(file=flat_path, pixel=frame.pixelsize)
return flat_path
|
self.phases = []
self.defoci_1 = []
self.defoci_2 = []
|
utf8.rs
|
use super::{user_with_internal, Error};
use std::string::FromUtf8Error;
impl std::convert::From<FromUtf8Error> for Error {
fn from(err: FromUtf8Error) -> Self
|
}
|
{
user_with_internal(
"We could not parse the UTF-8 content we received.",
"Make sure that you are not providing git-tool with content which is invalid UTF-8.",
err,
)
}
|
test_model_category.py
|
from conftest import add_permissions, check_dictionary
from core import NewJSONEncoder, cache
from forums.models import ForumCategory
def test_category_from_pk(app, authed_client):
category = ForumCategory.from_pk(1)
assert category.name == 'Site'
assert category.description == 'General site discussion'
def test_category_cache(app, authed_client):
category = ForumCategory.from_pk(1)
cache.cache_model(category, timeout=60)
category = ForumCategory.from_pk(1)
assert category.name == 'Site'
assert category.description == 'General site discussion'
assert cache.ttl(category.cache_key) < 61
def test_category_get_all(app, authed_client):
categories = ForumCategory.get_all()
assert len(categories) == 3
for category in categories:
if category.name == 'Site' and category.id == 1:
break
else:
raise AssertionError('A real forum not called')
def test_category_get_all_cached(app, authed_client):
cache.set(ForumCategory.__cache_key_all__, [1, 3], timeout=60)
categories = ForumCategory.get_all()
assert len(categories) == 2
for category in categories:
if category.name == 'Site' and category.id == 1:
break
else:
raise AssertionError('A real forum not called')
def test_new_category(app, authed_client):
category = ForumCategory.new(
name='NewCategory', description=None, position=100
)
assert category.name == 'NewCategory'
assert category.description is None
assert category.position == 100
assert ForumCategory.from_cache(category.cache_key).id == category.id == 6
def
|
(app, authed_client):
category = ForumCategory.from_pk(1)
data = NewJSONEncoder().default(category)
check_dictionary(
data,
{
'id': 1,
'name': 'Site',
'description': 'General site discussion',
'position': 1,
},
)
assert 'forums' in data and len(data['forums']) == 2
def test_serialize_very_detailed(app, authed_client):
add_permissions(app, 'forums_forums_modify')
category = ForumCategory.from_pk(1)
data = NewJSONEncoder().default(category)
check_dictionary(
data,
{
'id': 1,
'name': 'Site',
'description': 'General site discussion',
'position': 1,
'deleted': False,
},
)
assert 'forums' in data and len(data['forums']) == 2
def test_serialize_nested(app, authed_client):
add_permissions(app, 'forums_forums_modify')
category = ForumCategory.from_pk(1)
data = category.serialize(nested=True)
check_dictionary(
data,
{
'id': 1,
'name': 'Site',
'description': 'General site discussion',
'forums': None,
'position': 1,
'deleted': False,
},
strict=True,
)
|
test_serialize_no_perms
|
source.go
|
package influxdb
import (
"context"
"errors"
"github.com/influxdata/flux"
"github.com/influxdata/flux/execute"
"github.com/influxdata/flux/memory"
"github.com/influxdata/flux/plan"
"github.com/influxdata/flux/semantic"
"github.com/influxdata/influxdb/kit/tracing"
"github.com/influxdata/influxdb/query"
"github.com/influxdata/influxdb/tsdb/cursors"
)
func init() {
execute.RegisterSource(ReadRangePhysKind, createReadFilterSource)
execute.RegisterSource(ReadGroupPhysKind, createReadGroupSource)
execute.RegisterSource(ReadTagKeysPhysKind, createReadTagKeysSource)
execute.RegisterSource(ReadTagValuesPhysKind, createReadTagValuesSource)
}
type runner interface {
run(ctx context.Context) error
}
type Source struct {
id execute.DatasetID
ts []execute.Transformation
alloc *memory.Allocator
stats cursors.CursorStats
runner runner
}
func (s *Source) Run(ctx context.Context) {
err := s.runner.run(ctx)
for _, t := range s.ts {
t.Finish(s.id, err)
}
}
func (s *Source) AddTransformation(t execute.Transformation) {
s.ts = append(s.ts, t)
}
func (s *Source) Metadata() flux.Metadata {
return flux.Metadata{
"influxdb/scanned-bytes": []interface{}{s.stats.ScannedBytes},
"influxdb/scanned-values": []interface{}{s.stats.ScannedValues},
}
}
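// processTables iterates over the tables, forwards each one to the registered transformations,
// accumulates scan statistics, and updates the watermark on every transformation.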
func (s *Source) processTables(ctx context.Context, tables TableIterator, watermark execute.Time) error {
err := tables.Do(func(tbl flux.Table) error {
return s.processTable(ctx, tbl)
})
if err != nil {
return err
}
// Track the number of bytes and values scanned.
stats := tables.Statistics()
s.stats.ScannedValues += stats.ScannedValues
s.stats.ScannedBytes += stats.ScannedBytes
for _, t := range s.ts {
if err := t.UpdateWatermark(s.id, watermark); err != nil {
return err
}
}
return nil
}
func (s *Source) processTable(ctx context.Context, tbl flux.Table) error {
if len(s.ts) == 0 {
tbl.Done()
return nil
} else if len(s.ts) == 1 {
return s.ts[0].Process(s.id, tbl)
}
// There is more than one transformation so we need to
// copy the table for each transformation.
bufTable, err := execute.CopyTable(tbl)
if err != nil {
return err
}
defer bufTable.Done()
for _, t := range s.ts {
if err := t.Process(s.id, bufTable.Copy()); err != nil {
return err
}
}
return nil
}
type readFilterSource struct {
Source
reader Reader
readSpec ReadFilterSpec
}
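// ReadFilterSource constructs an execute.Source that reads tables matching the given ReadFilterSpec through the provided Reader.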
func ReadFilterSource(id execute.DatasetID, r Reader, readSpec ReadFilterSpec, alloc *memory.Allocator) execute.Source {
src := new(readFilterSource)
src.id = id
src.alloc = alloc
src.reader = r
src.readSpec = readSpec
src.runner = src
return src
}
func (s *readFilterSource) run(ctx context.Context) error {
stop := s.readSpec.Bounds.Stop
tables, err := s.reader.ReadFilter(
ctx,
s.readSpec,
s.alloc,
)
if err != nil {
return err
}
return s.processTables(ctx, tables, stop)
}
func createReadFilterSource(s plan.ProcedureSpec, id execute.DatasetID, a execute.Administration) (execute.Source, error) {
span, ctx := tracing.StartSpanFromContext(a.Context())
defer span.Finish()
spec := s.(*ReadRangePhysSpec)
bounds := a.StreamContext().Bounds()
if bounds == nil {
return nil, errors.New("nil bounds passed to from")
}
deps := a.Dependencies()[FromKind].(Dependencies)
req := query.RequestFromContext(a.Context())
if req == nil {
return nil, errors.New("missing request on context")
}
orgID := req.OrganizationID
bucketID, err := spec.LookupBucketID(ctx, orgID, deps.BucketLookup)
if err != nil {
return nil, err
}
var filter *semantic.FunctionExpression
if spec.FilterSet {
filter = spec.Filter
}
return ReadFilterSource(
id,
deps.Reader,
ReadFilterSpec{
OrganizationID: orgID,
BucketID: bucketID,
Bounds: *bounds,
Predicate: filter,
},
a.Allocator(),
), nil
}
type readGroupSource struct {
Source
reader Reader
readSpec ReadGroupSpec
}
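// ReadGroupSource constructs an execute.Source that reads grouped tables described by the given ReadGroupSpec through the provided Reader.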
func ReadGroupSource(id execute.DatasetID, r Reader, readSpec ReadGroupSpec, alloc *memory.Allocator) execute.Source {
src := new(readGroupSource)
src.id = id
src.alloc = alloc
src.reader = r
src.readSpec = readSpec
src.runner = src
return src
}
func (s *readGroupSource) run(ctx context.Context) error {
stop := s.readSpec.Bounds.Stop
tables, err := s.reader.ReadGroup(
ctx,
s.readSpec,
s.alloc,
)
if err != nil {
return err
}
return s.processTables(ctx, tables, stop)
}
func createReadGroupSource(s plan.ProcedureSpec, id execute.DatasetID, a execute.Administration) (execute.Source, error) {
span, ctx := tracing.StartSpanFromContext(a.Context())
defer span.Finish()
spec := s.(*ReadGroupPhysSpec)
bounds := a.StreamContext().Bounds()
if bounds == nil {
return nil, errors.New("nil bounds passed to from")
}
deps := a.Dependencies()[FromKind].(Dependencies)
req := query.RequestFromContext(a.Context())
if req == nil {
return nil, errors.New("missing request on context")
}
orgID := req.OrganizationID
bucketID, err := spec.LookupBucketID(ctx, orgID, deps.BucketLookup)
if err != nil {
return nil, err
}
var filter *semantic.FunctionExpression
if spec.FilterSet {
filter = spec.Filter
}
return ReadGroupSource(
id,
deps.Reader,
ReadGroupSpec{
ReadFilterSpec: ReadFilterSpec{
OrganizationID: orgID,
BucketID: bucketID,
Bounds: *bounds,
Predicate: filter,
},
GroupMode: ToGroupMode(spec.GroupMode),
GroupKeys: spec.GroupKeys,
AggregateMethod: spec.AggregateMethod,
},
a.Allocator(),
), nil
}
func createReadTagKeysSource(prSpec plan.ProcedureSpec, dsid execute.DatasetID, a execute.Administration) (execute.Source, error) {
span, ctx := tracing.StartSpanFromContext(a.Context())
defer span.Finish()
spec := prSpec.(*ReadTagKeysPhysSpec)
deps := a.Dependencies()[FromKind].(Dependencies)
req := query.RequestFromContext(a.Context())
if req == nil {
return nil, errors.New("missing request on context")
}
orgID := req.OrganizationID
bucketID, err := spec.LookupBucketID(ctx, orgID, deps.BucketLookup)
if err != nil {
return nil, err
}
var filter *semantic.FunctionExpression
if spec.FilterSet {
filter = spec.Filter
}
bounds := a.StreamContext().Bounds()
return ReadTagKeysSource(
dsid,
deps.Reader,
ReadTagKeysSpec{
ReadFilterSpec: ReadFilterSpec{
OrganizationID: orgID,
BucketID: bucketID,
Bounds: *bounds,
Predicate: filter,
},
},
a.Allocator(),
), nil
}
type readTagKeysSource struct {
Source
reader Reader
readSpec ReadTagKeysSpec
}
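// ReadTagKeysSource constructs an execute.Source that reads tag keys described by the given ReadTagKeysSpec through the provided Reader.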
func ReadTagKeysSource(id execute.DatasetID, r Reader, readSpec ReadTagKeysSpec, alloc *memory.Allocator) execute.Source
|
func (s *readTagKeysSource) run(ctx context.Context) error {
ti, err := s.reader.ReadTagKeys(ctx, s.readSpec, s.alloc)
if err != nil {
return err
}
return s.processTables(ctx, ti, execute.Now())
}
func createReadTagValuesSource(prSpec plan.ProcedureSpec, dsid execute.DatasetID, a execute.Administration) (execute.Source, error) {
span, ctx := tracing.StartSpanFromContext(a.Context())
defer span.Finish()
spec := prSpec.(*ReadTagValuesPhysSpec)
deps := a.Dependencies()[FromKind].(Dependencies)
req := query.RequestFromContext(a.Context())
if req == nil {
return nil, errors.New("missing request on context")
}
orgID := req.OrganizationID
bucketID, err := spec.LookupBucketID(ctx, orgID, deps.BucketLookup)
if err != nil {
return nil, err
}
var filter *semantic.FunctionExpression
if spec.FilterSet {
filter = spec.Filter
}
bounds := a.StreamContext().Bounds()
return ReadTagValuesSource(
dsid,
deps.Reader,
ReadTagValuesSpec{
ReadFilterSpec: ReadFilterSpec{
OrganizationID: orgID,
BucketID: bucketID,
Bounds: *bounds,
Predicate: filter,
},
TagKey: spec.TagKey,
},
a.Allocator(),
), nil
}
type readTagValuesSource struct {
Source
reader Reader
readSpec ReadTagValuesSpec
}
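// ReadTagValuesSource constructs an execute.Source that reads tag values described by the given ReadTagValuesSpec through the provided Reader.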
func ReadTagValuesSource(id execute.DatasetID, r Reader, readSpec ReadTagValuesSpec, alloc *memory.Allocator) execute.Source {
src := &readTagValuesSource{
reader: r,
readSpec: readSpec,
}
src.id = id
src.alloc = alloc
src.runner = src
return src
}
func (s *readTagValuesSource) run(ctx context.Context) error {
ti, err := s.reader.ReadTagValues(ctx, s.readSpec, s.alloc)
if err != nil {
return err
}
return s.processTables(ctx, ti, execute.Now())
}
|
{
src := &readTagKeysSource{
reader: r,
readSpec: readSpec,
}
src.id = id
src.alloc = alloc
src.runner = src
return src
}
|
setup.go
|
package etcd
import (
"context"
"crypto/tls"
"github.com/coredns/coredns/core/dnsserver"
"github.com/coredns/coredns/plugin"
clog "github.com/coredns/coredns/plugin/pkg/log"
mwtls "github.com/coredns/coredns/plugin/pkg/tls"
"github.com/coredns/coredns/plugin/pkg/upstream"
"github.com/coredns/coredns/plugin/proxy"
etcdcv3 "github.com/coreos/etcd/clientv3"
"github.com/mholt/caddy"
)
var log = clog.NewWithPlugin("etcd")
func init() {
caddy.RegisterPlugin("etcd", caddy.Plugin{
ServerType: "dns",
Action: setup,
})
}
func setup(c *caddy.Controller) error {
e, stubzones, err := etcdParse(c)
if err != nil {
return plugin.Error("etcd", err)
}
if stubzones {
c.OnStartup(func() error {
e.UpdateStubZones()
return nil
})
}
dnsserver.GetConfig(c).AddPlugin(func(next plugin.Handler) plugin.Handler {
e.Next = next
return e
})
return nil
}
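// etcdParse parses the etcd plugin configuration from the Corefile and returns the configured
// Etcd plugin, whether stubzones is enabled, and any error.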
func etcdParse(c *caddy.Controller) (*Etcd, bool, error) {
stub := make(map[string]proxy.Proxy)
etc := Etcd{
// Don't default to a proxy for lookups.
// Proxy: proxy.NewLookup([]string{"8.8.8.8:53", "8.8.4.4:53"}),
PathPrefix: "skydns",
Ctx: context.Background(),
Stubmap: &stub,
}
var (
tlsConfig *tls.Config
err error
endpoints = []string{defaultEndpoint}
stubzones = false
)
for c.Next() {
etc.Zones = c.RemainingArgs()
if len(etc.Zones) == 0 {
etc.Zones = make([]string, len(c.ServerBlockKeys))
copy(etc.Zones, c.ServerBlockKeys)
}
for i, str := range etc.Zones {
etc.Zones[i] = plugin.Host(str).Normalize()
}
if c.NextBlock() {
for {
switch c.Val() {
case "stubzones":
stubzones = true
case "fallthrough":
etc.Fall.SetZonesFromArgs(c.RemainingArgs())
case "debug":
/* it is a noop now */
case "path":
if !c.NextArg() {
return &Etcd{}, false, c.ArgErr()
}
etc.PathPrefix = c.Val()
case "endpoint":
args := c.RemainingArgs()
if len(args) == 0 {
return &Etcd{}, false, c.ArgErr()
}
endpoints = args
case "upstream":
args := c.RemainingArgs()
u, err := upstream.New(args)
if err != nil {
return nil, false, err
}
etc.Upstream = u
case "tls": // cert key cacertfile
args := c.RemainingArgs()
tlsConfig, err = mwtls.NewTLSConfigFromArgs(args...)
if err != nil {
return &Etcd{}, false, err
}
default:
if c.Val() != "}" {
return &Etcd{}, false, c.Errf("unknown property '%s'", c.Val())
}
}
if !c.Next() {
break
}
}
}
client, err := newEtcdClient(endpoints, tlsConfig)
if err != nil {
return &Etcd{}, false, err
}
etc.Client = client
etc.endpoints = endpoints
return &etc, stubzones, nil
}
return &Etcd{}, false, nil
}
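// newEtcdClient creates an etcd v3 client for the given endpoints, optionally secured with the given TLS config.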
func newEtcdClient(endpoints []string, cc *tls.Config) (*etcdcv3.Client, error) {
etcdCfg := etcdcv3.Config{
Endpoints: endpoints,
TLS: cc,
}
cli, err := etcdcv3.New(etcdCfg)
if err != nil
|
return cli, nil
}
const defaultEndpoint = "http://localhost:2379"
|
{
return nil, err
}
|
model_or_filter_1.go
|
/*
Horizon Server API
Welcome to the Horizon Server API Reference documentation. This API reference provides comprehensive information about status of all Horizon Server components and resources. <br> Choose Latest spec from dropdown to view API reference on latest version available.
|
package gohorizon
import (
"encoding/json"
)
// OrFilter1 struct for OrFilter1
type OrFilter1 struct {
Filters *[]BaseFilter `json:"filters,omitempty"`
Type *string `json:"type,omitempty"`
}
// NewOrFilter1 instantiates a new OrFilter1 object
// This constructor will assign default values to properties that have it defined,
// and makes sure properties required by API are set, but the set of arguments
// will change when the set of required properties is changed
func NewOrFilter1() *OrFilter1 {
this := OrFilter1{}
return &this
}
// NewOrFilter1WithDefaults instantiates a new OrFilter1 object
// This constructor will only assign default values to properties that have it defined,
// but it doesn't guarantee that properties required by API are set
func NewOrFilter1WithDefaults() *OrFilter1 {
this := OrFilter1{}
return &this
}
// GetFilters returns the Filters field value if set, zero value otherwise.
func (o *OrFilter1) GetFilters() []BaseFilter {
if o == nil || o.Filters == nil {
var ret []BaseFilter
return ret
}
return *o.Filters
}
// GetFiltersOk returns a tuple with the Filters field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *OrFilter1) GetFiltersOk() (*[]BaseFilter, bool) {
if o == nil || o.Filters == nil {
return nil, false
}
return o.Filters, true
}
// HasFilters returns a boolean if a field has been set.
func (o *OrFilter1) HasFilters() bool {
if o != nil && o.Filters != nil {
return true
}
return false
}
// SetFilters gets a reference to the given []BaseFilter and assigns it to the Filters field.
func (o *OrFilter1) SetFilters(v []BaseFilter) {
o.Filters = &v
}
// GetType returns the Type field value if set, zero value otherwise.
func (o *OrFilter1) GetType() string {
if o == nil || o.Type == nil {
var ret string
return ret
}
return *o.Type
}
// GetTypeOk returns a tuple with the Type field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *OrFilter1) GetTypeOk() (*string, bool) {
if o == nil || o.Type == nil {
return nil, false
}
return o.Type, true
}
// HasType returns a boolean if a field has been set.
func (o *OrFilter1) HasType() bool {
if o != nil && o.Type != nil {
return true
}
return false
}
// SetType gets a reference to the given string and assigns it to the Type field.
func (o *OrFilter1) SetType(v string) {
o.Type = &v
}
func (o OrFilter1) MarshalJSON() ([]byte, error) {
toSerialize := map[string]interface{}{}
if o.Filters != nil {
toSerialize["filters"] = o.Filters
}
if o.Type != nil {
toSerialize["type"] = o.Type
}
return json.Marshal(toSerialize)
}
type NullableOrFilter1 struct {
value *OrFilter1
isSet bool
}
func (v NullableOrFilter1) Get() *OrFilter1 {
return v.value
}
func (v *NullableOrFilter1) Set(val *OrFilter1) {
v.value = val
v.isSet = true
}
func (v NullableOrFilter1) IsSet() bool {
return v.isSet
}
func (v *NullableOrFilter1) Unset() {
v.value = nil
v.isSet = false
}
func NewNullableOrFilter1(val *OrFilter1) *NullableOrFilter1 {
return &NullableOrFilter1{value: val, isSet: true}
}
func (v NullableOrFilter1) MarshalJSON() ([]byte, error) {
return json.Marshal(v.value)
}
func (v *NullableOrFilter1) UnmarshalJSON(src []byte) error {
v.isSet = true
return json.Unmarshal(src, &v.value)
}
|
API version: 2111
*/
// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT.
|
clientset.go
|
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
|
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package versioned
import (
"fmt"
tektonv1alpha1 "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1"
tektonv1beta1 "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1beta1"
discovery "k8s.io/client-go/discovery"
rest "k8s.io/client-go/rest"
flowcontrol "k8s.io/client-go/util/flowcontrol"
)
type Interface interface {
Discovery() discovery.DiscoveryInterface
TektonV1alpha1() tektonv1alpha1.TektonV1alpha1Interface
TektonV1beta1() tektonv1beta1.TektonV1beta1Interface
}
// Clientset contains the clients for groups. Each group has exactly one
// version included in a Clientset.
type Clientset struct {
*discovery.DiscoveryClient
tektonV1alpha1 *tektonv1alpha1.TektonV1alpha1Client
tektonV1beta1 *tektonv1beta1.TektonV1beta1Client
}
// TektonV1alpha1 retrieves the TektonV1alpha1Client
func (c *Clientset) TektonV1alpha1() tektonv1alpha1.TektonV1alpha1Interface {
return c.tektonV1alpha1
}
// TektonV1beta1 retrieves the TektonV1beta1Client
func (c *Clientset) TektonV1beta1() tektonv1beta1.TektonV1beta1Interface {
return c.tektonV1beta1
}
// Discovery retrieves the DiscoveryClient
func (c *Clientset) Discovery() discovery.DiscoveryInterface {
if c == nil {
return nil
}
return c.DiscoveryClient
}
// NewForConfig creates a new Clientset for the given config.
// If config's RateLimiter is not set and QPS and Burst are acceptable,
// NewForConfig will generate a rate-limiter in configShallowCopy.
func NewForConfig(c *rest.Config) (*Clientset, error) {
configShallowCopy := *c
if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 {
if configShallowCopy.Burst <= 0 {
return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0")
}
configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst)
}
var cs Clientset
var err error
cs.tektonV1alpha1, err = tektonv1alpha1.NewForConfig(&configShallowCopy)
if err != nil {
return nil, err
}
cs.tektonV1beta1, err = tektonv1beta1.NewForConfig(&configShallowCopy)
if err != nil {
return nil, err
}
cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy)
if err != nil {
return nil, err
}
return &cs, nil
}
// NewForConfigOrDie creates a new Clientset for the given config and
// panics if there is an error in the config.
func NewForConfigOrDie(c *rest.Config) *Clientset {
var cs Clientset
cs.tektonV1alpha1 = tektonv1alpha1.NewForConfigOrDie(c)
cs.tektonV1beta1 = tektonv1beta1.NewForConfigOrDie(c)
cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c)
return &cs
}
// New creates a new Clientset for the given RESTClient.
func New(c rest.Interface) *Clientset {
var cs Clientset
cs.tektonV1alpha1 = tektonv1alpha1.New(c)
cs.tektonV1beta1 = tektonv1beta1.New(c)
cs.DiscoveryClient = discovery.NewDiscoveryClient(c)
return &cs
}
| |
authenticator.js
|
var should = require('should')
var mockFS = require('mock-fs')
var envRestorer = require('env-restorer')
var authenticator = require('../lib/authenticator')
var testHelper = require('./_helper')
// Restore File system mocks, authentication state and environment variables
var restoreAll = function () {
mockFS.restore();
authenticator.purge();
envRestorer.restore();
}
describe('authenticator_test', function () {
describe('#getToken()', function () {
it('should read the access token from the config file', function () {
mockFS({
'data/strava_config': JSON.stringify({
'access_token': 'abcdefghi',
'client_id': 'jklmnopqr',
'client_secret': 'stuvwxyz',
'redirect_uri': 'https://sample.com'
|
delete process.env.STRAVA_ACCESS_TOKEN
authenticator.purge();
(authenticator.getToken()).should.be.exactly('abcdefghi')
})
it('should read the access token from the env vars', function () {
mockFS({
'data': {}
})
process.env.STRAVA_ACCESS_TOKEN = 'abcdefghi'
authenticator.purge();
(authenticator.getToken()).should.be.exactly('abcdefghi')
})
afterEach(restoreAll)
})
describe('#getClientId()', function () {
it('should read the client id from the config file', function () {
mockFS({
'data/strava_config': JSON.stringify({
'access_token': 'abcdefghi',
'client_id': 'jklmnopqr',
'client_secret': 'stuvwxyz',
'redirect_uri': 'https://sample.com'
})
})
delete process.env.STRAVA_CLIENT_ID
authenticator.purge();
(authenticator.getClientId()).should.be.exactly('jklmnopqr')
})
it('should read the client id from the env vars', function () {
mockFS({
'data': {}
})
process.env.STRAVA_CLIENT_ID = 'abcdefghi'
authenticator.purge();
(authenticator.getClientId()).should.be.exactly('abcdefghi')
})
afterEach(restoreAll)
})
describe('#getClientSecret()', function () {
it('should read the client secret from the config file', function () {
mockFS({
'data/strava_config': JSON.stringify({
'access_token': 'abcdefghi',
'client_id': 'jklmnopqr',
'client_secret': 'stuvwxyz',
'redirect_uri': 'https://sample.com'
})
})
delete process.env.STRAVA_CLIENT_SECRET
authenticator.purge();
(authenticator.getClientSecret()).should.be.exactly('stuvwxyz')
})
it('should read the client secret from the env vars', function () {
mockFS({
'data': {}
})
process.env.STRAVA_CLIENT_SECRET = 'abcdefghi'
authenticator.purge();
(authenticator.getClientSecret()).should.be.exactly('abcdefghi')
})
afterEach(restoreAll)
})
describe('#getRedirectUri()', function () {
it('should read the redirect URI from the config file', function () {
mockFS({
'data/strava_config': JSON.stringify({
'access_token': 'abcdefghi',
'client_id': 'jklmnopqr',
'client_secret': 'stuvwxyz',
'redirect_uri': 'https://sample.com'
})
})
delete process.env.STRAVA_REDIRECT_URI
authenticator.purge();
(authenticator.getRedirectUri()).should.be.exactly('https://sample.com')
})
it('should read the redirect URI from the env vars', function () {
mockFS({
'data': {}
})
process.env.STRAVA_REDIRECT_URI = 'https://sample.com'
authenticator.purge();
(authenticator.getRedirectUri()).should.be.exactly('https://sample.com')
})
afterEach(restoreAll)
})
})
|
})
})
|
pgpwordlist.go
|
/*
* Copyright (c) 2015-2016 The Hdfchain developers
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package pgpwordlist
import (
"strings"
"github.com/hdfchain/hdfwallet/errors"
)
// ByteToMnemonic returns the PGP word list encoding of b when found at index.
func ByteToMnemonic(b byte, index int) string {
bb := uint16(b) * 2
if index%2 != 0 {
bb++
}
return wordList[bb]
}
// DecodeMnemonics returns the decoded value that is encoded by words. Any
// words that are whitespace or empty are skipped.
func
|
(words []string) ([]byte, error) {
const op errors.Op = "pgpwordlist.DecodeMnemonics"
decoded := make([]byte, len(words))
idx := 0
for _, w := range words {
w = strings.TrimSpace(w)
if w == "" {
continue
}
b, ok := wordIndexes[strings.ToLower(w)]
if !ok {
err := errors.Errorf("word %v is not in the PGP word list", w)
return nil, errors.E(op, errors.Encoding, err)
}
if int(b%2) != idx%2 {
err := errors.Errorf("word %v is not valid at position %v, "+
"check for missing words", w, idx)
return nil, errors.E(op, errors.Encoding, err)
}
decoded[idx] = byte(b / 2)
idx++
}
return decoded[:idx], nil
}
|
DecodeMnemonics
|
nz-progress.component.ts
|
/**
* @license
* Copyright Alibaba.com All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://github.com/NG-ZORRO/ng-zorro-antd/blob/master/LICENSE
*/
import {
ChangeDetectionStrategy,
Component,
Input,
OnChanges,
OnInit,
SimpleChanges,
ViewEncapsulation
} from '@angular/core';
import { isNotNil, InputNumber } from 'ng-zorro-antd/core';
export type NzProgressGapPositionType = 'top' | 'bottom' | 'left' | 'right';
export type NzProgressStatusType = 'success' | 'exception' | 'active' | 'normal';
export type NzProgressTypeType = 'line' | 'circle' | 'dashboard';
export type NzProgressStrokeLinecapType = 'round' | 'square';
@Component({
changeDetection: ChangeDetectionStrategy.OnPush,
encapsulation: ViewEncapsulation.None,
selector: 'nz-progress',
exportAs: 'nzProgress',
preserveWhitespaces: false,
templateUrl: './nz-progress.component.html'
})
export class
|
implements OnInit, OnChanges {
@Input() nzShowInfo = true;
@Input() nzWidth = 132;
@Input() nzStrokeColor: string;
@Input() nzSize: string;
@Input() nzFormat?: (percent: number) => string;
@Input() @InputNumber() nzSuccessPercent?: number;
@Input() @InputNumber() nzPercent: number;
@Input() @InputNumber() nzStrokeWidth: number;
@Input() @InputNumber() nzGapDegree: number;
@Input() nzStatus: NzProgressStatusType;
@Input() nzType: NzProgressTypeType = 'line';
@Input() nzGapPosition?: NzProgressGapPositionType;
@Input() nzStrokeLinecap: NzProgressStrokeLinecapType = 'round';
trailPathStyle: { [key: string]: string };
strokePathStyle: { [key: string]: string };
pathString: string;
icon: string;
statusColorMap: { [key: string]: string } = {
normal: '#108ee9',
exception: '#ff5500',
success: '#87d068'
};
private cachedStatus: NzProgressStatusType = 'normal';
private inferredStatus: NzProgressStatusType = 'normal';
private inferredStrokeWidth: number = 8;
private inferredGapPosition: string;
private inferredGapDegree: number;
get formatter(): (percent: number) => string {
return this.nzFormat || ((p: number): string => `${p}%`);
}
get status(): NzProgressStatusType {
return this.nzStatus || this.inferredStatus;
}
get strokeWidth(): number {
return this.nzStrokeWidth || this.inferredStrokeWidth;
}
get isCircleStyle(): boolean {
return this.nzType === 'circle' || this.nzType === 'dashboard';
}
ngOnInit(): void {
this.updatePathStyles();
this.updateIcon();
}
ngOnChanges(changes: SimpleChanges): void {
const {
nzGapPosition,
nzStrokeLinecap,
nzGapDegree,
nzType,
nzSize,
nzStatus,
nzPercent,
nzSuccessPercent
} = changes;
if (nzGapPosition || nzStrokeLinecap || nzGapDegree || nzType || nzPercent) {
this.updatePathStyles();
}
if (nzSize) {
if (this.nzSize === 'small') {
this.inferredStrokeWidth = 6;
}
}
if (nzStatus) {
this.cachedStatus = this.nzStatus || this.cachedStatus;
this.updateIcon();
}
if (nzPercent || nzSuccessPercent) {
const fillAll = parseInt(this.nzPercent.toString(), 10) >= 100;
if (fillAll) {
if ((isNotNil(this.nzSuccessPercent) && this.nzSuccessPercent! >= 100) || this.nzSuccessPercent === undefined) {
this.inferredStatus = 'success';
}
} else {
this.inferredStatus = this.cachedStatus;
}
this.updateIcon();
}
if (nzType) {
if (this.nzType !== 'line') {
this.inferredStrokeWidth = 6;
}
if (this.nzType === 'dashboard') {
this.inferredGapPosition = 'bottom';
this.inferredGapDegree = 75;
}
if (this.nzType === 'circle') {
this.inferredGapDegree = 0;
}
}
}
updatePathStyles(): void {
const radius = 50 - this.strokeWidth / 2;
const gapPosition = this.nzGapPosition || this.inferredGapPosition;
let beginPositionX = 0;
let beginPositionY = -radius;
let endPositionX = 0;
let endPositionY = radius * -2;
switch (gapPosition) {
case 'left':
beginPositionX = -radius;
beginPositionY = 0;
endPositionX = radius * 2;
endPositionY = 0;
break;
case 'right':
beginPositionX = radius;
beginPositionY = 0;
endPositionX = radius * -2;
endPositionY = 0;
break;
case 'bottom':
beginPositionY = radius;
endPositionY = radius * 2;
break;
default:
}
this.pathString = `M 50,50 m ${beginPositionX},${beginPositionY}
a ${radius},${radius} 0 1 1 ${endPositionX},${-endPositionY}
a ${radius},${radius} 0 1 1 ${-endPositionX},${endPositionY}`;
const len = Math.PI * 2 * radius;
const gapDegree = this.nzGapDegree || this.inferredGapDegree;
this.trailPathStyle = {
strokeDasharray: `${len - gapDegree}px ${len}px`,
strokeDashoffset: `-${gapDegree / 2}px`,
transition: 'stroke-dashoffset .3s ease 0s, stroke-dasharray .3s ease 0s, stroke .3s'
};
this.strokePathStyle = {
stroke: this.nzStrokeColor || (null as any), // tslint:disable-line:no-any
strokeDasharray: `${(this.nzPercent / 100) * (len - gapDegree)}px ${len}px`,
strokeDashoffset: `-${gapDegree / 2}px`,
transition: 'stroke-dashoffset .3s ease 0s, stroke-dasharray .3s ease 0s, stroke .3s, stroke-width .06s ease .3s'
};
}
updateIcon(): void {
const isCircle = this.nzType === 'circle' || this.nzType === 'dashboard';
const ret = this.status === 'success' ? 'check' : this.status === 'exception' ? 'close' : '';
this.icon = ret ? ret + (isCircle ? '-o' : '-circle-fill') : '';
}
}
|
NzProgressComponent
|
fields.go
|
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package mage
import (
"path/filepath"
"github.com/magefile/mage/sh"
)
// GenerateFieldsYAML generates a fields.yml file for a Beat. This will include
// the common fields specified by libbeat, the common fields for the Beat,
// and any additional fields.yml files you specify.
//
// fieldsFiles specifies additional directories to search recursively for files
// named fields.yml. The contents of each fields.yml will be included in the
// generated file.
func GenerateFieldsYAML(fieldsFiles ...string) error {
const globalFieldsCmdPath = "libbeat/scripts/cmd/global_fields/main.go"
beatsDir, err := ElasticBeatsDir()
if err != nil
|
globalFieldsCmd := sh.RunCmd("go", "run",
filepath.Join(beatsDir, globalFieldsCmdPath),
"-es_beats_path", beatsDir,
"-beat_path", CWD(),
"-out", "fields.yml",
)
return globalFieldsCmd(fieldsFiles...)
}
|
{
return err
}
|
test_decoder.py
|
# Name: test_decoder.py
# Since: April 13th, 2020
# Author: Christen Ford
# Purpose: Implements unit tests for simplejson.decoder.
from unittest import TestCase
import simplejson.decoder as decoder
import simplejson.errors as errors
class TestDecoder(TestCase):
"""Implements a set of unit tests for the simplejson.decoder
module. These test cases make sane attempts at testing each
class and method found in the decoder module but they
are not exhaustively extensive.
"""
def test_scanstring_correct(self):
"""
Description: Tests that the py_scanstring() function
is able to parse valid JSON. Assumes optional
functional parameters are left at their defaults.
Input: '"{"abc": 0, "def": 1, "ghi": 2}"'
Output: A tuple of the decoded JSON string and
the index in the string after the ending quote.
Test Case: Corresponds to test TEST-0000.
"""
test_input = '"{"abc": 0, "def": 1, "ghi": 2}"'
decoded_str, last_char_index = decoder.py_scanstring(
s=test_input,
end=1
)
self.assertEqual(decoded_str, "{")
self.assertEqual(last_char_index, 3)
def test_scanstring_malformed(self):
"""
Description: Tests that the py_scanstring() function is
able to properly detect malformed JSON. This test case
may include multiple different strings to ensure
well-rounded error detection.
Input:
(tuple): ("{]", "[}")
Output:
(JSONDecodeError)
Test Case: Corresponds to test TEST-0001.
"""
test_inputs = ('{]', '[}')
for test_input in test_inputs:
self.assertRaises(
decoder.JSONDecodeError,
decoder.py_scanstring,
s=test_input,
end=1
)
def test_scanstring_empty(self):
"""
Description: Tests that the py_scanstring() function is
able to properly detect empty strings.
Input:
(str): ""
Output:
(JSONDecodeError)
Test Case: Corresponds to test TEST-0002.
"""
test_input = ''
self.assertRaises(
errors.JSONDecodeError,
decoder.py_scanstring,
s=test_input,
end=1
)
def test_json_object_correct(self):
"""
Description: Test that the JSONObject() method can properly
decode JSON objects to Python dictionaries.
Input:
(tuple): ("{"abc": 0, "def": 1, "ghi": 2}", 0)
Output:
(dict): ({"abc": 0, "def": 1, "ghi": 2}, 30)
Test Case: Corresponds to test TEST-0003.
"""
test_input = ('{"abc": 0, "def": 1, "ghi": 2}', 1)
out_dict = dict()
out_dict["abc"] = 0
out_dict["def"] = 1
out_dict["ghi"] = 2
test_output = (out_dict, 30)
dcdr = decoder.JSONDecoder()
self.assertEqual(
decoder.JSONObject(
state=test_input,
encoding=dcdr.encoding,
strict=dcdr.strict,
scan_once=dcdr.scan_once,
object_hook=dcdr.object_hook,
object_pairs_hook=dcdr.object_pairs_hook
),
test_output
)
def test_json_object_malformed(self):
"""
Description: Tests that the JSONObject() method can detect
improperly formed JSON object.
Input:
(tuple): ("{"abc": 0, "def": 1, "ghi" :2]", 1)
Output:
(JSONDecodeError)
Test Case: Corresponds to test TEST-0004.
"""
test_input = ('\"{"abc": 0, "def": 1, "ghi": 2]\"', 1)
dcdr = decoder.JSONDecoder()
self.assertRaises(
errors.JSONDecodeError,
decoder.JSONObject,
state=test_input,
encoding=dcdr.encoding,
strict = dcdr.strict,
scan_once = dcdr.scan_once,
object_hook = dcdr.object_hook,
object_pairs_hook = dcdr.object_pairs_hook
)
def test_json_object_empty(self):
"""
Description: Tests that the JSONObject() method can detect
empty strings.
Input:
(tuple): ('', 0)
Output:
(JSONDecodeError)
Test Case: Corresponds to test TEST-0005.
"""
test_input = ("", 0)
dcdr = decoder.JSONDecoder()
self.assertRaises(
errors.JSONDecodeError,
decoder.JSONObject,
state=test_input,
encoding=dcdr.encoding,
strict = dcdr.strict,
scan_once = dcdr.scan_once,
object_hook = dcdr.object_hook,
object_pairs_hook = dcdr.object_pairs_hook
)
def test_json_array_correct(self):
"""
Description: Tests that the JSONArray method can decode
a properly formed JSONArray.
Input:
(tuple): ("["abc", "def", "ghi"]", 1)
Output:
Test Case: Corresponds to test TEST-0006.
"""
test_input = ('["abc", "def", "ghi"]', 1)
test_output = (['abc', 'def', 'ghi'], 21)
dcdr = decoder.JSONDecoder()
self.assertEqual(
decoder.JSONArray(
test_input,
dcdr.scan_once
),
test_output
)
def
|
(self):
"""
Description: Tests that the JSONArray method can properly
detect a malformed JSON array.
Input:
(str): ("["abc", "def", "ghi"}", 1)
Output:
(JSONDecodeError)
Test Case: Corresponds to test TEST-0007.
"""
test_input = ('["abc", "def", "ghi"}', 1)
dcdr = decoder.JSONDecoder()
self.assertRaises(
errors.JSONDecodeError,
decoder.JSONArray,
test_input,
dcdr.scan_once
)
def test_json_array_empty(self):
"""
Description: Tests that the JSONArray() method can
properly detect an empty string.
Input:
(tuple): ("", 0)
Output:
(JSONDecodeError)
Test Case: Corresponds to test TEST-0008.
"""
test_input = ('', 0)
dcdr = decoder.JSONDecoder()
self.assertRaises(
errors.JSONDecodeError,
decoder.JSONArray,
test_input,
dcdr.scan_once
)
def test_json_decoder_create_utf(self):
"""
Description: Tests that a JSONDecoder object can be created
to decode JSON strings with the 'utf-8' character encoding.
Input:
(str): "utf-8"
Output:
(JSONDecoder)
Test Case: Corresponds to test TEST-0009.
"""
dcdr = decoder.JSONDecoder(encoding="utf-8")
self.assertEqual(dcdr.encoding, "utf-8")
def test_json_decoder_create_unicode(self):
"""
Description: Tests that a JSONDecoder object can be created
with the unicode character encoding.
Input:
(str): "unicode"
Output:
(JSONDecoder)
TestCase: Corresponds to test TEST-0010.
"""
dcdr = decoder.JSONDecoder(encoding="unicode")
self.assertEqual(dcdr.encoding, "unicode")
def test_json_decoder_create_invalid(self):
"""
Description: Tests that a JSONDecoder object cannot be
created when given an invalid encoding.
Input:
(str): "ISO-8859-1"
Output:
(JSONDecodeError)
Test Case: Corresponds to test TEST-0011.
"""
self.assertRaises(
errors.JSONDecodeError,
decoder.JSONDecoder,
encoding="ISO-8859-1"
)
def test_json_decoder_decode_correct(self):
"""
Description: Tests that the decode() method of the
JSONDecoder class can decode a properly formed JSON
document.
Input:
(str): {"id": "001", "name": "test-012", "items": ["a", "b", "c"]}
Output:
Test Case: Corresponds to test TEST-0012.
"""
test_input = '{"id": "001", "name": "test-012", "items": ["a", "b", "c"]}'
test_output = dict()
test_output["id"] = "001"
test_output["name"] = "test-012"
test_output["items"] = ["a", "b", "c"]
dcdr = decoder.JSONDecoder()
self.assertEqual(dcdr.decode(test_input), test_output)
def test_json_decoder_decode_malformed(self):
"""
Description: Tests that the decode() method of the
JSONDecoder class can properly recognize a malformed JSON
document.
Input:
(str): {"id": "001", "name": "test-012", "items": ["a", "b", "c"]]
Output:
(JSONDecodeError)
Test Case: Corresponds to test TEST-0013.
"""
test_input = '{"id": "001", "name": "test-012", "items": ["a", "b", "c"]]]'
dcdr = decoder.JSONDecoder()
self.assertRaises(
errors.JSONDecodeError,
dcdr.decode,
test_input
)
def test_json_decoder_decode_empty(self):
"""
Description: Tests that the decode() method of the
JSONDecoder class can recognize an empty string.
Input:
(str): ""
Output:
(JSONDecodeError)
Test Case: Corresponds to test TEST-0014.
"""
test_input = ''
dcdr = decoder.JSONDecoder()
self.assertRaises(
errors.JSONDecodeError,
dcdr.decode,
test_input
)
def test_raw_decoder_decode_correct(self):
"""
Description: Tests that the raw_decode() method of the
JSONDecoder class can properly decode an embedded JSON
document.
Input:
(str): "["abc", "def", "ghi"] This is a test!"
Output:
Test Case: Corresponds to test TEST-0015.
"""
dcdr = decoder.JSONDecoder()
test_input = '["abc", "def", "ghi"] This is a test!'
test_output = (['abc', 'def', 'ghi'], 21)
self.assertEqual(dcdr.raw_decode(test_input), test_output)
def test_raw_decoder_decode_malformed(self):
"""
Description: Tests that the raw_decode() method of the
JSONDecoder class can recognize a malformed JSON document.
Input:
(str): "["abc", "def", "ghi"} This is a test!"
Output:
(JSONDecodeError)
Test Case: Corresponds to test TEST-0016.
"""
dcdr = decoder.JSONDecoder()
test_input = '["abc", "def", "ghi"} This is a test!'
self.assertRaises(
errors.JSONDecodeError,
dcdr.raw_decode,
test_input
)
def test_raw_decoder_decode_empty(self):
"""
Description: Tests that the raw_decode() method of the
JSONDecoder class can recognize an empty string.
Input:
(str): ""
Output:
(JSONDecodeError)
Test Case: Corresponds to test TEST-0017.
"""
test_input = ''
dcdr = decoder.JSONDecoder()
self.assertRaises(
errors.JSONDecodeError,
dcdr.raw_decode,
test_input
)
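# Added for convenience (not part of the original file): allows this test
# module to be run directly, assuming it is importable alongside simplejson.
if __name__ == '__main__':
    import unittest
    unittest.main(verbosity=2)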
|
test_json_array_malformed
|
index.ts
|
import apiWrapper from 'lib/api/apiWrapper'
import { NextApiRequest, NextApiResponse } from 'next'
export default (req: NextApiRequest, res: NextApiResponse) => apiWrapper(req, res, handler)
async function handler(req: NextApiRequest, res: NextApiResponse) {
const { method } = req
switch (method) {
case 'GET':
return handleGetAll(req, res)
default:
res.setHeader('Allow', ['GET'])
res.status(405).json({ data: null, error: { message: `Method ${method} Not Allowed` } })
}
}
const handleGetAll = async (req: NextApiRequest, res: NextApiResponse) => {
// Platform specific endpoint
|
ref: 'default',
name: 'Default Project',
organization_id: 1,
cloud_provider: 'localhost',
status: 'ACTIVE_HEALTHY',
region: 'local',
},
]
return res.status(200).json(response)
}
|
const response = [
{
id: 1,
|
request_metrics.go
|
package xhttp
import (
"net/http"
"strconv"
"time"
"github.com/go-phorce/dolly/metrics"
"github.com/go-phorce/dolly/metrics/tags"
"github.com/go-phorce/dolly/xhttp/identity"
)
// requestMetrics is an http.Handler that records execution metrics of the wrapped handler
type requestMetrics struct {
handler http.Handler
responseCodes []string
}
// NewRequestMetrics creates a wrapper handler to produce metrics for each request
func
|
(h http.Handler) http.Handler {
rm := requestMetrics{
handler: h,
responseCodes: make([]string, 599),
}
for idx := range rm.responseCodes {
rm.responseCodes[idx] = strconv.Itoa(idx)
}
return &rm
}
func (rm *requestMetrics) statusCode(statusCode int) string {
if (statusCode < len(rm.responseCodes)) && (statusCode > 0) {
return rm.responseCodes[statusCode]
}
return strconv.Itoa(statusCode)
}
var (
keyForHTTPReqPerf = []string{"http", "request", "perf"}
keyForHTTPReqSuccessful = []string{"http", "request", "status", "successful"}
keyForHTTPReqFailed = []string{"http", "request", "status", "failed"}
)
func (rm *requestMetrics) ServeHTTP(w http.ResponseWriter, r *http.Request) {
start := time.Now().UTC()
rc := NewResponseCapture(w)
rm.handler.ServeHTTP(rc, r)
role := identity.ForRequest(r).Identity().Role()
sc := rc.StatusCode()
tags := []metrics.Tag{
{Name: tags.Method, Value: r.Method},
{Name: tags.Role, Value: role},
{Name: tags.Status, Value: rm.statusCode(sc)},
{Name: tags.URI, Value: r.URL.Path},
}
if sc >= 400 {
metrics.IncrCounter(keyForHTTPReqFailed, 1, tags...)
} else {
metrics.MeasureSince(keyForHTTPReqPerf, start, tags...)
metrics.IncrCounter(keyForHTTPReqSuccessful, 1, tags...)
}
}
|
NewRequestMetrics
|
experiment_planner_baseline_3DUNet.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import shutil
from collections import OrderedDict
from copy import deepcopy
import nnunet
import numpy as np
from batchgenerators.utilities.file_and_folder_operations import *
from nnunet.configuration import default_num_threads
from nnunet.experiment_planning.DatasetAnalyzer import DatasetAnalyzer
from nnunet.experiment_planning.common_utils import get_pool_and_conv_props_poolLateV2
from nnunet.experiment_planning.utils import create_lists_from_splitted_dataset
from nnunet.network_architecture.generic_UNet import Generic_UNet
from nnunet.paths import *
from nnunet.preprocessing.cropping import get_case_identifier_from_npz
from nnunet.training.model_restore import recursive_find_python_class
class ExperimentPlanner(object):
|
def maybe_mkdir_p(directory):
directory = os.path.abspath(directory)
splits = directory.split("\\")[1:]
base = directory.split('\\')[0]
for i in range(0, len(splits)):
if not os.path.isdir(join(base, join("\\", *splits[:i+1]))):
try:
os.mkdir(join(base, join("\\", *splits[:i+1])))
except FileExistsError:
# this can sometimes happen when two jobs try to create the same directory at the same time,
# especially on network drives.
print("WARNING: Folder %s already existed and does not need to be created" % directory)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-t", "--task_ids", nargs="+", help="list of int")
parser.add_argument("-p", action="store_true", help="set this if you actually want to run the preprocessing. If "
"this is not set then this script will only create the plans file")
parser.add_argument("-tl", type=int, required=False, default=8, help="num_threads_lowres")
parser.add_argument("-tf", type=int, required=False, default=8, help="num_threads_fullres")
args = parser.parse_args()
task_ids = args.task_ids
run_preprocessing = args.p
tl = args.tl
tf = args.tf
tasks = []
for i in task_ids:
i = int(i)
candidates = subdirs(nnUNet_cropped_data, prefix="Task%03.0d" % i, join=False)
assert len(candidates) == 1
tasks.append(candidates[0])
for t in tasks:
try:
print("\n\n\n", t)
cropped_out_dir = os.path.join(nnUNet_cropped_data, t)
preprocessing_output_dir_this_task = os.path.join(preprocessing_output_dir, t)
splitted_4d_output_dir_task = os.path.join(nnUNet_raw_data, t)
lists, modalities = create_lists_from_splitted_dataset(splitted_4d_output_dir_task)
dataset_analyzer = DatasetAnalyzer(cropped_out_dir, overwrite=False)
_ = dataset_analyzer.analyze_dataset() # this will write output files that will be used by the ExperimentPlanner
maybe_mkdir_p(preprocessing_output_dir_this_task)
shutil.copy(join(cropped_out_dir, "dataset_properties.pkl"), preprocessing_output_dir_this_task)
shutil.copy(join(nnUNet_raw_data, t, "dataset.json"), preprocessing_output_dir_this_task)
threads = (tl, tf)
print("number of threads: ", threads, "\n")
exp_planner = ExperimentPlanner(cropped_out_dir, preprocessing_output_dir_this_task)
exp_planner.plan_experiment()
if run_preprocessing:
exp_planner.run_preprocessing(threads)
except Exception as e:
print(e)
|
def __init__(self, folder_with_cropped_data, preprocessed_output_folder):
self.folder_with_cropped_data = folder_with_cropped_data
self.preprocessed_output_folder = preprocessed_output_folder
self.list_of_cropped_npz_files = subfiles(self.folder_with_cropped_data, True, None, ".npz", True)
self.preprocessor_name = "GenericPreprocessor"
assert isfile(join(self.folder_with_cropped_data, "dataset_properties.pkl")), \
"folder_with_cropped_data must contain dataset_properties.pkl"
self.dataset_properties = load_pickle(join(self.folder_with_cropped_data, "dataset_properties.pkl"))
self.plans_per_stage = OrderedDict()
self.plans = OrderedDict()
self.plans_fname = join(self.preprocessed_output_folder, "nnUNetPlans" + "fixed_plans_3D.pkl")
self.data_identifier = default_data_identifier
self.transpose_forward = [0, 1, 2]
self.transpose_backward = [0, 1, 2]
self.unet_base_num_features = Generic_UNet.BASE_NUM_FEATURES_3D
self.unet_max_num_filters = 320
self.unet_max_numpool = 999
self.unet_min_batch_size = 2
self.unet_featuremap_min_edge_length = 4
self.target_spacing_percentile = 50
self.anisotropy_threshold = 3
self.how_much_of_a_patient_must_the_network_see_at_stage0 = 4 # 1/4 of a patient
self.batch_size_covers_max_percent_of_dataset = 0.05 # all samples in the batch together cannot cover more
# than 5% of the entire dataset
self.conv_per_stage = 2
def get_target_spacing(self):
spacings = self.dataset_properties['all_spacings']
# target = np.median(np.vstack(spacings), 0)
# if target spacing is very anisotropic we may want to not downsample the axis with the worst spacing
# uncomment after mystery task submission
"""worst_spacing_axis = np.argmax(target)
if max(target) > (2.5 * min(target)):
spacings_of_that_axis = np.vstack(spacings)[:, worst_spacing_axis]
target_spacing_of_that_axis = np.percentile(spacings_of_that_axis, 5)
target[worst_spacing_axis] = target_spacing_of_that_axis"""
target = np.percentile(np.vstack(spacings), self.target_spacing_percentile, 0)
return target
def save_my_plans(self):
with open(self.plans_fname, 'wb') as f:
pickle.dump(self.plans, f)
def load_my_plans(self):
self.plans = load_pickle(self.plans_fname)
self.plans_per_stage = self.plans['plans_per_stage']
self.dataset_properties = self.plans['dataset_properties']
self.transpose_forward = self.plans['transpose_forward']
self.transpose_backward = self.plans['transpose_backward']
def determine_postprocessing(self):
pass
"""
Spoiler: This is unused, postprocessing was removed. Ignore it.
:return:
print("determining postprocessing...")
props_per_patient = self.dataset_properties['segmentation_props_per_patient']
all_region_keys = [i for k in props_per_patient.keys() for i in props_per_patient[k]['only_one_region'].keys()]
all_region_keys = list(set(all_region_keys))
only_keep_largest_connected_component = OrderedDict()
for r in all_region_keys:
all_results = [props_per_patient[k]['only_one_region'][r] for k in props_per_patient.keys()]
only_keep_largest_connected_component[tuple(r)] = all(all_results)
print("Postprocessing: only_keep_largest_connected_component", only_keep_largest_connected_component)
all_classes = self.dataset_properties['all_classes']
classes = [i for i in all_classes if i > 0]
props_per_patient = self.dataset_properties['segmentation_props_per_patient']
min_size_per_class = OrderedDict()
for c in classes:
all_num_voxels = []
for k in props_per_patient.keys():
all_num_voxels.append(props_per_patient[k]['volume_per_class'][c])
if len(all_num_voxels) > 0:
min_size_per_class[c] = np.percentile(all_num_voxels, 1) * MIN_SIZE_PER_CLASS_FACTOR
else:
min_size_per_class[c] = np.inf
min_region_size_per_class = OrderedDict()
for c in classes:
region_sizes = [l for k in props_per_patient for l in props_per_patient[k]['region_volume_per_class'][c]]
if len(region_sizes) > 0:
min_region_size_per_class[c] = min(region_sizes)
# we don't need that line but better safe than sorry, right?
min_region_size_per_class[c] = min(min_region_size_per_class[c], min_size_per_class[c])
else:
min_region_size_per_class[c] = 0
print("Postprocessing: min_size_per_class", min_size_per_class)
print("Postprocessing: min_region_size_per_class", min_region_size_per_class)
return only_keep_largest_connected_component, min_size_per_class, min_region_size_per_class
"""
def get_properties_for_stage(self, current_spacing, original_spacing, original_shape, num_cases,
num_modalities, num_classes):
"""
Computation of input patch size starts out with the new median shape (in voxels) of a dataset. This is
opposed to prior experiments where I based it on the median size in mm. The rationale behind this is that
for some organ of interest the acquisition method will most likely be chosen such that the field of view and
voxel resolution go hand in hand to show the doctor what they need to see. This assumption may be violated
for some modalities with anisotropy (cine MRI) but we will have to live with that. In future experiments I
will try to 1) base the input patch size on the aspect ratio of the input size in mm (instead of voxels) and 2)
try to enforce that we see the same 'distance' in all directions (try to maintain equal patch size in mm).
The patches created here attempt to keep the aspect ratio of the new_median_shape
:param current_spacing:
:param original_spacing:
:param original_shape:
:param num_cases:
:return:
"""
new_median_shape = np.round(original_spacing / current_spacing * original_shape).astype(int)
dataset_num_voxels = np.prod(new_median_shape) * num_cases
# the next line is what we had before as a default. The patch size had the same aspect ratio as the median shape of a patient. We swapped t
# input_patch_size = new_median_shape
# compute how many voxels are one mm
input_patch_size = 1 / np.array(current_spacing)
# normalize voxels per mm
input_patch_size /= input_patch_size.mean()
# create an isotropic patch of size 512x512x512mm
input_patch_size *= 1 / min(input_patch_size) * 512 # to get a starting value
input_patch_size = np.round(input_patch_size).astype(int)
# clip it to the median shape of the dataset because patches larger than that do not make much sense
input_patch_size = [min(i, j) for i, j in zip(input_patch_size, new_median_shape)]
network_num_pool_per_axis, pool_op_kernel_sizes, conv_kernel_sizes, new_shp, \
shape_must_be_divisible_by = get_pool_and_conv_props_poolLateV2(input_patch_size,
self.unet_featuremap_min_edge_length,
self.unet_max_numpool,
current_spacing)
ref = Generic_UNet.use_this_for_batch_size_computation_3D
here = Generic_UNet.compute_approx_vram_consumption(new_shp, network_num_pool_per_axis,
self.unet_base_num_features,
self.unet_max_num_filters, num_modalities,
num_classes,
pool_op_kernel_sizes, conv_per_stage=self.conv_per_stage)
while here > ref:
axis_to_be_reduced = np.argsort(new_shp / new_median_shape)[-1]
tmp = deepcopy(new_shp)
tmp[axis_to_be_reduced] -= shape_must_be_divisible_by[axis_to_be_reduced]
_, _, _, _, shape_must_be_divisible_by_new = \
get_pool_and_conv_props_poolLateV2(tmp,
self.unet_featuremap_min_edge_length,
self.unet_max_numpool,
current_spacing)
new_shp[axis_to_be_reduced] -= shape_must_be_divisible_by_new[axis_to_be_reduced]
# we have to recompute numpool now:
network_num_pool_per_axis, pool_op_kernel_sizes, conv_kernel_sizes, new_shp, \
shape_must_be_divisible_by = get_pool_and_conv_props_poolLateV2(new_shp,
self.unet_featuremap_min_edge_length,
self.unet_max_numpool,
current_spacing)
here = Generic_UNet.compute_approx_vram_consumption(new_shp, network_num_pool_per_axis,
self.unet_base_num_features,
self.unet_max_num_filters, num_modalities,
num_classes, pool_op_kernel_sizes,
conv_per_stage=self.conv_per_stage)
# print(new_shp)
input_patch_size = new_shp
batch_size = Generic_UNet.DEFAULT_BATCH_SIZE_3D # This is what works with 128**3
batch_size = int(np.floor(max(ref / here, 1) * batch_size))
# check if batch size is too large
max_batch_size = np.round(self.batch_size_covers_max_percent_of_dataset * dataset_num_voxels /
np.prod(input_patch_size, dtype=np.int64)).astype(int)
max_batch_size = max(max_batch_size, self.unet_min_batch_size)
batch_size = max(1, min(batch_size, max_batch_size))
do_dummy_2D_data_aug = (max(input_patch_size) / input_patch_size[
0]) > self.anisotropy_threshold
plan = {
'batch_size': batch_size,
'num_pool_per_axis': network_num_pool_per_axis,
'patch_size': input_patch_size,
'median_patient_size_in_voxels': new_median_shape,
'current_spacing': current_spacing,
'original_spacing': original_spacing,
'do_dummy_2D_data_aug': do_dummy_2D_data_aug,
'pool_op_kernel_sizes': pool_op_kernel_sizes,
'conv_kernel_sizes': conv_kernel_sizes,
}
return plan
def plan_experiment(self):
use_nonzero_mask_for_normalization = self.determine_whether_to_use_mask_for_norm()
print("Are we using the nonzero mask for normalizaion?", use_nonzero_mask_for_normalization)
spacings = self.dataset_properties['all_spacings']
sizes = self.dataset_properties['all_sizes']
all_classes = self.dataset_properties['all_classes']
modalities = self.dataset_properties['modalities']
num_modalities = len(list(modalities.keys()))
target_spacing = self.get_target_spacing()
new_shapes = [np.array(i) / target_spacing * np.array(j) for i, j in zip(spacings, sizes)]
max_spacing_axis = np.argmax(target_spacing)
remaining_axes = [i for i in list(range(3)) if i != max_spacing_axis]
self.transpose_forward = [max_spacing_axis] + remaining_axes
self.transpose_backward = [np.argwhere(np.array(self.transpose_forward) == i)[0][0] for i in range(3)]
# we base our calculations on the median shape of the datasets
median_shape = np.median(np.vstack(new_shapes), 0)
print("the median shape of the dataset is ", median_shape)
max_shape = np.max(np.vstack(new_shapes), 0)
print("the max shape in the dataset is ", max_shape)
min_shape = np.min(np.vstack(new_shapes), 0)
print("the min shape in the dataset is ", min_shape)
print("we don't want feature maps smaller than ", self.unet_featuremap_min_edge_length, " in the bottleneck")
# how many stages will the image pyramid have?
self.plans_per_stage = list()
target_spacing_transposed = np.array(target_spacing)[self.transpose_forward]
median_shape_transposed = np.array(median_shape)[self.transpose_forward]
print("the transposed median shape of the dataset is ", median_shape_transposed)
print("generating configuration for 3d_fullres")
self.plans_per_stage.append(self.get_properties_for_stage(target_spacing_transposed, target_spacing_transposed,
median_shape_transposed,
len(self.list_of_cropped_npz_files),
num_modalities, len(all_classes) + 1))
# thanks Zakiyi (https://github.com/MIC-DKFZ/nnUNet/issues/61) for spotting this bug :-)
# if np.prod(self.plans_per_stage[-1]['median_patient_size_in_voxels'], dtype=np.int64) / \
# architecture_input_voxels < HOW_MUCH_OF_A_PATIENT_MUST_THE_NETWORK_SEE_AT_STAGE0:
architecture_input_voxels_here = np.prod(self.plans_per_stage[-1]['patch_size'], dtype=np.int64)
if np.prod(median_shape) / architecture_input_voxels_here < \
self.how_much_of_a_patient_must_the_network_see_at_stage0:
more = False
else:
more = True
if more:
print("generating configuration for 3d_lowres")
# if we are doing more than one stage then we want the lowest stage to have exactly
# HOW_MUCH_OF_A_PATIENT_MUST_THE_NETWORK_SEE_AT_STAGE0 (this is 4 by default), so the number of voxels in the
# median shape of the lowest stage must be 4 times as much as the network can process at once (128x128x128 by
# default). The problem is that we are downsampling higher resolution axes before we start downsampling the
# out-of-plane axis. We could probably/maybe do this analytically but I am lazy, so here
# we do it the dumb way
lowres_stage_spacing = deepcopy(target_spacing)
num_voxels = np.prod(median_shape, dtype=np.float64)
while num_voxels > self.how_much_of_a_patient_must_the_network_see_at_stage0 * architecture_input_voxels_here:
max_spacing = max(lowres_stage_spacing)
if np.any((max_spacing / lowres_stage_spacing) > 2):
lowres_stage_spacing[(max_spacing / lowres_stage_spacing) > 2] \
*= 1.01
else:
lowres_stage_spacing *= 1.01
num_voxels = np.prod(target_spacing / lowres_stage_spacing * median_shape, dtype=np.float64)
lowres_stage_spacing_transposed = np.array(lowres_stage_spacing)[self.transpose_forward]
new = self.get_properties_for_stage(lowres_stage_spacing_transposed, target_spacing_transposed,
median_shape_transposed,
len(self.list_of_cropped_npz_files),
num_modalities, len(all_classes) + 1)
architecture_input_voxels_here = np.prod(new['patch_size'], dtype=np.int64)
if 2 * np.prod(new['median_patient_size_in_voxels'], dtype=np.int64) < np.prod(
self.plans_per_stage[0]['median_patient_size_in_voxels'], dtype=np.int64):
self.plans_per_stage.append(new)
self.plans_per_stage = self.plans_per_stage[::-1]
self.plans_per_stage = {i: self.plans_per_stage[i] for i in range(len(self.plans_per_stage))} # convert to dict
print(self.plans_per_stage)
print("transpose forward", self.transpose_forward)
print("transpose backward", self.transpose_backward)
normalization_schemes = self.determine_normalization_scheme()
only_keep_largest_connected_component, min_size_per_class, min_region_size_per_class = None, None, None
# removed training data based postprocessing. This is deprecated
# these are independent of the stage
plans = {'num_stages': len(list(self.plans_per_stage.keys())), 'num_modalities': num_modalities,
'modalities': modalities, 'normalization_schemes': normalization_schemes,
'dataset_properties': self.dataset_properties, 'list_of_npz_files': self.list_of_cropped_npz_files,
'original_spacings': spacings, 'original_sizes': sizes,
'preprocessed_data_folder': self.preprocessed_output_folder, 'num_classes': len(all_classes),
'all_classes': all_classes, 'base_num_features': self.unet_base_num_features,
'use_mask_for_norm': use_nonzero_mask_for_normalization,
'keep_only_largest_region': only_keep_largest_connected_component,
'min_region_size_per_class': min_region_size_per_class, 'min_size_per_class': min_size_per_class,
'transpose_forward': self.transpose_forward, 'transpose_backward': self.transpose_backward,
'data_identifier': self.data_identifier, 'plans_per_stage': self.plans_per_stage,
'preprocessor_name': self.preprocessor_name,
'conv_per_stage': self.conv_per_stage,
}
self.plans = plans
self.save_my_plans()
def determine_normalization_scheme(self):
schemes = OrderedDict()
modalities = self.dataset_properties['modalities']
num_modalities = len(list(modalities.keys()))
for i in range(num_modalities):
if modalities[i] == "CT" or modalities[i] == 'ct':
schemes[i] = "CT"
elif modalities[i] == 'noNorm':
schemes[i] = "noNorm"
else:
schemes[i] = "nonCT"
return schemes
def save_properties_of_cropped(self, case_identifier, properties):
with open(join(self.folder_with_cropped_data, "%s.pkl" % case_identifier), 'wb') as f:
pickle.dump(properties, f)
def load_properties_of_cropped(self, case_identifier):
with open(join(self.folder_with_cropped_data, "%s.pkl" % case_identifier), 'rb') as f:
properties = pickle.load(f)
return properties
def determine_whether_to_use_mask_for_norm(self):
# only use the nonzero mask for normalization if the cropping based on it resulted in a decrease in
# image size (this is an indication that the data is something like brats/isles and then we want to
# normalize in the brain region only)
modalities = self.dataset_properties['modalities']
num_modalities = len(list(modalities.keys()))
use_nonzero_mask_for_norm = OrderedDict()
for i in range(num_modalities):
if "CT" in modalities[i]:
use_nonzero_mask_for_norm[i] = False
else:
all_size_reductions = []
for k in self.dataset_properties['size_reductions'].keys():
all_size_reductions.append(self.dataset_properties['size_reductions'][k])
if np.median(all_size_reductions) < 3 / 4.:
print("using nonzero mask for normalization")
use_nonzero_mask_for_norm[i] = True
else:
print("not using nonzero mask for normalization")
use_nonzero_mask_for_norm[i] = False
for c in self.list_of_cropped_npz_files:
case_identifier = get_case_identifier_from_npz(c)
properties = self.load_properties_of_cropped(case_identifier)
properties['use_nonzero_mask_for_norm'] = use_nonzero_mask_for_norm
self.save_properties_of_cropped(case_identifier, properties)
use_nonzero_mask_for_normalization = use_nonzero_mask_for_norm
return use_nonzero_mask_for_normalization
def write_normalization_scheme_to_patients(self):
"""
This is used for test set preprocessing
:return:
"""
for c in self.list_of_cropped_npz_files:
case_identifier = get_case_identifier_from_npz(c)
properties = self.load_properties_of_cropped(case_identifier)
properties['use_nonzero_mask_for_norm'] = self.plans['use_mask_for_norm']
self.save_properties_of_cropped(case_identifier, properties)
def run_preprocessing(self, num_threads):
if os.path.isdir(join(self.preprocessed_output_folder, "gt_segmentations")):
shutil.rmtree(join(self.preprocessed_output_folder, "gt_segmentations"))
shutil.copytree(join(self.folder_with_cropped_data, "gt_segmentations"),
join(self.preprocessed_output_folder, "gt_segmentations"))
normalization_schemes = self.plans['normalization_schemes']
use_nonzero_mask_for_normalization = self.plans['use_mask_for_norm']
intensityproperties = self.plans['dataset_properties']['intensityproperties']
preprocessor_class = recursive_find_python_class([join(nnunet.__path__[0], "preprocessing")],
self.preprocessor_name, current_module="nnunet.preprocessing")
assert preprocessor_class is not None
preprocessor = preprocessor_class(normalization_schemes, use_nonzero_mask_for_normalization,
self.transpose_forward,
intensityproperties)
target_spacings = [i["current_spacing"] for i in self.plans_per_stage.values()]
if self.plans['num_stages'] > 1 and not isinstance(num_threads, (list, tuple)):
num_threads = (default_num_threads, num_threads)
elif self.plans['num_stages'] == 1 and isinstance(num_threads, (list, tuple)):
num_threads = num_threads[-1]
preprocessor.run(target_spacings, self.folder_with_cropped_data, self.preprocessed_output_folder,
self.plans['data_identifier'], num_threads)
|
updater.py
|
#!/usr/bin/python
# -*- coding:utf-8 -*-
import os
import os.path
import logging
import logging.handlers
import sqlite3
import tweepy
import requests
from six.moves.urllib.request import urlretrieve
from six.moves.urllib.parse import urlencode
DB_FILENAME = 'aladin.db'
LOG_FILENAME = 'aladin.log'
ALADIN_ITEM_ENDPOINT = 'http://www.aladin.co.kr/ttb/api/itemlist.aspx'
class TwitterAuth:
"""트위터 인증용 클래스"""
def __init__(self, consumer_key, consumer_secret, access_token, token_secret):
self.consumer_key = consumer_key
self.consumer_secret = consumer_secret
self.access_token = access_token
self.token_secret = token_secret
def get_oauth_handler(self):
"""OAuthHandler 작성"""
auth_handler = tweepy.OAuthHandler(self.consumer_key, self.consumer_secret)
auth_handler.set_access_token(self.access_token, self.token_secret)
return auth_handler
def make_url(category_id, ttb_key, partner_id, start, limit):
"""요청 URL 작성"""
query = dict()
query['querytype'] = 'itemnewall'
query['searchtarget'] = 'book'
query['version'] = '20131101'
query['cover'] = 'big'
query['output'] = 'js'
query['categoryid'] = category_id
query['ttbkey'] = ttb_key
query['start'] = start
query['maxresults'] = limit
query['optresult'] = 'ebooklist'
if partner_id:
query['partner'] = partner_id
query_string = urlencode(query)
return ALADIN_ITEM_ENDPOINT + '?' + query_string
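# Added usage sketch (illustrative only; the ttb key and category id below are
# made-up placeholders, not real credentials):
#   make_url(category_id=50930, ttb_key='my-ttb-key', partner_id='', start=1, limit=30)
# returns something like
#   http://www.aladin.co.kr/ttb/api/itemlist.aspx?querytype=itemnewall&searchtarget=book&...&ttbkey=my-ttb-key&start=1&maxresults=30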
|
def normalize_link(link):
"""링크 수정"""
return link.replace('\\/', '/').replace('&amp;', '&')
def make_status(title, info, link):
"""트윗할 수 있게 타이틀의 사이즈를 줄임"""
# Twitter allows 140 characters, but the link takes up 23 of them, so the text is counted against 117.
status = title + info
if len(status) > 117:
rest_count = 117 - len(info)
status = title[:rest_count-1] + '…' + info
return status + link
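# Added illustration (not part of the original script): with a short title the
# status is simply title + info + link; with a long one, title + info is trimmed
# to 117 characters (140 minus the 23 a shortened link occupies) and an ellipsis
# is inserted before the link, e.g.
#   make_status(u'Title', u' (author / publisher) ', 'http://aladin.kr/p/book')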
def create_db_if_not_exist(connection, cursor):
"""테이블이 없으면 생성"""
table_create_query = '''
CREATE TABLE IF NOT EXISTS books(
id INTEGER PRIMARY KEY,
title TEXT,
author TEXT,
publisher TEXT,
link TEXT,
img_src TEXT,
isbn TEXT
)'''
cursor.execute(table_create_query)
if cursor.rowcount != -1:
connection.commit()
def update_timeline(category_id, ttb_key, auth, partner_id='', start=1, limit=30):
"""타임라인 업데이트"""
query_url = make_url(category_id, ttb_key, partner_id, start, limit)
response = requests.get(query_url)
if response.status_code == 200:
res = response.json()
if res['totalResults'] > 0:
curdir = os.path.dirname(__file__)
# Logger
logfile_path = os.path.join(curdir, LOG_FILENAME)
logfile_size = 1024 * 1024 * 4
formatter = logging.Formatter('[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s > %(message)s')
handler = logging.handlers.RotatingFileHandler(logfile_path, 'a', logfile_size, 8)
handler.setFormatter(formatter)
logger = logging.getLogger(__name__)
logger.addHandler(handler)
db = None
try:
db = sqlite3.connect(curdir + '/' + DB_FILENAME)
cursor = db.cursor()
create_db_if_not_exist(db, cursor)
twitter_api = tweepy.API(auth.get_oauth_handler())
for entity in res['item']:
# Only update when the item is not in the DB yet.
cursor.execute('SELECT COUNT(*) FROM books WHERE id=?', (entity['itemId'],))
row = cursor.fetchone()
if row[0] == 0:
title = entity['title']
link = normalize_link(entity['link'])
cover = entity['cover']
# Fetch the cover image
img_file = None
if cover is not None:
filename = os.path.basename(cover)
f = urlretrieve(cover, filename)
img_file = f[0]
# Tweet
try:
media_ids = None
if img_file is not None:
media_status = twitter_api.media_upload(filename=img_file)
# There is only one image file, but it must be wrapped in a tuple or list so tweepy does not raise an error
media_ids = (media_status.media_id,)
additional_info = u' ({0} / {1} / {2} / {3}원) '.format(entity['author'], entity['publisher'], entity['pubDate'], entity['priceStandard'])
status = make_status(title, additional_info, link)
update_status = twitter_api.update_status(status=status, media_ids=media_ids)
# If ebook info is available, post one more linked reply
if entity['subInfo']['ebookList']:
ebook_info = entity['subInfo']['ebookList'][0]
title = u'[eBook] ' + title
additional_info = u' ({0}원) '.format(ebook_info['priceSales'])
link = normalize_link(ebook_info['link'])
status = make_status(title, additional_info, link)
twitter_api.update_status(status=status, in_reply_to_status_id=update_status.id, media_ids=media_ids)
except tweepy.error.TweepError as e:
logger.error('%s - %s', title, e)
continue
finally:
if img_file is not None:
os.remove(img_file)
img_file = None
# Add to the DB
info = (entity['itemId'], entity['title'], entity['author'], entity['publisher'], link, cover, entity['isbn13'])
cursor.execute('INSERT INTO books VALUES (?, ?, ?, ?, ?, ?, ?)', info)
db.commit()
except sqlite3.Error as e:
if db is not None:
db.rollback()
logger.error(e)
finally:
if db is not None:
db.close()
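# Added usage sketch (illustrative only; every key below is a made-up
# placeholder, not a real credential):
#   auth = TwitterAuth('consumer-key', 'consumer-secret', 'access-token', 'token-secret')
#   update_timeline(category_id=50930, ttb_key='my-ttb-key', auth=auth)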
| |
manual_rigid_body.py
|
import numpy as np
import nibabel as nib
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../../')
from my_functions.matrix_stuff import *
def manual_rigid_body(fname = 'example_brain.nii.gz',
outmat = 'transformation.mat',
outimg = 'example_brain_transformed.nii.gz',
theta = np.radians([0,0,0]),
translation_vec = [0,0,0],
type = 'rotation',
flip_coordinates = [True, False, False]):
"""
Function to perform a rigid body transformation based on manually determined parameters.
Args:
- fname (str): filepath to input nifti image (.nii.gz)
- outmat (str): filepath of output 4x4 transformation matrix (.mat)
- outimg (str): filepath of transformed output image (.nii.gz)
- theta (np.array): vector of rotation angles in x,y,z dimension (in radians)
- translation_vec (np.array): vector for translation in x,y,z (in image coordinates)
- type (str): can be 'rotation' or 'translation' or 'rotation_translation'
- flip_coordinates (boolean vector): indicates for which axis the sign of the offset needs to be flipped
Returns:
- M (np.array): output 4x4 transformation matrix
- M is written to outmat
- the output image (outimg) is written out
Note on flip_coordinates:
Voxel coordinates in the image are expected to increase in the following directions
(it's similar to determining the reorient-command):
|
- second dimension: posterior -> anterior
- third dimension: inferior -> superior
if they go the other way, change input variable accordingly, e.g.:
flip_coordinates = [True, False, False]
"""
# get sform from image to determine offset of coordinate-system
img = nib.load(fname)
aff = img.get_affine()
offset = aff[0:3,3]
# which type of manipulation is requested
if type == 'rotation':
print('do rotation only')
M = rotation(theta, offset, flip_coordinates)
elif type == 'translation':
print('do translation only')
M = vector_to_translation_matrix(translation_vec)
elif type == 'rotation_translation':
print('do combined rotation and translation')
M = rotation_translation(theta, translation_vec, offset, flip_coordinates)
# save output matrix
print('output matrix: ', M)
print('save in: ', outmat)
save_matrix4x4(M, outmat)
# apply transformation to input image
applywarp_command = "applywarp -i " + fname + " -r " + fname + " --premat=" + outmat + " --interp=nn -o " + outimg
print('run applywarp: ', applywarp_command)
os.system(applywarp_command)
return M
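# Added usage sketch (illustrative only; file names and angles are placeholders):
#   M = manual_rigid_body(fname='example_brain.nii.gz',
#                         outmat='rot10x.mat',
#                         outimg='example_brain_rot10x.nii.gz',
#                         theta=np.radians([10, 0, 0]),
#                         type='rotation',
#                         flip_coordinates=[True, False, False])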
|
- first dimension: left -> right
|
test_image_annotator_client_v1.py
|
# -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests."""
import mock
import pytest
from google.rpc import status_pb2
from google.cloud import vision_v1
from google.cloud.vision_v1.proto import image_annotator_pb2
from google.longrunning import operations_pb2
class MultiCallableStub(object):
"""Stub for the grpc.UnaryUnaryMultiCallable interface."""
def __init__(self, method, channel_stub):
self.method = method
self.channel_stub = channel_stub
def __call__(self, request, timeout=None, metadata=None, credentials=None):
self.channel_stub.requests.append((self.method, request))
response = None
if self.channel_stub.responses:
response = self.channel_stub.responses.pop()
if isinstance(response, Exception):
raise response
if response:
return response
class ChannelStub(object):
"""Stub for the grpc.Channel interface."""
def __init__(self, responses=[]):
self.responses = responses
self.requests = []
def unary_unary(self, method, request_serializer=None, response_deserializer=None):
return MultiCallableStub(method, self)
class CustomException(Exception):
pass
class TestImageAnnotatorClient(object):
def test_batch_annotate_images(self):
# Setup Expected Response
expected_response = {}
expected_response = image_annotator_pb2.BatchAnnotateImagesResponse(
**expected_response
)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = vision_v1.ImageAnnotatorClient()
# Setup Request
requests = []
response = client.batch_annotate_images(requests)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = image_annotator_pb2.BatchAnnotateImagesRequest(
requests=requests
)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_batch_annotate_images_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = vision_v1.ImageAnnotatorClient()
# Setup request
requests = []
with pytest.raises(CustomException):
client.batch_annotate_images(requests)
|
expected_response = {}
expected_response = image_annotator_pb2.AsyncBatchAnnotateFilesResponse(
**expected_response
)
operation = operations_pb2.Operation(
name="operations/test_async_batch_annotate_files", done=True
)
operation.response.Pack(expected_response)
# Mock the API response
channel = ChannelStub(responses=[operation])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = vision_v1.ImageAnnotatorClient()
# Setup Request
requests = []
response = client.async_batch_annotate_files(requests)
result = response.result()
assert expected_response == result
assert len(channel.requests) == 1
expected_request = image_annotator_pb2.AsyncBatchAnnotateFilesRequest(
requests=requests
)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_async_batch_annotate_files_exception(self):
# Setup Response
error = status_pb2.Status()
operation = operations_pb2.Operation(
name="operations/test_async_batch_annotate_files_exception", done=True
)
operation.error.CopyFrom(error)
# Mock the API response
channel = ChannelStub(responses=[operation])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = vision_v1.ImageAnnotatorClient()
# Setup Request
requests = []
response = client.async_batch_annotate_files(requests)
exception = response.exception()
assert exception.errors[0] == error
|
def test_async_batch_annotate_files(self):
# Setup Expected Response
|
RegionJustification.spec.tsx
|
import {fireEvent, render, screen} from "@testing-library/react";
import '@testing-library/jest-dom/extend-expect'
import {_RegionProvider} from "../../components/common/context/RegionContext";
import {RegionalJustificationDialog} from "../../components/Dialog/RegionalJustification/RegionalJustificationDialog";
jest.doMock('../../components/Dialog/RegionalJustification/RegionalJustificationDialog', () => {
return (props) => (
<div id="regionjustification">
RegionalJustificationDialog
<button className="qa-submit" onClick={() => props.onSubmit('sdfg')}>
submit button
</button>
</div>
);
});
const {RegionJustification} = require('../../components/StatusDownloadPage/RegionJustification');
const featureCollectionExtent = {
type: 'FeatureCollection',
features: [{
type: 'Feature',
geometry: {
type: 'Polygon',
coordinates: [
[
[100.0, 0.0],
[101.0, 0.0],
[101.0, 1.0],
[100.0, 1.0],
[100.0, 0.0],
],
],
},
}],
};
const featureCollectionPolicy = {
type: 'FeatureCollection',
features: [{
type: 'Feature',
geometry: {
type: 'Polygon',
coordinates: [
[
[100.0, 0.0],
[102.0, 0.0],
[102.0, 1.0],
[100.0, 1.0],
[100.0, 0.0],
],
],
},
}],
};
const policy = {
|
policy_footer_text: 'footer text',
policy_cancel_text: 'cancel text',
policy_header_text: 'header text',
policy_title_text: 'title text',
policy_cancel_button_text: 'cancel button text',
region: featureCollectionPolicy,
justification_options: [{
id: '1',
name: 'option1',
display: true,
suboption: {
type: 'dropdown',
options: ['option1 dropoption1', 'option1 dropoption2'],
},
}, {
id: '2',
name: 'option2',
display: true,
suboption: {
type: 'text',
label: 'option2 label text',
},
}],
} as Eventkit.RegionPolicy;
describe('Region justification component', () => {
const defaultProps = () => ({
open: true,
onClose: jest.fn(),
providers: [{slug: 'providerslug'}],
extents: [featureCollectionExtent],
classes: {},
...(global as any).eventkit_test_props,
});
const setup = (propsOverride = {}, contextProps={}, renderer=render as any) => {
const props = {
...defaultProps(),
...propsOverride,
};
return renderer(
<_RegionProvider value={{
getPolicies: jest.fn(),
isFetching: false,
hasError: false,
policies: [policy],
submitPolicy: jest.fn(),
submittedPolicies: [],
...contextProps,
}}>
<RegionJustification {...props} />
</_RegionProvider>
);
};
    it('should not display the RegionalJustificationDialog when no provider is specified', () => {
setup({providers:[]});
expect(screen.queryByText('RegionalJustificationDialog')).not.toBeInTheDocument();
});
it('should fire onSubmit event when button is clicked.', () => {
const submitSpy = jest.fn();
setup(undefined, {submitPolicy: submitSpy});
const submitButton = document.querySelector(`.qa-submit`);
expect(submitButton).toBeInTheDocument();
        expect(submitSpy).not.toHaveBeenCalled();
fireEvent(
submitButton,
new MouseEvent('click', {
bubbles: true,
cancelable: true,
})
);
expect(submitSpy).toHaveBeenCalledTimes(1);
});
});
|
providers: [{slug: 'providerslug', uid: 'sluguid', name: 'providername'}],
uid: 'policyuid',
policies: [{title: 'policy1 title', description: 'policy1 description'}],
|
test_rediscache.py
|
# SimpleCache Tests
# ~~~~~~~~~~~~~~~~~~~
from datetime import timedelta
from rediscache import SimpleCache, RedisConnect, cache_it, cache_it_json, CacheMissException, ExpiredKeyException, \
DoNotCache
from unittest import TestCase, main
import time
class ComplexNumber(object): # used in pickle test
def __init__(self, real, imag):
self.real = real
self.imag = imag
def __eq__(self, other):
return self.real == other.real and self.imag == other.imag
class SimpleCacheTest(TestCase):
def setUp(self):
|
def test_expire(self):
quick_c = SimpleCache()
quick_c.store("foo", "bar", expire=1)
time.sleep(1.1)
self.assertRaises(ExpiredKeyException, quick_c.get, "foo")
quick_c.flush()
quick_c.store("foo", "bar", expire=timedelta(seconds=1))
time.sleep(1.1)
self.assertRaises(ExpiredKeyException, quick_c.get, "foo")
quick_c.flush()
def test_miss(self):
self.assertRaises(CacheMissException, self.c.get, "blablabla")
def test_kwargs_decorator(self):
@cache_it_json(cache=self.c)
def add_it(a, b=10, c=5):
return a + b + c
add_it(3)
self.assertEqual(add_it(3), 18)
add_it(5, b=7)
self.assertEqual(add_it(5, b=7), 17)
add_it(6, c=3)
self.assertEqual(add_it(6, c=3), 19)
def test_store_retrieve(self):
self.c.store("foo", "bar")
foo = self.c.get("foo")
self.assertEqual(foo, "bar")
def test_json(self):
payload = {"example": "data"}
self.c.store_json("json", payload)
self.assertEqual(self.c.get_json("json"), payload)
def test_pickle(self):
payload = ComplexNumber(3, 4)
self.c.store_pickle("pickle", payload)
self.assertEqual(self.c.get_pickle("pickle"), payload)
def test_decorator(self):
self.redis.flushall()
mutable = []
@cache_it(cache=self.c)
def append(n):
mutable.append(n)
return mutable
append(1)
len_before = len(mutable)
mutable_cached = append(1)
len_after = len(mutable)
self.assertEqual(len_before, len_after)
self.assertNotEqual(id(mutable), id(mutable_cached))
self.assertEqual(mutable, mutable_cached)
def test_decorator_do_not_cache(self):
@cache_it(cache=self.c)
def test_no_cache(n):
result = n * 10
raise DoNotCache(result)
keys_before = len(self.c.keys())
r1 = test_no_cache(20)
r2 = test_no_cache(10)
r3 = test_no_cache(30)
r4 = test_no_cache(20)
self.assertEqual(r1, (10 * 20))
self.assertEqual(r2, (10 * 10))
self.assertEqual(r3, (10 * 30))
self.assertEqual(r4, (10 * 20))
keys_after = len(self.c.keys())
self.assertEqual(keys_before, keys_after)
def test_decorator_do_not_cache_reraised(self):
@cache_it(cache=self.c)
def test_no_cache(n):
result = n * 10
try:
raise DoNotCache(result)
except DoNotCache as e:
raise e
except Exception:
pass
keys_before = len(self.c.keys())
r1 = test_no_cache(20)
r2 = test_no_cache(10)
r3 = test_no_cache(30)
r4 = test_no_cache(20)
self.assertEqual(r1, (10 * 20))
self.assertEqual(r4, (10 * 20))
self.assertEqual(r2, (10 * 10))
self.assertEqual(r3, (10 * 30))
keys_after = len(self.c.keys())
self.assertEqual(keys_before, keys_after)
def test_decorator_do_not_cache_wrapping_exception(self):
@cache_it(cache=self.c)
def test_no_cache(n):
try:
result = n / 0
except ZeroDivisionError as e:
raise DoNotCache(e)
keys_before = len(self.c.keys())
r1 = test_no_cache(20)
self.assertTrue(isinstance(r1, ZeroDivisionError))
keys_after = len(self.c.keys())
self.assertEqual(keys_before, keys_after)
def test_decorator_json(self):
import random
mutable = {}
@cache_it_json(cache=self.c)
def set_key(n):
mutable[str(random.random())] = n
return mutable
set_key('a')
len_before = len(mutable)
mutable_cached = set_key('a')
len_after = len(mutable)
self.assertEqual(len_before, len_after)
self.assertNotEqual(id(mutable), id(mutable_cached))
self.assertEqual(mutable, mutable_cached)
def test_decorator_complex_type(self):
import math
@cache_it(cache=self.c)
def add(x, y):
return ComplexNumber(x.real + y.real, x.imag + y.imag)
result = add(ComplexNumber(3, 4), ComplexNumber(4, 5))
result_cached = add(ComplexNumber(3, 4), ComplexNumber(4, 5))
self.assertNotEqual(id(result), id(result_cached))
self.assertEqual(result, result_cached)
self.assertEqual(result, complex(3, 4) + complex(4, 5))
def test_cache_limit(self):
for i in range(100):
self.c.store("foo%d" % i, "foobar")
self.assertTrue(len(self.c) <= 10)
self.assertTrue(len(self.c.keys()) <= 10)
def test_flush(self):
connection = self.c.connection
connection.set("will_not_be_deleted", '42')
self.c.store("will_be_deleted", '10')
len_before = len(self.c)
len_keys_before = len(connection.keys(self.c.make_key("*")))
self.c.flush()
len_after = len(self.c)
len_keys_after = connection.get("will_not_be_deleted")
self.assertTrue(len_before > 0)
self.assertEqual(len_after, 0)
self.assertTrue(len_keys_before > 0)
self.assertEqual(len_keys_after, '42')
self.assertEqual(connection.get("will_not_be_deleted"), '42')
connection.delete("will_not_be_deleted")
def test_flush_namespace(self):
self.redis.flushall()
self.c.store("foo:one", "bir")
self.c.store("foo:two", "bor")
self.c.store("fii", "bur")
len_keys_before = len(self.c.keys())
self.c.flush_namespace('foo')
len_keys_after = len(self.c.keys())
self.assertEqual((len_keys_before - len_keys_after), 2)
self.assertEqual(self.c.get('fii'), 'bur')
self.assertRaises(CacheMissException, self.c.get, "foo:one")
self.assertRaises(CacheMissException, self.c.get, "foo:two")
self.c.flush()
def test_flush_multiple(self):
c1 = SimpleCache(10, namespace=__name__)
c2 = SimpleCache(10)
c1.store("foo", "bar")
c2.store("foo", "bar")
c1.flush()
self.assertEqual(len(c1), 0)
self.assertEqual(len(c2), 1)
c2.flush()
def test_expire_all_in_set(self):
self.c.store("foo", "bir")
self.c.store("fuu", "bor")
self.c.store("fii", "bur")
self.assertEqual(self.c.expire_all_in_set(), (3, 3))
self.assertRaises(ExpiredKeyException, self.c.get, "foo")
self.assertRaises(ExpiredKeyException, self.c.get, "fuu")
self.assertRaises(ExpiredKeyException, self.c.get, "fii")
self.assertTrue(self.c.isexpired("foo"))
self.assertTrue(self.c.isexpired("fuu"))
self.assertTrue(self.c.isexpired("fii"))
def test_expire_namespace(self):
self.c.store("foo:one", "bir")
self.c.store("foo:two", "bor")
self.c.store("fii", "bur")
self.assertEqual(self.c.expire_namespace('foo'), (3, 2))
self.assertRaises(ExpiredKeyException, self.c.get, "foo:one")
self.assertRaises(ExpiredKeyException, self.c.get, "foo:two")
self.assertTrue(self.c.isexpired("foo:one"))
self.assertTrue(self.c.isexpired("foo:two"))
self.assertTrue(self.c.isexpired("fii") > 0)
self.c.flush()
def test_mget(self):
self.c.store("a1", "a")
self.c.store("a2", "aa")
self.c.store("a3", "aaa")
d = self.c.mget(["a1", "a2", "a3"])
self.assertEqual(d["a1"], "a")
self.assertEqual(d["a2"], "aa")
self.assertEqual(d["a3"], "aaa")
def test_mget_nonexistant_key(self):
self.c.store("b1", "b")
self.c.store("b3", "bbb")
d = self.c.mget(["b1", "b2", "b3"])
self.assertEqual(d["b1"], "b")
self.assertTrue("b2" not in d)
self.assertEqual(d["b3"], "bbb")
def test_mget_expiry(self):
self.c.store("c1", "c")
self.c.store("c2", "cc", expire=1)
self.c.store("c3", "ccc")
time.sleep(1.1)
d = self.c.mget(["c1", "c2", "c3"])
self.assertEqual(d["c1"], "c")
self.assertTrue("c2" not in d)
self.assertEqual(d["c3"], "ccc")
def test_mget_json(self):
payload_a1 = {"example_a1": "data_a1"}
payload_a2 = {"example_a2": "data_a2"}
self.c.store_json("json_a1", payload_a1)
self.c.store_json("json_a2", payload_a2)
d = self.c.mget_json(["json_a1", "json_a2"])
self.assertEqual(d["json_a1"], payload_a1)
self.assertEqual(d["json_a2"], payload_a2)
def test_mget_json_nonexistant_key(self):
payload_b1 = {"example_b1": "data_b1"}
payload_b3 = {"example_b3": "data_b3"}
self.c.store_json("json_b1", payload_b1)
self.c.store_json("json_b3", payload_b3)
d = self.c.mget_json(["json_b1", "json_b2", "json_b3"])
self.assertEqual(d["json_b1"], payload_b1)
self.assertTrue("json_b2" not in d)
self.assertEqual(d["json_b3"], payload_b3)
def test_invalidate_key(self):
self.c.store("d1", "d")
self.c.store("d2", "dd")
self.c.store("d3", "ddd")
self.c.invalidate("d2")
d = self.c.mget(["d1", "d2", "d3"])
self.assertEqual(d["d1"], "d")
self.assertTrue("d2" not in d)
self.assertEqual(d["d3"], "ddd")
def tearDown(self):
self.c.flush()
if __name__ == '__main__':
main()
|
self.c = SimpleCache(10) # Cache that has a maximum limit of 10 keys
self.assertIsNotNone(self.c.connection)
self.redis = RedisConnect().connect()
|
element_tet_p2.py
|
import numpy as np
from ..element_h1 import ElementH1
class ElementTetP2(ElementH1):
nodal_dofs = 1
edge_dofs = 1
dim = 3
maxdeg = 2
dofnames = ['u', 'u']
doflocs = np.array([[0., 0., 0.],
[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.],
[.5, 0., 0.],
[.5, .5, 0.],
[0., .5, 0.],
[0., .0, .5],
[.5, .0, .5],
[.0, .5, .5]])
def lbasis(self, X, i):
x, y, z = X
if i == 0: # at (0,0,0)
phi = (1. - 3.*x + 2.*x**2 - 3.*y + 4.*x*y +
2.*y**2 - 3.*z + 4.*x*z + 4.*y*z + 2.*z**2)
dphi = np.array([
-3. + 4.*x + 4.*y + 4.*z,
-3. + 4.*x + 4.*y + 4.*z,
-3. + 4.*x + 4.*y + 4.*z,
])
elif i == 1: # at (1,0,0)
phi = - 1.*x + 2.*x**2
dphi = np.array([
-1 + 4*x,
0*x,
0*x,
])
elif i == 2: # at (0,1,0)
phi = - 1.*y + 2.*y**2
dphi = np.array([
0*x,
-1. + 4.*y,
0*x,
])
elif i == 3: # at (0,0,1)
phi = - 1.*z + 2.*z**2
dphi = np.array([
0*x,
0*x,
-1. + 4.*z,
])
elif i == 4: # between (0,1)
phi = 4.*x - 4.*x**2 - 4.*x*y - 4*x*z
dphi = np.array([
4. - 8.*x - 4.*y - 4.*z,
-4.*x,
-4.*x,
])
elif i == 5: # between (1,2)
phi = 4.*x*y
dphi = np.array([
4.*y,
4.*x,
0*x,
])
elif i == 6: # between (0,2)
phi = 0. + 4.*y - 4.*x*y - 4.*y**2 - 4.*y*z
|
4. - 4.*x - 8.*y - 4.*z,
-4.*y,
])
elif i == 7: # between (0,3)
phi = 0. + 4.*z - 4.*x*z - 4.*y*z - 4.*z**2
dphi = np.array([
-4.*z,
-4.*z,
4. - 4.*x - 4.*y - 8.*z,
])
elif i == 8:
phi = 0. + 4.*x*z
dphi = np.array([
4.*z,
0*x,
4*x,
])
elif i == 9:
phi = 0. + 4.*y*z
dphi = np.array([
0*x,
4*z,
4*y,
])
else:
raise Exception("!")
return phi, dphi
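# A minimal sketch (hypothetical, not part of the original module) of evaluating
# these local basis functions directly: basis 0 should be 1 at the reference
# vertex (0, 0, 0) and 0 at the other three corners.
#
#   X = np.array([[0., 1., 0., 0.],
#                 [0., 0., 1., 0.],
#                 [0., 0., 0., 1.]])
#   phi, dphi = ElementTetP2().lbasis(X, 0)
#   # phi -> array([1., 0., 0., 0.]); dphi has shape (3, 4)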
|
dphi = np.array([
-4.*y,
|
petstore_test.go
|
// Copyright 2019 DeepMap, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"fmt"
"net/http"
"testing"
"github.com/labstack/echo/v4"
echo_middleware "github.com/labstack/echo/v4/middleware"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/indigonote/oapi-codegen/examples/petstore-expanded/echo/api"
"github.com/indigonote/oapi-codegen/pkg/middleware"
"github.com/indigonote/oapi-codegen/pkg/testutil"
)
func TestPetStore(t *testing.T)
|
{
var err error
// Here, we Initialize echo
e := echo.New()
// Now, we create our empty pet store
store := api.NewPetStore()
// Get the swagger description of our API
swagger, err := api.GetSwagger()
require.NoError(t, err)
// This disables swagger server name validation. It seems to work poorly,
// and requires our test server to be in that list.
swagger.Servers = nil
// Validate requests against the OpenAPI spec
e.Use(middleware.OapiRequestValidator(swagger))
// Log requests
e.Use(echo_middleware.Logger())
// We register the autogenerated boilerplate and bind our PetStore to this
// echo router.
api.RegisterHandlers(e, store)
// At this point, we can start sending simulated Http requests, and record
// the HTTP responses to check for validity. This exercises every part of
// the stack except the well-tested HTTP system in Go, which there is no
// point for us to test.
tag := "TagOfSpot"
name := "Spot"
newPet := api.NewPet{
		Name: &name,
Tag: &tag,
Size: 20,
}
result := testutil.NewRequest().Post("/pets").WithJsonBody(newPet).Go(t, e)
// We expect 201 code on successful pet insertion
assert.Equal(t, http.StatusCreated, result.Code())
// We should have gotten a response from the server with the new pet. Make
// sure that its fields match.
var resultPet api.Pet
err = result.UnmarshalBodyToObject(&resultPet)
assert.NoError(t, err, "error unmarshaling response")
assert.Equal(t, newPet.Name, resultPet.Name)
assert.Equal(t, *newPet.Tag, *resultPet.Tag)
// This is the Id of the pet we inserted.
petId := resultPet.Id
// Test the getter function.
result = testutil.NewRequest().Get(fmt.Sprintf("/pets/%d", petId)).WithAcceptJson().Go(t, e)
var resultPet2 api.Pet
err = result.UnmarshalBodyToObject(&resultPet2)
assert.NoError(t, err, "error getting pet")
assert.Equal(t, resultPet, resultPet2)
// We should get a 404 on invalid ID
result = testutil.NewRequest().Get("/pets/27179095781").WithAcceptJson().Go(t, e)
assert.Equal(t, http.StatusNotFound, result.Code())
var petError api.Error
err = result.UnmarshalBodyToObject(&petError)
assert.NoError(t, err, "error getting response", err)
assert.Equal(t, int32(http.StatusNotFound), petError.Code)
// Let's insert another pet for subsequent tests.
tag = "TagOfFido"
name = "Fido"
newPet = api.NewPet{
Name: &name,
Tag: &tag,
Size: 10,
}
result = testutil.NewRequest().Post("/pets").WithJsonBody(newPet).Go(t, e)
// We expect 201 code on successful pet insertion
assert.Equal(t, http.StatusCreated, result.Code())
// We should have gotten a response from the server with the new pet. Make
// sure that its fields match.
err = result.UnmarshalBodyToObject(&resultPet)
assert.NoError(t, err, "error unmarshaling response")
petId2 := resultPet.Id
// Now, list all pets, we should have two
result = testutil.NewRequest().Get("/pets").WithAcceptJson().Go(t, e)
assert.Equal(t, http.StatusOK, result.Code())
var petList []api.Pet
err = result.UnmarshalBodyToObject(&petList)
assert.NoError(t, err, "error getting response", err)
assert.Equal(t, 2, len(petList))
// Filter pets by tag, we should have 1
petList = nil
result = testutil.NewRequest().Get("/pets?tags=TagOfFido").WithAcceptJson().Go(t, e)
assert.Equal(t, http.StatusOK, result.Code())
err = result.UnmarshalBodyToObject(&petList)
assert.NoError(t, err, "error getting response", err)
assert.Equal(t, 1, len(petList))
// Filter pets by non existent tag, we should have 0
petList = nil
result = testutil.NewRequest().Get("/pets?tags=NotExists").WithAcceptJson().Go(t, e)
assert.Equal(t, http.StatusOK, result.Code())
err = result.UnmarshalBodyToObject(&petList)
assert.NoError(t, err, "error getting response", err)
assert.Equal(t, 0, len(petList))
// Let's delete non-existent pet
result = testutil.NewRequest().Delete("/pets/7").Go(t, e)
assert.Equal(t, http.StatusNotFound, result.Code())
err = result.UnmarshalBodyToObject(&petError)
assert.NoError(t, err, "error unmarshaling PetError")
assert.Equal(t, int32(http.StatusNotFound), petError.Code)
// Now, delete both real pets
result = testutil.NewRequest().Delete(fmt.Sprintf("/pets/%d", petId)).Go(t, e)
assert.Equal(t, http.StatusNoContent, result.Code())
result = testutil.NewRequest().Delete(fmt.Sprintf("/pets/%d", petId2)).Go(t, e)
assert.Equal(t, http.StatusNoContent, result.Code())
// Should have no pets left.
petList = nil
result = testutil.NewRequest().Get("/pets").WithAcceptJson().Go(t, e)
assert.Equal(t, http.StatusOK, result.Code())
err = result.UnmarshalBodyToObject(&petList)
assert.NoError(t, err, "error getting response", err)
assert.Equal(t, 0, len(petList))
}
|
|
worker_lookup_params.py
|
# Copyright 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
logger = logging.getLogger(__name__)
class WorkerLookUp():
def __init__(self):
self.id_obj = {"jsonrpc": "2.0", "method": "WorkerLookUp", "id": 1}
self.params_obj = {}
self.request_mode = "file"
self.tamper = {"params": {}}
self.output_json_file_name = "worker_lookup"
def add_json_values(self, input_json_temp, tamper):
if "workerType" in input_json_temp["params"].keys():
if input_json_temp["params"]["workerType"] != "":
self.set_worker_type(input_json_temp["params"]["workerType"])
else:
self.set_worker_type(1)
if "id" in input_json_temp.keys():
self.set_request_id(input_json_temp["id"])
for key in tamper["params"].keys():
param = key
value = tamper["params"][key]
self.set_unknown_parameter(param, value)
def set_unknown_parameter(self, param, value):
self.params_obj[param] = value
def set_worker_type(self, worker_type):
self.params_obj["workerType"] = worker_type
def set_request_id(self, request_id):
self.id_obj["id"] = request_id
def get_params(self):
return self.params_obj.copy()
def to_string(self):
json_rpc_request = self.id_obj
json_rpc_request["params"] = self.get_params()
return json.dumps(json_rpc_request, indent=4)
def configure_data(
self, input_json, worker_obj, pre_test_response):
|
def configure_data_sdk(
self, input_json, worker_obj, pre_test_response):
if input_json is None:
worker_type = 'SGX'
else:
try:
worker_value = input_json["params"]["workerType"]
if worker_value == 1:
worker_type = 'SGX'
elif worker_value == 2:
worker_type = 'MPC'
elif worker_value == 3:
worker_type = 'ZK'
else:
worker_type = worker_value
except LookupError:
worker_type = ""
return worker_type
|
if input_json is None:
self.set_worker_type(1)
else:
self.add_json_values(input_json, self.tamper)
final_json = json.loads(self.to_string())
return final_json
|
trending.js
|
const got = require('@/utils/got');
const querystring = require('querystring');
const got_ins = got.extend({
responseType: 'json',
headers: { 'X-Juejin-Src': 'web' },
});
module.exports = async (ctx) => {
const category = ctx.params.category;
const type = ctx.params.type;
let id = 'all';
let name = '';
await got_ins.get('https://gold-tag-ms.juejin.im/v1/categories').then(function (response) {
const [item] = response.data.d.categoryList.filter((item) => category.localeCompare(item.title) === 0);
if (item !== undefined) {
id = item.id;
name = item.name;
}
});
const params = {
monthly: { period: 'month', title: '本月', link: 'monthlyHottest', url: 'get_entry_by_period' },
weekly: { period: 'week', title: '本周', link: 'weeklyHottest', url: 'get_entry_by_period' },
historical: { period: '', title: '历史', link: 'hottest', url: 'get_entry_by_hot' },
};
const p = params[type];
const qs = querystring.stringify({
src: 'web',
limit: 20,
period: p.period,
category: id,
});
const title = `掘金${name}${p.title}最热`;
const url = `https://timeline-merger-ms.juejin.im/v1/${p.url}?${qs}`;
const link = `https://juejin.im/timeline/${id}?sort=${p.link}`;
const trendingResponse = await got_ins.get(url);
const entrylist = trendingResponse.data.d.entrylist;
const resultItems = await Promise.all(
entrylist.map(async (item) => {
const resultItem = {
title: item.title,
link: item.originalUrl,
description: item.summaryInfo,
pubDate: item.createdAt,
};
return Promise.resolve(resultItem);
})
);
ctx.state.data = {
title: title,
link: link,
item: resultItems,
|
};
};
| |
cms_discuzx_3_2_authority_bypass.py
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
'''
Pentestdb, a database for penetration test.
Copyright (c) 2015 alpha1e0
'''
from pentest.libs.exploit import Exploit
from pentest.libs.exploit import Result
class DiscuzAB(Exploit):
    expName = u"DiscuzX 3.2 bypass virtual-credit payment to view restricted content"
version = "1.0"
author = "alpha1e0"
language = "php"
appName = "discuz"
appVersion = "x3.2"
reference = ['http://www.secpulse.com/archives/33393.html','http://www.wooyun.org/bugs/wooyun-2010-099659']
description = u'''
    Exploit conditions: 1. DiscuzX 3.2; 2. no other permission restrictions are configured.
gh: inurl:forum.php "金币 才能浏览"
'''
def _verify(self):
result = Result(self)
sig = u"才能浏览
|
userAgent = "Mozilla/5.0 (compatible; Baiduspider/2.0; +http://www.baidu.com/search/spider.html)"
#userAgent = "Mozilla/5.0 (compatible; Baiduspider/2.0; +http://**.**.**.**/search/spider.html)"
headers = {'User-Agent':userAgent}
response = self.http.get(self.url)
response2 = self.http.get(self.url, headers=headers)
if response2.status_code==200:
            if sig.encode("utf-8") in response.content and sig.encode("gbk") in response.content and sig.encode("utf-8") not in response2.content and sig.encode("gbk") not in response2.content:
result['fullpath'] = self.url
result['payload'] = userAgent
return result
|
"
|
fs.rs
|
use std::fs;
use std::path::Path;
use fluent_bundle::FluentResource;
use ignore::{WalkBuilder, WalkState};
use snafu::*;
pub use unic_langid::{langid, langids, LanguageIdentifier};
use crate::error;
pub fn
|
<P: AsRef<Path>>(path: P) -> crate::Result<FluentResource> {
let path = path.as_ref();
resource_from_str(&fs::read_to_string(path).context(error::Fs { path })?)
}
pub fn resource_from_str(src: &str) -> crate::Result<FluentResource> {
FluentResource::try_new(src.to_owned())
.map_err(|(_, errs)| errs)
.context(error::Fluent)
}
pub fn resources_from_vec(srcs: &[String]) -> crate::Result<Vec<FluentResource>> {
let mut vec = Vec::with_capacity(srcs.len());
for src in srcs {
vec.push(resource_from_str(&src)?);
}
Ok(vec)
}
pub(crate) fn read_from_dir<P: AsRef<Path>>(path: P) -> crate::Result<Vec<FluentResource>> {
let (tx, rx) = flume::unbounded();
WalkBuilder::new(path).build_parallel().run(|| {
let tx = tx.clone();
Box::new(move |result| {
if let Ok(entry) = result {
if entry
.file_type()
.as_ref()
.map_or(false, fs::FileType::is_file)
&& entry.path().extension().map_or(false, |e| e == "ftl")
{
if let Ok(string) = std::fs::read_to_string(entry.path()) {
let _ = tx.send(string);
} else {
log::warn!("Couldn't read {}", entry.path().display());
}
}
}
WalkState::Continue
})
});
resources_from_vec(&rx.drain().collect::<Vec<_>>())
}
#[cfg(test)]
mod tests {
use super::*;
use fluent_bundle::concurrent::FluentBundle;
use std::error::Error;
#[test]
fn test_load_from_dir() -> Result<(), Box<dyn Error>> {
let dir = tempfile::tempdir()?;
std::fs::write(dir.path().join("core.ftl"), "foo = bar\n".as_bytes())?;
std::fs::write(dir.path().join("other.ftl"), "bar = baz\n".as_bytes())?;
std::fs::write(dir.path().join("invalid.txt"), "baz = foo\n".as_bytes())?;
std::fs::write(dir.path().join(".binary_file.swp"), &[0, 1, 2, 3, 4, 5])?;
let result = read_from_dir(dir.path())?;
assert_eq!(2, result.len()); // Doesn't include the binary file or the txt file
let mut bundle = FluentBundle::new(vec![unic_langid::langid!("en-US")]);
for resource in &result {
bundle.add_resource(resource).unwrap();
}
let mut errors = Vec::new();
// Ensure the correct files were loaded
assert_eq!(
"bar",
bundle.format_pattern(
bundle.get_message("foo").and_then(|m| m.value).unwrap(),
None,
&mut errors
)
);
assert_eq!(
"baz",
bundle.format_pattern(
bundle.get_message("bar").and_then(|m| m.value).unwrap(),
None,
&mut errors
)
);
assert_eq!(None, bundle.get_message("baz")); // The extension was txt
Ok(())
}
}
|
read_from_file
|
zones.go
|
package dns
// Copyright (c) Microsoft and contributors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"context"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"net/http"
)
// ZonesClient is the DNS Management Client.
type ZonesClient struct {
BaseClient
}
// NewZonesClient creates an instance of the ZonesClient client.
func NewZonesClient(subscriptionID string) ZonesClient {
return NewZonesClientWithBaseURI(DefaultBaseURI, subscriptionID)
}
// NewZonesClientWithBaseURI creates an instance of the ZonesClient client.
func NewZonesClientWithBaseURI(baseURI string, subscriptionID string) ZonesClient
|
// CreateOrUpdate creates or updates a DNS zone. Does not modify DNS records within the zone.
//
// resourceGroupName is the name of the resource group. zoneName is the name of the DNS zone (without a terminating
// dot). parameters is parameters supplied to the CreateOrUpdate operation. ifMatch is the etag of the DNS zone.
// Omit this value to always overwrite the current zone. Specify the last-seen etag value to prevent accidentally
// overwriting any concurrent changes. ifNoneMatch is set to '*' to allow a new DNS zone to be created, but to
// prevent updating an existing zone. Other values will be ignored.
func (client ZonesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, zoneName string, parameters Zone, ifMatch string, ifNoneMatch string) (result Zone, err error) {
req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, zoneName, parameters, ifMatch, ifNoneMatch)
if err != nil {
err = autorest.NewErrorWithError(err, "dns.ZonesClient", "CreateOrUpdate", nil, "Failure preparing request")
return
}
resp, err := client.CreateOrUpdateSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "dns.ZonesClient", "CreateOrUpdate", resp, "Failure sending request")
return
}
result, err = client.CreateOrUpdateResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "dns.ZonesClient", "CreateOrUpdate", resp, "Failure responding to request")
}
return
}
// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
func (client ZonesClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, zoneName string, parameters Zone, ifMatch string, ifNoneMatch string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
"zoneName": autorest.Encode("path", zoneName),
}
const APIVersion = "2018-03-01-preview"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPut(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/dnsZones/{zoneName}", pathParameters),
autorest.WithJSON(parameters),
autorest.WithQueryParameters(queryParameters))
if len(ifMatch) > 0 {
preparer = autorest.DecoratePreparer(preparer,
autorest.WithHeader("If-Match", autorest.String(ifMatch)))
}
if len(ifNoneMatch) > 0 {
preparer = autorest.DecoratePreparer(preparer,
autorest.WithHeader("If-None-Match", autorest.String(ifNoneMatch)))
}
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
// http.Response Body if it receives an error.
func (client ZonesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
}
// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
// closes the http.Response Body.
func (client ZonesClient) CreateOrUpdateResponder(resp *http.Response) (result Zone, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// Delete deletes a DNS zone. WARNING: All DNS records in the zone will also be deleted. This operation cannot be
// undone.
//
// resourceGroupName is the name of the resource group. zoneName is the name of the DNS zone (without a terminating
// dot). ifMatch is the etag of the DNS zone. Omit this value to always delete the current zone. Specify the
// last-seen etag value to prevent accidentally deleting any concurrent changes.
func (client ZonesClient) Delete(ctx context.Context, resourceGroupName string, zoneName string, ifMatch string) (result ZonesDeleteFuture, err error) {
req, err := client.DeletePreparer(ctx, resourceGroupName, zoneName, ifMatch)
if err != nil {
err = autorest.NewErrorWithError(err, "dns.ZonesClient", "Delete", nil, "Failure preparing request")
return
}
result, err = client.DeleteSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "dns.ZonesClient", "Delete", result.Response(), "Failure sending request")
return
}
return
}
// DeletePreparer prepares the Delete request.
func (client ZonesClient) DeletePreparer(ctx context.Context, resourceGroupName string, zoneName string, ifMatch string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
"zoneName": autorest.Encode("path", zoneName),
}
const APIVersion = "2018-03-01-preview"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsDelete(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/dnsZones/{zoneName}", pathParameters),
autorest.WithQueryParameters(queryParameters))
if len(ifMatch) > 0 {
preparer = autorest.DecoratePreparer(preparer,
autorest.WithHeader("If-Match", autorest.String(ifMatch)))
}
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// DeleteSender sends the Delete request. The method will close the
// http.Response Body if it receives an error.
func (client ZonesClient) DeleteSender(req *http.Request) (future ZonesDeleteFuture, err error) {
sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client))
future.Future = azure.NewFuture(req)
future.req = req
_, err = future.Done(sender)
if err != nil {
return
}
err = autorest.Respond(future.Response(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent))
return
}
// DeleteResponder handles the response to the Delete request. The method always
// closes the http.Response Body.
func (client ZonesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
autorest.ByClosing())
result.Response = resp
return
}
// Get gets a DNS zone. Retrieves the zone properties, but not the record sets within the zone.
//
// resourceGroupName is the name of the resource group. zoneName is the name of the DNS zone (without a terminating
// dot).
func (client ZonesClient) Get(ctx context.Context, resourceGroupName string, zoneName string) (result Zone, err error) {
req, err := client.GetPreparer(ctx, resourceGroupName, zoneName)
if err != nil {
err = autorest.NewErrorWithError(err, "dns.ZonesClient", "Get", nil, "Failure preparing request")
return
}
resp, err := client.GetSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "dns.ZonesClient", "Get", resp, "Failure sending request")
return
}
result, err = client.GetResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "dns.ZonesClient", "Get", resp, "Failure responding to request")
}
return
}
// GetPreparer prepares the Get request.
func (client ZonesClient) GetPreparer(ctx context.Context, resourceGroupName string, zoneName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
"zoneName": autorest.Encode("path", zoneName),
}
const APIVersion = "2018-03-01-preview"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/dnsZones/{zoneName}", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// GetSender sends the Get request. The method will close the
// http.Response Body if it receives an error.
func (client ZonesClient) GetSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
}
// GetResponder handles the response to the Get request. The method always
// closes the http.Response Body.
func (client ZonesClient) GetResponder(resp *http.Response) (result Zone, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// List lists the DNS zones in all resource groups in a subscription.
//
// top is the maximum number of DNS zones to return. If not specified, returns up to 100 zones.
func (client ZonesClient) List(ctx context.Context, top *int32) (result ZoneListResultPage, err error) {
result.fn = client.listNextResults
req, err := client.ListPreparer(ctx, top)
if err != nil {
err = autorest.NewErrorWithError(err, "dns.ZonesClient", "List", nil, "Failure preparing request")
return
}
resp, err := client.ListSender(req)
if err != nil {
result.zlr.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "dns.ZonesClient", "List", resp, "Failure sending request")
return
}
result.zlr, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "dns.ZonesClient", "List", resp, "Failure responding to request")
}
return
}
// ListPreparer prepares the List request.
func (client ZonesClient) ListPreparer(ctx context.Context, top *int32) (*http.Request, error) {
pathParameters := map[string]interface{}{
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2018-03-01-preview"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
if top != nil {
queryParameters["$top"] = autorest.Encode("query", *top)
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Network/dnszones", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// ListSender sends the List request. The method will close the
// http.Response Body if it receives an error.
func (client ZonesClient) ListSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
}
// ListResponder handles the response to the List request. The method always
// closes the http.Response Body.
func (client ZonesClient) ListResponder(resp *http.Response) (result ZoneListResult, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// listNextResults retrieves the next set of results, if any.
func (client ZonesClient) listNextResults(lastResults ZoneListResult) (result ZoneListResult, err error) {
req, err := lastResults.zoneListResultPreparer()
if err != nil {
return result, autorest.NewErrorWithError(err, "dns.ZonesClient", "listNextResults", nil, "Failure preparing next results request")
}
if req == nil {
return
}
resp, err := client.ListSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
return result, autorest.NewErrorWithError(err, "dns.ZonesClient", "listNextResults", resp, "Failure sending next results request")
}
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "dns.ZonesClient", "listNextResults", resp, "Failure responding to next results request")
}
return
}
// ListComplete enumerates all values, automatically crossing page boundaries as required.
func (client ZonesClient) ListComplete(ctx context.Context, top *int32) (result ZoneListResultIterator, err error) {
result.page, err = client.List(ctx, top)
return
}
// ListByResourceGroup lists the DNS zones within a resource group.
//
// resourceGroupName is the name of the resource group. top is the maximum number of record sets to return. If not
// specified, returns up to 100 record sets.
func (client ZonesClient) ListByResourceGroup(ctx context.Context, resourceGroupName string, top *int32) (result ZoneListResultPage, err error) {
result.fn = client.listByResourceGroupNextResults
req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName, top)
if err != nil {
err = autorest.NewErrorWithError(err, "dns.ZonesClient", "ListByResourceGroup", nil, "Failure preparing request")
return
}
resp, err := client.ListByResourceGroupSender(req)
if err != nil {
result.zlr.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "dns.ZonesClient", "ListByResourceGroup", resp, "Failure sending request")
return
}
result.zlr, err = client.ListByResourceGroupResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "dns.ZonesClient", "ListByResourceGroup", resp, "Failure responding to request")
}
return
}
// ListByResourceGroupPreparer prepares the ListByResourceGroup request.
func (client ZonesClient) ListByResourceGroupPreparer(ctx context.Context, resourceGroupName string, top *int32) (*http.Request, error) {
pathParameters := map[string]interface{}{
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2018-03-01-preview"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
if top != nil {
queryParameters["$top"] = autorest.Encode("query", *top)
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/dnsZones", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the
// http.Response Body if it receives an error.
func (client ZonesClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
}
// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always
// closes the http.Response Body.
func (client ZonesClient) ListByResourceGroupResponder(resp *http.Response) (result ZoneListResult, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// listByResourceGroupNextResults retrieves the next set of results, if any.
func (client ZonesClient) listByResourceGroupNextResults(lastResults ZoneListResult) (result ZoneListResult, err error) {
req, err := lastResults.zoneListResultPreparer()
if err != nil {
return result, autorest.NewErrorWithError(err, "dns.ZonesClient", "listByResourceGroupNextResults", nil, "Failure preparing next results request")
}
if req == nil {
return
}
resp, err := client.ListByResourceGroupSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
return result, autorest.NewErrorWithError(err, "dns.ZonesClient", "listByResourceGroupNextResults", resp, "Failure sending next results request")
}
result, err = client.ListByResourceGroupResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "dns.ZonesClient", "listByResourceGroupNextResults", resp, "Failure responding to next results request")
}
return
}
// ListByResourceGroupComplete enumerates all values, automatically crossing page boundaries as required.
func (client ZonesClient) ListByResourceGroupComplete(ctx context.Context, resourceGroupName string, top *int32) (result ZoneListResultIterator, err error) {
result.page, err = client.ListByResourceGroup(ctx, resourceGroupName, top)
return
}
// Update updates a DNS zone. Does not modify DNS records within the zone.
//
// resourceGroupName is the name of the resource group. zoneName is the name of the DNS zone (without a terminating
// dot). parameters is parameters supplied to the Update operation. ifMatch is the etag of the DNS zone. Omit this
// value to always overwrite the current zone. Specify the last-seen etag value to prevent accidentally
// overwriting any concurrent changes.
func (client ZonesClient) Update(ctx context.Context, resourceGroupName string, zoneName string, parameters ZoneUpdate, ifMatch string) (result Zone, err error) {
req, err := client.UpdatePreparer(ctx, resourceGroupName, zoneName, parameters, ifMatch)
if err != nil {
err = autorest.NewErrorWithError(err, "dns.ZonesClient", "Update", nil, "Failure preparing request")
return
}
resp, err := client.UpdateSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "dns.ZonesClient", "Update", resp, "Failure sending request")
return
}
result, err = client.UpdateResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "dns.ZonesClient", "Update", resp, "Failure responding to request")
}
return
}
// UpdatePreparer prepares the Update request.
func (client ZonesClient) UpdatePreparer(ctx context.Context, resourceGroupName string, zoneName string, parameters ZoneUpdate, ifMatch string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
"zoneName": autorest.Encode("path", zoneName),
}
const APIVersion = "2018-03-01-preview"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPatch(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/dnsZones/{zoneName}", pathParameters),
autorest.WithJSON(parameters),
autorest.WithQueryParameters(queryParameters))
if len(ifMatch) > 0 {
preparer = autorest.DecoratePreparer(preparer,
autorest.WithHeader("If-Match", autorest.String(ifMatch)))
}
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// UpdateSender sends the Update request. The method will close the
// http.Response Body if it receives an error.
func (client ZonesClient) UpdateSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
}
// UpdateResponder handles the response to the Update request. The method always
// closes the http.Response Body.
func (client ZonesClient) UpdateResponder(resp *http.Response) (result Zone, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
|
{
return ZonesClient{NewWithBaseURI(baseURI, subscriptionID)}
}
|
AbstractPager.js
|
"use strict";
class
|
{
constructor(isLogined){
this.isLogined = isLogined;
}
    /* Subclasses must implement this. */
_render(){
        throw new Error('Subclasses must implement this.');
}
render(){
return `
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<link rel="stylesheet" href="http://cdn.bootcss.com/bootstrap/3.3.5/css/bootstrap.min.css"/>
</head>
<body class="container">
<div class="jumbotron">
<h1>Node.js Blog Demo</h1>
</div>
<div>
${this.isLogined ? `` : `<a class="btn btn-success" href="/login">登录</a>`}
${this.isLogined ? `<a class="btn btn-default" href="/logout">退出</a><a class="btn btn-success" href="/add">添加</a>`:``}
</div>
${this._render()}
</body>
</html>
`;
}
}
module.exports = AbstractPager;
|
AbstractPager
|
issueAndTms.ts
|
import { suite, test } from '@testdeck/mocha'
import { issue, testCaseId } from '../../../src'
import { BaseTest } from './baseTest'
@suite
class
|
extends BaseTest {
@issue('4')
@testCaseId('5')
@test
shouldAssignDecoratedIssueAndTms() {}
}
|
IssueAndTms
|
dual_stream.rs
|
use super::tls_stream::TlsServerStream;
use super::websocket_stream::WebsocketServerStream;
use crate::Config;
use anyhow::Result;
use log::*;
use std::{
io,
pin::Pin,
task::{Context, Poll},
};
use tokio::{
io::{AsyncRead, AsyncWrite},
time::timeout,
};
pub struct ServerStream {
inner: Box<dyn super::AsyncIO>,
}
impl ServerStream {
pub async fn connect(config: &Config) -> Result<Self> {
let connection = timeout(
config.server_connection_timeout(),
TlsServerStream::connect(config, config.relay_tls_port()),
)
.await;
let connection: Box<dyn super::AsyncIO> = match connection {
Ok(Ok(con)) => Box::new(con),
            err => {
error!(
"Failed to connect via TLS, falling back to websocket: {}",
match err {
Err(err) => err.to_string(),
Ok(Err(err)) => err.to_string(),
_ => unreachable!(),
}
);
let ws = timeout(
config.server_connection_timeout(),
WebsocketServerStream::connect(config),
)
.await??;
Box::new(ws)
}
};
Ok(Self { inner: connection })
}
}
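// A minimal usage sketch (hypothetical; assumes a populated `Config`):
//
//     let stream = ServerStream::connect(&config).await?;
//     // The returned stream implements AsyncRead + AsyncWrite, so callers can
//     // treat it uniformly whether the TLS or the websocket transport was used.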
impl AsyncRead for ServerStream {
fn poll_read(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut [u8],
) -> Poll<io::Result<usize>> {
Pin::new(&mut self.inner).poll_read(cx, buf)
}
}
impl AsyncWrite for ServerStream {
fn poll_write(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &[u8],
) -> std::task::Poll<Result<usize, io::Error>> {
Pin::new(&mut self.inner).poll_write(cx, buf)
}
fn
|
(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
Pin::new(&mut self.inner).poll_flush(cx)
}
fn poll_shutdown(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Result<(), io::Error>> {
Pin::new(&mut self.inner).poll_shutdown(cx)
}
}
|
poll_flush
|
auth.rs
|
extern crate openssl;
use std::fs::File;
use std::fs::{self, DirBuilder};
use std::io::prelude::*;
use std::os::unix::fs::PermissionsExt;
use self::openssl::hash::MessageDigest;
use self::openssl::pkey::PKey;
use self::openssl::rsa::Rsa;
use self::openssl::x509::X509Generator;
use self::openssl::x509::extension::{Extension, KeyUsageOption};
use config;
/// Helper function for creating a cert/key using filename
fn create_cert(years: u32, org: &str, filename: &str, key_options: Vec<KeyUsageOption>) {
create_auth_dir();
// Configure x509
|
let rsa = Rsa::generate(4096).unwrap();
let pkey = PKey::from_rsa(rsa).unwrap();
let gen = X509Generator::new()
.set_valid_period(365*years)
.add_name("CN".to_owned(), org.to_owned())
.set_sign_hash(MessageDigest::sha256())
.add_extension(Extension::KeyUsage(key_options));
let cert = gen.sign(&pkey).unwrap().to_pem();
let pkey_pem = pkey.private_key_to_pem();
// Write the certificate
let cert_file = config::get_auth_dir() + "/" + filename + ".pem";
let mut f = File::create(&cert_file).expect("Unable to create certificate file.");
for line in cert.unwrap() {
f.write(&[line]).expect("Error writing certificate!");
}
let metadata = f.metadata().expect("Unable to get file metadata");
let mut perms = metadata.permissions();
perms.set_mode(0o400);
fs::set_permissions(&cert_file, perms).expect("Unable to set certificate file to read-only");
// Write the private key
let pkey_file = config::get_auth_dir() + "/" + filename + ".key";
let mut f = File::create(&pkey_file).expect("Unable to create the private key");
for line in pkey_pem.unwrap() {
f.write(&[line]).expect("Error writing private key");
}
let metadata = f.metadata().expect("Unable to get file metadata");
let mut perms = metadata.permissions();
perms.set_mode(0o400);
fs::set_permissions(&pkey_file, perms).expect("Unable to set CA file to read-only");
}
/// Build client authentication certificates
pub fn build_client_auth(years: u32, org: &str) {
println!("Building authentication.");
create_auth_dir();
create_cert(years, org, "client", vec![KeyUsageOption::DigitalSignature]);
}
/// Build CA certificate for signing all client certificates.
/// NOTE: by design the CA certificate will not be able to sign transactions. It is
/// only used to authorize/de-authorize client certificates.
pub fn build_ca(years: u32, org: &str) {
println!("Creating CA");
create_auth_dir();
create_cert(years, org, "ca", vec![KeyUsageOption::KeyCertSign, KeyUsageOption::CRLSign]);
}
/// Ensure the auth directory exists
fn create_auth_dir() {
let path = config::get_auth_dir();
DirBuilder::new()
.recursive(true)
.create(path).unwrap();
}
| |
camera.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import rospkg
import threading
import yaml
from copy import deepcopy
import message_filters
import numpy as np
import pyrobot.util as prutil
import rospy
from cv_bridge import CvBridge, CvBridgeError
from pyrobot.core import Camera
from sensor_msgs.msg import CameraInfo
from sensor_msgs.msg import Image
from sensor_msgs.msg import JointState
from std_msgs.msg import Float64
from tf import TransformListener
def constrain_within_range(value, MIN, MAX):
return min(max(value, MIN), MAX)
def is_within_range(value, MIN, MAX):
return (value <= MAX) and (value >= MIN)
class SimpleCamera(Camera):
"""
This is camera class that interfaces with the Realsense
camera on the locobot and locobot-lite.
This class does not have the pan and tilt actuation
capabilities for the camera.
"""
def __init__(self, configs):
"""
Constructor of the SimpleCamera class.
:param configs: Camera specific configuration object
:type configs: YACS CfgNode
"""
super(SimpleCamera, self).__init__(configs=configs)
self.cv_bridge = CvBridge()
self.camera_info_lock = threading.RLock()
self.camera_img_lock = threading.RLock()
self._tf_listener = TransformListener()
self.rgb_img = None
self.depth_img = None
self.camera_info = None
self.camera_P = None
rospy.Subscriber(self.configs.CAMERA.ROSTOPIC_CAMERA_INFO_STREAM,
CameraInfo,
self._camera_info_callback)
rgb_topic = self.configs.CAMERA.ROSTOPIC_CAMERA_RGB_STREAM
self.rgb_sub = message_filters.Subscriber(rgb_topic, Image)
depth_topic = self.configs.CAMERA.ROSTOPIC_CAMERA_DEPTH_STREAM
self.depth_sub = message_filters.Subscriber(depth_topic, Image)
img_subs = [self.rgb_sub, self.depth_sub]
self.sync = message_filters.ApproximateTimeSynchronizer(img_subs,
queue_size=10,
slop=0.2)
self.sync.registerCallback(self._sync_callback)
depth_threshold = (self.configs.BASE.VSLAM.DEPTH_MIN,
self.configs.BASE.VSLAM.DEPTH_MAX)
cfg_filename = self.configs.BASE.VSLAM.CFG_FILENAME
self.depth_cam = DepthImgProcessor(subsample_pixs=1,
depth_threshold=depth_threshold,
cfg_filename=cfg_filename)
self.cam_cf = self.configs.BASE.VSLAM.RGB_CAMERA_CENTER_FRAME
self.base_f = self.configs.BASE.VSLAM.VSLAM_BASE_FRAME
def _sync_callback(self, rgb, depth):
self.camera_img_lock.acquire()
try:
self.rgb_img = self.cv_bridge.imgmsg_to_cv2(rgb, "bgr8")
self.rgb_img = self.rgb_img[:, :, ::-1]
self.depth_img = self.cv_bridge.imgmsg_to_cv2(depth, "passthrough")
except CvBridgeError as e:
rospy.logerr(e)
self.camera_img_lock.release()
def _camera_info_callback(self, msg):
self.camera_info_lock.acquire()
self.camera_info = msg
self.camera_P = np.array(msg.P).reshape((3, 4))
self.camera_info_lock.release()
def get_rgb(self):
'''
This function returns the RGB image perceived by the camera.
:rtype: np.ndarray or None
'''
self.camera_img_lock.acquire()
rgb = deepcopy(self.rgb_img)
self.camera_img_lock.release()
return rgb
def get_depth(self):
'''
This function returns the depth image perceived by the camera.
:rtype: np.ndarray or None
'''
self.camera_img_lock.acquire()
depth = deepcopy(self.depth_img)
self.camera_img_lock.release()
return depth
def get_rgb_depth(self):
'''
This function returns both the RGB and depth
images perceived by the camera.
:rtype: np.ndarray or None
'''
self.camera_img_lock.acquire()
rgb = deepcopy(self.rgb_img)
depth = deepcopy(self.depth_img)
self.camera_img_lock.release()
return rgb, depth
def get_intrinsics(self):
"""
This function returns the camera intrinsics.
:rtype: np.ndarray
"""
if self.camera_P is None:
return self.camera_P
self.camera_info_lock.acquire()
P = deepcopy(self.camera_P)
self.camera_info_lock.release()
return P[:3, :3]
def get_current_pcd(self, in_cam=True):
"""
Return the point cloud at current time step (one frame only)
:param in_cam: return points in camera frame,
otherwise, return points in base frame
:type in_cam: bool
:returns: tuple (pts, colors)
pts: point coordinates in world frame (shape: :math:`[N, 3]`)
colors: rgb values for pts_in_cam (shape: :math:`[N, 3]`)
:rtype: tuple(np.ndarray, np.ndarray)
"""
trans, rot, T = self.get_link_transform(self.cam_cf,
self.base_f)
base2cam_trans = np.array(trans).reshape(-1, 1)
base2cam_rot = np.array(rot)
rgb_im, depth_im = self.get_rgb_depth()
pcd_in_cam, colors = self.depth_cam.get_pcd_ic(depth_im=depth_im,
rgb_im=rgb_im)
pts = pcd_in_cam[:3, :].T
if in_cam:
return pts, colors
pts = np.dot(pts, base2cam_rot.T)
pts = pts + base2cam_trans.T
return pts, colors
def pix_to_3dpt(self, rs, cs, in_cam=False):
"""
Get the 3D points of the pixels in RGB images.
:param rs: rows of interest in the RGB image.
It can be a list or 1D numpy array
which contains the row indices.
The default value is None,
which means all rows.
:param cs: columns of interest in the RGB image.
It can be a list or 1D numpy array
which contains the column indices.
The default value is None,
which means all columns.
:param in_cam: return points in camera frame,
otherwise, return points in base frame
:type rs: list or np.ndarray
:type cs: list or np.ndarray
:type in_cam: bool
:returns: tuple (pts, colors)
pts: point coordinates in world frame
(shape: :math:`[N, 3]`)
colors: rgb values for pts_in_cam
(shape: :math:`[N, 3]`)
:rtype: tuple(np.ndarray, np.ndarray)
"""
trans, rot, T = self.get_link_transform(self.cam_cf,
self.base_f)
base2cam_trans = np.array(trans).reshape(-1, 1)
base2cam_rot = np.array(rot)
rgb_im, depth_im = self.get_rgb_depth()
pcd_in_cam = self.depth_cam.get_pix_3dpt(depth_im=depth_im,
rs=rs,
cs=cs)
pts = pcd_in_cam[:3, :].T
colors = rgb_im[rs, cs].reshape(-1, 3)
if in_cam:
return pts, colors
pts = np.dot(pts, base2cam_rot.T)
pts = pts + base2cam_trans.T
return pts, colors
def get_link_transform(self, src, tgt):
"""
Returns the latest transformation from the
target_frame to the source frame,
i.e., the transform of source frame w.r.t
target frame. If the returned
transform is applied to data, it will transform
data in the source_frame into
the target_frame
For more information, please refer to
http://wiki.ros.org/tf/Overview/Using%20Published%20Transforms
:param src: source frame
:param tgt: target frame
:type src: string
:type tgt: string
:returns: tuple(trans, rot, T)
trans: translational vector (shape: :math:`[3,]`)
rot: rotation matrix (shape: :math:`[3, 3]`)
T: transformation matrix (shape: :math:`[4, 4]`)
:rtype: tuple(np.ndarray, np.ndarray, np.ndarray)
"""
trans, quat = prutil.get_tf_transform(self._tf_listener,
tgt,
src)
rot = prutil.quat_to_rot_mat(quat)
T = np.eye(4)
T[:3, :3] = rot
T[:3, 3] = trans
return trans, rot, T
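# Illustrative usage sketch (added for clarity, not part of the original class):
# applying the (trans, rot, T) returned by get_link_transform to move a point
# from the camera frame into the base frame, mirroring the math used in
# get_current_pcd above. `cam` is assumed to be a SimpleCamera instance.
#
#   trans, rot, T = cam.get_link_transform(cam.cam_cf, cam.base_f)
#   p_cam = np.array([0.1, 0.0, 0.5])                # a point in the camera frame
#   p_base = np.dot(rot, p_cam) + np.array(trans)    # the same point in the base frame
#   # or, in homogeneous coordinates:
#   # p_base_h = np.dot(T, np.append(p_cam, 1.0))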
class LoCoBotCamera(SimpleCamera):
"""
This is camera class that interfaces with the Realsense
camera and the pan and tilt joints on the robot.
"""
def __init__(self, configs):
"""
Constructor of the LoCoBotCamera class.
:param configs: Object containing configurations for camera,
pan joint and tilt joint.
:type configs: YACS CfgNode
"""
use_camera = rospy.get_param('use_camera', False)
use_sim = rospy.get_param('use_sim', False)
use_camera = use_camera or use_sim
if not use_camera:
rospy.logwarn('Neither use_camera nor use_sim is set'
' to True when the LoCoBot driver is launched.'
' You may not be able to command the camera'
' correctly using PyRobot!!!')
return
super(LoCoBotCamera, self).__init__(configs=configs)
rospy.Subscriber(self.configs.ARM.ROSTOPIC_JOINT_STATES,
JointState,
self._camera_pose_callback)
self.set_pan_pub = rospy.Publisher(
self.configs.CAMERA.ROSTOPIC_SET_PAN, Float64, queue_size=1)
self.set_tilt_pub = rospy.Publisher(
self.configs.CAMERA.ROSTOPIC_SET_TILT, Float64, queue_size=1)
self.pan = None
self.tilt = None
self.tol = 0.01
def _camera_pose_callback(self, msg):
if 'head_pan_joint' in msg.name:
pan_id = msg.name.index('head_pan_joint')
self.pan = msg.position[pan_id]
if 'head_tilt_joint' in msg.name:
tilt_id = msg.name.index('head_tilt_joint')
self.tilt = msg.position[tilt_id]
@property
def state(self):
"""
Return the current pan and tilt joint angles of the robot camera.
:return:
pan_tilt: A list of the form [pan angle, tilt angle]
:rtype: list
"""
return self.get_state()
def get_state(self):
"""
Return the current pan and tilt joint angles of the robot camera.
:return:
pan_tilt: A list of the form [pan angle, tilt angle]
:rtype: list
"""
return [self.pan, self.tilt]
def get_pan(self):
"""
Return the current pan joint angle of the robot camera.
:return:
pan: Pan joint angle
:rtype: float
"""
return self.pan
def get_tilt(self):
"""
Return the current tilt joint angle of the robot camera.
:return:
tilt: Tilt joint angle
:rtype: float
"""
return self.tilt
def set_pan(self, pan, wait=True):
"""
Sets the pan joint angle to the specified value.
:param pan: value to be set for pan joint
:param wait: wait until the pan angle is set to
the target angle.
:type pan: float
:type wait: bool
"""
pan = constrain_within_range(np.mod(pan + np.pi,
2 * np.pi) - np.pi,
self.configs.CAMERA.MIN_PAN,
self.configs.CAMERA.MAX_PAN)
self.set_pan_pub.publish(pan)
if wait:
for i in range(30):
rospy.sleep(0.1)
if np.fabs(self.get_pan() - pan) < self.tol:
break
def set_tilt(self, tilt, wait=True):
"""
Sets the tilt joint angle to the specified value.
:param tilt: value to be set for the tilt joint
:param wait: wait until the tilt angle is set to
the target angle.
:type tilt: float
:type wait: bool
"""
tilt = constrain_within_range(np.mod(tilt + np.pi,
2 * np.pi) - np.pi,
self.configs.CAMERA.MIN_TILT,
self.configs.CAMERA.MAX_TILT)
self.set_tilt_pub.publish(tilt)
if wait:
for i in range(30):
rospy.sleep(0.1)
if np.fabs(self.get_tilt() - tilt) < self.tol:
break
def set_pan_tilt(self, pan, tilt, wait=True):
"""
Sets both the pan and tilt joint angles to the specified values.
:param pan: value to be set for pan joint
:param tilt: value to be set for the tilt joint
:param wait: wait until the pan and tilt angles are set to
the target angles.
:type pan: float
:type tilt: float
:type wait: bool
"""
pan = constrain_within_range(np.mod(pan + np.pi,
2 * np.pi) - np.pi,
self.configs.CAMERA.MIN_PAN,
self.configs.CAMERA.MAX_PAN)
tilt = constrain_within_range(np.mod(tilt + np.pi,
2 * np.pi) - np.pi,
self.configs.CAMERA.MIN_TILT,
self.configs.CAMERA.MAX_TILT)
self.set_pan_pub.publish(pan)
self.set_tilt_pub.publish(tilt)
if wait:
for i in range(30):
rospy.sleep(0.1)
if np.fabs(self.get_pan() - pan) < self.tol and \
np.fabs(self.get_tilt() - tilt) < self.tol:
break
def reset(self):
"""
This function resets the pan and tilt joints by actuating
them to their home configuration.
"""
self.set_pan_tilt(self.configs.CAMERA.RESET_PAN,
self.configs.CAMERA.RESET_TILT)
class DepthImgProcessor:
"""
This class transforms the depth image and rgb image to point cloud
"""
def __init__(self, subsample_pixs=1, depth_threshold=(0, 1.5),
cfg_filename='realsense_d435.yaml'):
"""
The constructor for :class:`DepthImgProcessor` class.
:param subsample_pixs: sample rows and columns for the images
:param depth_threshold: minimum and maximum of valid depth values
:param cfg_filename: configuration file name for ORB-SLAM2
:type subsample_pixs: int
:type depth_threshold: tuple
:type cfg_filename: string
"""
assert (type(depth_threshold) is tuple and
0 < len(depth_threshold) < 3) or \
(depth_threshold is None)
self.subsample_pixs = subsample_pixs
self.depth_threshold = depth_threshold
self.cfg_data = self.read_cfg(cfg_filename)
self.intrinsic_mat = self.get_intrinsic()
self.intrinsic_mat_inv = np.linalg.inv(self.intrinsic_mat)
img_pixs = np.mgrid[0: self.cfg_data['Camera.height']: subsample_pixs,
0: self.cfg_data['Camera.width']: subsample_pixs]
img_pixs = img_pixs.reshape(2, -1)
img_pixs[[0, 1], :] = img_pixs[[1, 0], :]
self.uv_one = np.concatenate((img_pixs,
np.ones((1, img_pixs.shape[1]))))
self.uv_one_in_cam = np.dot(self.intrinsic_mat_inv, self.uv_one)
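# Note added for clarity: `self.uv_one_in_cam` stores K^{-1} [u, v, 1]^T for every
# sampled pixel, so a 3D point in the camera frame is recovered by the pinhole
# model simply as
#   p_cam = depth(u, v) * K^{-1} [u, v, 1]^T
# which is what get_pix_3dpt and get_pcd_ic compute by multiplying these
# precomputed rays with the (scaled) depth values.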
def get_pix_3dpt(self, depth_im, rs, cs):
"""
:param depth_im: depth image (shape: :math:`[H, W]`)
:param rs: rows of interest. It can be an int, a list, or a 1D numpy
array which contains the row indices.
:param cs: columns of interest. It can be an int, a list, or a 1D
numpy array which contains the column indices.
:type depth_im: np.ndarray
:type rs: list or np.ndarray
:type cs: list or np.ndarray
:return: 3D point coordinates of the pixels in
camera frame (shape: :math:`[4, N]`)
:rtype np.ndarray
"""
assert isinstance(rs,
int) or isinstance(rs,
list) or isinstance(rs,
np.ndarray)
assert isinstance(cs,
int) or isinstance(cs,
list) or isinstance(cs,
np.ndarray)
if isinstance(rs, int):
rs = [rs]
if isinstance(cs, int):
cs = [cs]
if isinstance(rs, np.ndarray):
rs = rs.flatten()
if isinstance(cs, np.ndarray):
cs = cs.flatten()
depth_im = depth_im[rs, cs]
depth = depth_im.reshape(-1) / float(self.cfg_data['DepthMapFactor'])
img_pixs = np.stack((rs, cs)).reshape(2, -1)
img_pixs[[0, 1], :] = img_pixs[[1, 0], :]
uv_one = np.concatenate((img_pixs,
np.ones((1, img_pixs.shape[1]))))
uv_one_in_cam = np.dot(self.intrinsic_mat_inv, uv_one)
pts_in_cam = np.multiply(uv_one_in_cam, depth)
pts_in_cam = np.concatenate((pts_in_cam,
np.ones((1, pts_in_cam.shape[1]))),
axis=0)
return pts_in_cam
def get_pcd_ic(self, depth_im, rgb_im=None):
|
def get_pcd_iw(self, pts_in_cam, extrinsic_mat):
"""
Returns the point cloud in the world coordinate frame
:param pts_in_cam: point coordinates in
camera frame (shape: :math:`[4, N]`)
:param extrinsic_mat: extrinsic matrix for
the camera (shape: :math:`[4, 4]`)
:type pts_in_cam: np.ndarray
:type extrinsic_mat: np.ndarray
:return: point coordinates in
ORB-SLAM2's world frame (shape: :math:`[N, 3]`)
:rtype: np.ndarray
"""
# pcd in world
pts_in_world = np.dot(extrinsic_mat, pts_in_cam)
pts_in_world = pts_in_world[:3, :].T
return pts_in_world
def read_cfg(self, cfg_filename):
"""
Reads the configuration file
:param cfg_filename: configuration file name for ORB-SLAM2
:type cfg_filename: string
:return: configurations in the configuration file
:rtype: dict
"""
rospack = rospkg.RosPack()
slam_pkg_path = rospack.get_path('orb_slam2_ros')
cfg_path = os.path.join(slam_pkg_path,
'cfg',
cfg_filename)
with open(cfg_path, 'r') as f:
# skip the first line (the '%YAML:1.0' directive), which PyYAML cannot parse
f.readline()
cfg_data = yaml.load(f)
return cfg_data
def get_intrinsic(self):
"""
Returns the intrinsic matrix of the camera
:return: the intrinsic matrix (shape: :math:`[3, 3]`)
:rtype: np.ndarray
"""
fx = self.cfg_data['Camera.fx']
fy = self.cfg_data['Camera.fy']
cx = self.cfg_data['Camera.cx']
cy = self.cfg_data['Camera.cy']
Itc = np.array([[fx, 0, cx],
[0, fy, cy],
[0, 0, 1]])
return Itc
|
"""
Returns the point cloud (filtered by minimum
and maximum depth threshold)
in camera's coordinate frame
:param depth_im: depth image (shape: :math:`[H, W]`)
:param rgb_im: rgb image (shape: :math:`[H, W, 3]`)
:type depth_im: np.ndarray
:type rgb_im: np.ndarray
:returns: tuple (pts_in_cam, rgb_im)
pts_in_cam: point coordinates in
camera frame (shape: :math:`[4, N]`)
rgb: rgb values for pts_in_cam (shape: :math:`[N, 3]`)
:rtype tuple(np.ndarray, np.ndarray)
"""
# pcd in camera from depth
depth_im = depth_im[0::self.subsample_pixs, 0::self.subsample_pixs]
rgb_im = rgb_im[0::self.subsample_pixs, 0::self.subsample_pixs]
depth = depth_im.reshape(-1) / float(self.cfg_data['DepthMapFactor'])
rgb = None
if rgb_im is not None:
rgb = rgb_im.reshape(-1, 3)
if self.depth_threshold is not None:
valid = depth > self.depth_threshold[0]
if len(self.depth_threshold) > 1:
valid = np.logical_and(valid,
depth < self.depth_threshold[1])
uv_one_in_cam = self.uv_one_in_cam[:, valid]
depth = depth[valid]
if rgb is not None:
    rgb = rgb[valid]
else:
uv_one_in_cam = self.uv_one_in_cam
pts_in_cam = np.multiply(uv_one_in_cam, depth)
pts_in_cam = np.concatenate((pts_in_cam,
np.ones((1, pts_in_cam.shape[1]))),
axis=0)
return pts_in_cam, rgb
|
redirect.js
|
'use strict'
async function
|
(server) {
server.route({
method: 'GET',
url: '/',
handler: async (_, reply) => {
reply.redirect('https://expo.dev/@nearform/optic-expo')
}
})
}
module.exports = redirectRoutes
|
redirectRoutes
|
log.rs
|
#![cfg(feature = "program")]
|
#[macro_export]
#[deprecated(
since = "1.4.3",
note = "Please use `safecoin_program::log::info` instead"
)]
macro_rules! info {
($msg:expr) => {
$crate::log::sol_log($msg)
};
($arg1:expr, $arg2:expr, $arg3:expr, $arg4:expr, $arg5:expr) => {
$crate::log::sol_log_64(
$arg1 as u64,
$arg2 as u64,
$arg3 as u64,
$arg4 as u64,
$arg5 as u64,
)
};
}
|
pub use safecoin_program::log::*;
|
index-test.js
|
import React from 'react';
import { expect } from 'chai';
import { mount, configure } from 'enzyme';
import {
Stage,
Layer,
Rect,
Group,
Image,
useStrictMode,
Text,
__matchRectVersion
} from '../src/ReactKonva';
import useImage from 'use-image';
import './mocking';
import Konva from 'konva';
import sinon from 'sinon/pkg/sinon';
import Adapter from 'enzyme-adapter-react-16';
configure({ adapter: new Adapter() });
describe('Test version matching', function() {
it('should match react version', function() {
expect(__matchRectVersion).to.equal(true);
});
});
describe('Test references', function() {
let instance;
class App extends React.Component {
render() {
return (
<Stage width={300} height={300} ref={node => (this.stage = node)}>
<Layer ref={node => (this.layer = node)} />
</Stage>
);
}
}
beforeEach(() => {
const wrapper = mount(<App />);
instance = wrapper.instance();
});
it('can get stage instance', function() {
const stageRef = instance.stage;
expect(stageRef.getStage() instanceof Konva.Stage).to.equal(true);
});
it('check initial props set', function() {
const stage = instance.stage.getStage();
expect(stage.width()).to.equal(300);
expect(stage.height()).to.equal(300);
});
it('can get layer instance', function() {
expect(instance.layer instanceof Konva.Layer).to.equal(true);
});
// how can we make this work?
it('stage ref should go to the stage', function() {
const stageRef = instance.stage;
expect(stageRef instanceof Konva.Stage).to.equal(true);
});
it('works ok with no ref', function() {
class App extends React.Component {
render() {
return (
<Stage width={300} height={300}>
<Layer ref={node => (this.layer = node)} />
</Stage>
);
}
}
const wrapper = mount(<App />);
instance = wrapper.instance();
});
it('works ok with react ref', function() {
class App extends React.Component {
stage = React.createRef();
render() {
return (
<Stage width={300} height={300} ref={this.stage}>
<Layer ref={node => (this.layer = node)} />
</Stage>
);
}
}
const wrapper = mount(<App />);
instance = wrapper.instance();
const stage = instance.stage.current;
expect(stage instanceof Konva.Stage).to.equal(true);
});
it('forward ref', function() {
const MyRect = React.forwardRef((props, ref) => <Rect ref={ref} />);
class App extends React.Component {
stage = React.createRef();
render() {
return (
<Stage width={300} height={300} ref={this.stage}>
<Layer ref={node => (this.layer = node)}>
<MyRect ref={node => (this.rect = node)} />
</Layer>
</Stage>
);
}
}
const wrapper = mount(<App />);
instance = wrapper.instance();
const rect = instance.rect;
expect(rect instanceof Konva.Rect).to.equal(true);
});
});
describe('Test stage component', function() {
it('can attach stage events', function() {
let eventCount = 0;
const handleEvent = () => {
eventCount += 1;
};
class App extends React.Component {
render() {
return (
<Stage
ref={node => (this.stage = node)}
width={300}
height={300}
onMouseDown={handleEvent}
>
<Layer ref={node => (this.layer = node)}>
<Rect ref={node => (this.rect = node)} width={100} height={100} />
</Layer>
</Stage>
);
}
}
const wrapper = mount(<App />);
const instance = wrapper.instance();
const stage = instance.stage.getStage();
stage.simulateMouseDown({ x: 50, y: 50 });
expect(eventCount).to.equal(1);
});
it('can attach stage content events', function() {
let eventCount = 0;
const handleEvent = () => {
eventCount += 1;
};
class App extends React.Component {
render() {
return (
<Stage
ref={node => (this.stage = node)}
width={300}
height={300}
onContentMouseDown={handleEvent}
>
<Layer ref={node => (this.layer = node)}>
<Rect ref={node => (this.rect = node)} width={100} height={100} />
</Layer>
</Stage>
);
}
}
const wrapper = mount(<App />);
const instance = wrapper.instance();
const stage = instance.stage.getStage();
stage.simulateMouseDown({ x: 50, y: 50 });
expect(eventCount).to.equal(1);
});
it('unmount stage should destroy it from Konva', () => {
class App extends React.Component {
render() {
if (this.props.skipStage) {
return <div />;
}
return (
<Stage ref={node => (this.stage = node)} width={300} height={300}>
<Layer ref={node => (this.layer = node)} />
</Stage>
);
}
}
const wrapper = mount(<App />);
const instance = wrapper.instance();
const stagesNumber = Konva.stages.length;
wrapper.setProps({ skipStage: true });
expect(Konva.stages.length).to.equal(stagesNumber - 1);
});
it('test null event', function() {
class App extends React.Component {
render() {
return (
<Stage
ref={node => (this.stage = node)}
width={300}
height={300}
onMouseDown={null}
>
<Layer ref={node => (this.layer = node)}>
<Rect ref={node => (this.rect = node)} width={100} height={100} />
</Layer>
</Stage>
);
}
}
const wrapper = mount(<App />);
const instance = wrapper.instance();
const stage = instance.stage.getStage();
stage.simulateMouseDown({ x: 50, y: 50 });
});
});
describe('Test props setting', function() {
let instance, wrapper;
class App extends React.Component {
render() {
return (
<Stage ref={node => (this.stage = node)} width={300} height={300}>
<Layer ref={node => (this.layer = node)}>
<Rect ref={node => (this.rect = node)} {...this.props.rectProps} />
</Layer>
</Stage>
);
}
}
beforeEach(() => {
wrapper = mount(<App />);
instance = wrapper.instance();
});
it('can update component props', () => {
const rect = instance.rect;
// set new props
const props1 = {
width: 100,
height: 100
};
wrapper.setProps({ rectProps: props1 });
expect(rect.width()).to.equal(100);
const props2 = {
width: 200,
height: 100
};
wrapper.setProps({ rectProps: props2 });
expect(rect.width()).to.equal(200);
});
it('can update component events', () => {
const rect = instance.rect;
// set new props
const props1 = {
onClick: () => {}
};
wrapper.setProps({ rectProps: props1 });
expect(rect.eventListeners.click.length).to.equal(1);
expect(rect.eventListeners.click[0].handler).to.equal(props1.onClick);
const props2 = {
onClick: () => {}
};
wrapper.setProps({ rectProps: props2 });
expect(rect.eventListeners.click.length).to.equal(1);
expect(rect.eventListeners.click[0].handler).to.equal(props2.onClick);
});
it('updating props should call layer redraw', () => {
const layer = instance.layer;
sinon.spy(layer, 'batchDraw');
wrapper.setProps({
rectProps: {
fill: 'green'
}
});
wrapper.setProps({
rectProps: {
fill: 'red'
}
});
expect(layer.batchDraw.callCount).to.equal(2);
});
it('unset props', () => {
const rect = instance.rect;
wrapper.setProps({
rectProps: {
fill: 'red',
x: 10
}
});
expect(rect.fill()).to.equal('red');
wrapper.setProps({ rectProps: {} });
expect(!!rect.fill()).to.equal(false);
expect(rect.x()).to.equal(0);
});
it('do not overwrite properties if that changed manually', () => {
const rect = instance.rect;
wrapper.setProps({
rectProps: {
fill: 'red',
x: 10
}
});
expect(rect.x()).to.equal(10);
// change position manually
rect.x(20);
wrapper.setProps({
rectProps: {
fill: 'red',
x: 10
}
});
expect(rect.x()).to.equal(20);
});
it('overwrite properties if that changed manually in strict-mode', () => {
useStrictMode(true);
const rect = instance.rect;
wrapper.setProps({
rectProps: {
fill: 'red',
x: 10
}
});
expect(rect.x()).to.equal(10);
// change position manually
rect.x(20);
wrapper.setProps({
rectProps: {
fill: 'red',
x: 10
}
});
expect(rect.x()).to.equal(10);
useStrictMode(false);
});
it('overwrite properties if that passed _useStrictMode', () => {
const rect = instance.rect;
wrapper.setProps({
rectProps: {
fill: 'red',
x: 10
}
});
expect(rect.x()).to.equal(10);
// change position manually
rect.x(20);
wrapper.setProps({
rectProps: {
fill: 'red',
x: 10,
_useStrictMode: true
}
});
expect(rect.x()).to.equal(10);
});
it('call draw immediately after props change to get 60 fps', async () => {
const layer = instance.layer;
sinon.spy(layer, 'batchDraw');
sinon.spy(layer, 'draw');
wrapper.setProps({
rectProps: {
fill: 'green'
}
});
expect(layer.batchDraw.calledOnce).to.equal(true);
expect(layer.draw.notCalled).to.equal(true);
const startTime = Date.now();
await 1;
expect(layer.draw.calledOnce).to.equal(true);
// Normally it's 1 or 2 ms.
// We just need to make sure it's less than min frame length 16ms.
expect(Date.now() - startTime < 15).to.equal(true);
});
it('call draw once if batchDraw is called multiple times inside one event loop', async () => {
const layer = instance.layer;
sinon.spy(layer, 'batchDraw');
sinon.spy(layer, 'draw');
wrapper.setProps({
rectProps: {
fill: 'green'
}
});
wrapper.setProps({
rectProps: {
fill: 'red'
}
});
expect(layer.batchDraw.callCount).to.equal(2);
expect(layer.draw.callCount).to.equal(0);
await 1;
expect(layer.draw.callCount).to.equal(1);
});
});
describe('test lifecycle methods', () => {
let instance, wrapper;
class SubComponent extends React.Component {
// comment, as it will be removed
// componentWillMount() {
// this.props.componentWillMount();
// }
componentDidMount() {
this.props.componentDidMount();
}
// componentWillReceiveProps(newProps) {
// this.props.componentWillReceiveProps(newProps);
// }
shouldComponentUpdate() {
this.props.shouldComponentUpdate(...arguments);
return true;
}
// componentWillUpdate() {
// this.props.componentWillUpdate();
// }
componentDidUpdate() {
this.props.componentDidUpdate();
}
componentWillUnmount() {
this.props.componentWillUnmount();
}
render() {
return <Rect />;
}
}
class App extends React.Component {
render() {
return (
<Stage ref={node => (this.stage = node)} width={300} height={300}>
<Layer ref={node => (this.layer = node)}>
{this.props.dontDrawChildren ? null : (
<SubComponent {...this.props} />
)}
</Layer>
</Stage>
);
}
}
it('test mount', () => {
const props = {
// componentWillMount: sinon.spy(),
componentDidMount: sinon.spy()
};
wrapper = mount(<App {...props} />);
// expect(props.componentWillMount.called).to.equal(true);
expect(props.componentDidMount.called).to.equal(true);
});
it('test update', () => {
const props = {
// componentWillMount: sinon.spy(),
componentDidMount: sinon.spy(),
// componentWillReceiveProps: sinon.spy(),
shouldComponentUpdate: sinon.spy(),
// componentWillUpdate: sinon.spy(),
componentDidUpdate: sinon.spy(),
componentWillUnmount: sinon.spy()
};
wrapper = mount(<App {...props} />);
wrapper.setProps(props);
// expect(props.componentWillMount.called).to.equal(true);
expect(props.shouldComponentUpdate.called).to.equal(true);
// expect(props.componentWillUpdate.called).to.equal(true);
expect(props.componentDidUpdate.called).to.equal(true);
});
it('test remove', () => {
const props = {
// componentWillMount: sinon.spy(),
componentDidMount: sinon.spy(),
// componentWillReceiveProps: sinon.spy(),
shouldComponentUpdate: sinon.spy(),
// componentWillUpdate: sinon.spy(),
componentDidUpdate: sinon.spy(),
componentWillUnmount: sinon.spy()
};
wrapper = mount(<App {...props} />);
const stage = wrapper.instance().stage.getStage();
expect(stage.findOne('Rect')).to.not.equal(undefined);
props.dontDrawChildren = true;
wrapper.setProps(props);
expect(stage.findOne('Rect')).to.equal(undefined);
// This line doesn't work... why????
expect(props.componentWillUnmount.called).to.equal(true);
});
});
describe('Test Events', function() {
let instance;
class App extends React.Component {
render() {
return (
<Stage width={300} height={300} ref={node => (this.stage = node)}>
{this.props.shouldDrawLayer && (
<Layer
ref={node => (this.layer = node)}
onClick={this.props.onClick}
/>
)}
</Stage>
);
}
}
it('should remove events on unmount', function() {
const onClickRect = sinon.spy();
const onClickExternal = sinon.spy();
const wrapper = mount(<App onClick={onClickRect} shouldDrawLayer />);
instance = wrapper.instance();
const stageRef = instance.stage;
const layer = stageRef.getStage().findOne('Layer');
layer.on('click', onClickExternal);
expect(onClickRect.callCount).to.equal(0);
expect(onClickExternal.callCount).to.equal(0);
layer._fire('click', {});
expect(onClickRect.callCount).to.equal(1);
expect(onClickExternal.callCount).to.equal(1);
// remove layer
wrapper.setProps({ shouldDrawLayer: false });
expect(layer.getParent()).to.equal(null);
layer._fire('click', {});
expect(onClickRect.callCount).to.equal(1);
expect(onClickExternal.callCount).to.equal(2);
});
});
// will fail
describe.skip('Bad structure', () => {
it('No dom inside Konva', function() {
class App extends React.Component {
render() {
return (
<Stage ref={node => (this.stage = node)} width={300} height={300}>
<Layer>
<div />
</Layer>
</Stage>
);
}
}
const wrapper = mount(<App />);
const instance = wrapper.instance();
const stage = instance.stage.getStage();
});
});
// TODO: how to fix it?
// react is creating new nodes before removing old one
// that creates mess in id references
// see: https://github.com/konvajs/react-konva/issues/119
describe('Check id saving', () => {
it('Konva can lose ids?', function() {
class App extends React.Component {
render() {
const kids = [
<Rect key="1" id="rect1" fill="red" />,
<Rect key="2" id="rect2" fill="green" />
];
return (
<Stage ref={node => (this.stage = node)} width={300} height={300}>
<Layer>
{this.props.drawAsGroup ? <Group>{kids}</Group> : kids}
</Layer>
</Stage>
);
}
}
const wrapper = mount(<App />);
const instance = wrapper.instance();
const stage = instance.stage.getStage();
expect(stage.findOne('#rect1').fill()).to.equal('red');
expect(stage.findOne('#rect2').fill()).to.equal('green');
wrapper.setProps({ drawAsGroup: true });
expect(stage.findOne('#rect1').fill()).to.equal('red');
expect(stage.findOne('#rect2').fill()).to.equal('green');
});
});
describe('Test drawing calls', () => {
it('Draw layer on mount', function() {
class App extends React.Component {
render() {
return (
<Stage ref={node => (this.stage = node)} width={300} height={300}>
<Layer>
<Rect fill="red" />
</Layer>
</Stage>
);
}
}
expect(Konva.Layer.prototype.batchDraw.callCount).to.equal(undefined);
sinon.spy(Konva.Layer.prototype, 'batchDraw');
const wrapper = mount(<App />);
expect(Konva.Layer.prototype.batchDraw.called).to.equal(true);
Konva.Layer.prototype.batchDraw.restore();
});
it('Draw layer on node add', function() {
class App extends React.Component {
render() {
return (
<Stage ref={node => (this.stage = node)} width={300} height={300}>
<Layer>{this.props.showRect && <Rect fill="red" />}</Layer>
</Stage>
);
}
}
const wrapper = mount(<App />);
sinon.spy(Konva.Layer.prototype, 'batchDraw');
wrapper.setProps({ showRect: true });
expect(Konva.Layer.prototype.batchDraw.callCount).to.equal(1);
Konva.Layer.prototype.batchDraw.restore();
});
it('Draw layer on node remove', function() {
class
|
extends React.Component {
render() {
return (
<Stage ref={node => (this.stage = node)} width={300} height={300}>
<Layer>{!this.props.hideRect && <Rect fill="red" />}</Layer>
</Stage>
);
}
}
const wrapper = mount(<App />);
sinon.spy(Konva.Layer.prototype, 'batchDraw');
expect(Konva.Layer.prototype.batchDraw.callCount).to.equal(0);
wrapper.setProps({ hideRect: true });
expect(Konva.Layer.prototype.batchDraw.callCount).to.equal(1);
Konva.Layer.prototype.batchDraw.restore();
});
});
describe('test reconciler', () => {
it('add before', function() {
class App extends React.Component {
render() {
const kids = this.props.drawMany
? [<Rect key="1" name="rect1" />, <Rect key="2" name="rect2" />]
: [<Rect key="2" name="rect2" />];
return (
<Stage ref={node => (this.stage = node)} width={300} height={300}>
<Layer ref={node => (this.layer = node)}>{kids}</Layer>
</Stage>
);
}
}
const wrapper = mount(<App />);
sinon.spy(Konva.Layer.prototype, 'batchDraw');
wrapper.setProps({ drawMany: true });
const layer = wrapper.instance().layer;
expect(layer.children[0].name()).to.equal('rect1');
expect(layer.children[1].name()).to.equal('rect2');
expect(Konva.Layer.prototype.batchDraw.callCount).to.equal(1);
Konva.Layer.prototype.batchDraw.restore();
});
it('add before (many)', function() {
class App extends React.Component {
render() {
const kids = this.props.drawMany
? [
<Rect key="1" name="rect1" />,
<Rect key="2" name="rect2" />,
<Rect key="3" name="rect3" />
]
: [<Rect key="1" name="rect1" />, <Rect key="3" name="rect3" />];
return (
<Stage ref={node => (this.stage = node)} width={300} height={300}>
<Layer ref={node => (this.layer = node)}>{kids}</Layer>
</Stage>
);
}
}
const wrapper = mount(<App />);
wrapper.setProps({ drawMany: true });
const layer = wrapper.instance().layer;
expect(layer.children[0].name()).to.equal('rect1');
expect(layer.children[1].name()).to.equal('rect2');
expect(layer.children[2].name()).to.equal('rect3');
});
it('add after', function() {
class App extends React.Component {
render() {
const kids = this.props.drawMany
? [<Rect key="1" name="rect1" />, <Rect key="2" name="rect2" />]
: [<Rect key="1" name="rect1" />];
return (
<Stage ref={node => (this.stage = node)} width={300} height={300}>
<Layer ref={node => (this.layer = node)}>{kids}</Layer>
</Stage>
);
}
}
const wrapper = mount(<App />);
sinon.spy(Konva.Layer.prototype, 'batchDraw');
wrapper.setProps({ drawMany: true });
const layer = wrapper.instance().layer;
expect(layer.children[0].name()).to.equal('rect1');
expect(layer.children[1].name()).to.equal('rect2');
expect(Konva.Layer.prototype.batchDraw.callCount).to.equal(1);
Konva.Layer.prototype.batchDraw.restore();
});
it('change order', function() {
class App extends React.Component {
render() {
return (
<Stage ref={node => (this.stage = node)} width={300} height={300}>
<Layer ref={node => (this.layer = node)}>{this.props.kids}</Layer>
</Stage>
);
}
}
let kids = [
<Rect key="1" name="rect1" />,
<Rect key="2" name="rect2" />,
<Rect key="3" name="rect3" />
];
const wrapper = mount(<App kids={kids} />);
const layer = wrapper.instance().layer;
expect(layer.children[0].name()).to.equal('rect1');
expect(layer.children[1].name()).to.equal('rect2');
expect(layer.children[2].name()).to.equal('rect3');
// last to first
kids = [
<Rect key="3" name="rect3" />,
<Rect key="1" name="rect1" />,
<Rect key="2" name="rect2" />
];
wrapper.setProps({ kids });
expect(layer.children[0].name()).to.equal('rect3');
expect(layer.children[1].name()).to.equal('rect1');
expect(layer.children[2].name()).to.equal('rect2');
// second to first
kids = [
<Rect key="1" name="rect1" />,
<Rect key="3" name="rect3" />,
<Rect key="2" name="rect2" />
];
wrapper.setProps({ kids });
expect(layer.children[0].name()).to.equal('rect1');
expect(layer.children[1].name()).to.equal('rect3');
expect(layer.children[2].name()).to.equal('rect2');
kids = [
<Rect key="2" name="rect2" />,
<Rect key="1" name="rect1" />,
<Rect key="3" name="rect3" />
];
wrapper.setProps({ kids });
expect(layer.children[0].name()).to.equal('rect2');
expect(layer.children[1].name()).to.equal('rect1');
expect(layer.children[2].name()).to.equal('rect3');
kids = [
<Rect key="4" name="rect4" />,
<Rect key="2" name="rect2" />,
<Rect key="1" name="rect1" />,
<Rect key="3" name="rect3" />
];
wrapper.setProps({ kids });
expect(layer.children[0].name()).to.equal('rect4');
expect(layer.children[1].name()).to.equal('rect2');
expect(layer.children[2].name()).to.equal('rect1');
expect(layer.children[3].name()).to.equal('rect3');
});
it('changing order should not stop dragging', function() {
class App extends React.Component {
render() {
return (
<Stage ref={node => (this.stage = node)} width={300} height={300}>
<Layer ref={node => (this.layer = node)}>{this.props.kids}</Layer>
</Stage>
);
}
}
let kids = [
<Rect key="1" name="rect1" />,
<Rect key="2" name="rect2" />,
<Rect key="3" name="rect3" />
];
const wrapper = mount(<App kids={kids} />);
const layer = wrapper.instance().layer;
const rect1 = layer.findOne('.rect1');
layer.getStage().simulateMouseDown({ x: 5, y: 5 });
rect1.startDrag();
// move mouse
layer.getStage().simulateMouseMove({ x: 10, y: 10 });
expect(rect1.isDragging()).to.equal(true);
kids = [
<Rect key="3" name="rect3" />,
<Rect key="1" name="rect1" />,
<Rect key="2" name="rect2" />
];
wrapper.setProps({ kids });
expect(rect1.isDragging()).to.equal(true);
rect1.stopDrag();
});
});
describe('Test context API', function() {
let instance;
const { Consumer, Provider } = React.createContext({
width: 100,
height: 100
});
class App extends React.Component {
render() {
return (
<Provider value={{ width: 200, height: 100 }}>
<Consumer>
{({ width, height }) => (
<Stage
width={width}
height={height}
ref={node => (this.stage = node)}
>
<Layer ref={node => (this.layer = node)} />
</Stage>
)}
</Consumer>
</Provider>
);
}
}
beforeEach(() => {
const wrapper = mount(<App />);
instance = wrapper.instance();
});
it('test correct set', function() {
const stageRef = instance.stage;
const stage = stageRef.getStage();
expect(stage.width()).to.equal(200);
expect(stage.height()).to.equal(100);
});
});
// wait for react team response
describe('Test nested context API', function() {
const Context = React.createContext({
color: 'red'
});
class Tools extends React.Component {
static contextType = Context;
render() {
return (
<Layer>
<Rect width={50} height={50} fill={this.context.color} />
</Layer>
);
}
}
class Canvas extends React.Component {
static contextType = Context;
render() {
return (
<Stage width={300} height={200} ref={node => (this.stage = node)}>
<Tools />
</Stage>
);
}
}
class App extends React.Component {
render() {
return (
<Context.Provider value={{ color: 'black' }}>
<Canvas />
</Context.Provider>
);
}
}
beforeEach(() => {
mount(<App />);
});
it.skip('test correct set', function() {
const stage = Konva.stages[Konva.stages.length - 1];
expect(stage.findOne('Rect').fill()).to.equal('black');
});
});
// wait for react team response
describe('try lazy and suspense', function() {
const LazyRect = React.lazy(() => {
return new Promise(resolve => {
setTimeout(() => {
resolve({
default: () => <Rect />
});
}, 5);
});
});
class App extends React.Component {
render() {
return (
<Stage ref={node => (this.stage = node)} width={300} height={300}>
<Layer ref={node => (this.layer = node)}>
<React.Suspense fallback={<Text text="fallback" />}>
<LazyRect />
</React.Suspense>
</Layer>
</Stage>
);
}
}
let instance;
beforeEach(() => {
const wrapper = mount(<App />);
instance = wrapper.instance();
});
it('can use lazy and suspense', function(done) {
const stageRef = instance.stage;
const stage = stageRef.getStage();
expect(stage.find('Text').length).to.equal(1);
expect(stage.find('Shape').length).to.equal(1);
setTimeout(() => {
expect(stage.find('Text').length).to.equal(0);
expect(stage.find('Rect').length).to.equal(1);
expect(stage.find('Shape').length).to.equal(1);
done();
}, 50);
});
});
describe('Fragments', function() {
const Fragmented = () => (
<React.Fragment>
<Rect />
<Rect />
</React.Fragment>
);
class App extends React.Component {
render() {
return (
<Stage ref={node => (this.stage = node)} width={300} height={300}>
<Layer ref={node => (this.layer = node)}>
<Fragmented />
</Layer>
</Stage>
);
}
}
let instance;
beforeEach(() => {
const wrapper = mount(<App />);
instance = wrapper.instance();
});
it('can use lazy and suspense', function() {
const stage = instance.stage;
expect(stage.find('Rect').length).to.equal(2);
});
});
describe('warnings', function() {
class App extends React.Component {
render() {
return (
<Stage ref={node => (this.stage = node)} width={300} height={300}>
<Layer ref={node => (this.layer = node)}>
<Rect draggable x={0} y={0} />
</Layer>
</Stage>
);
}
}
it('check draggable warning', function() {
const wrapper = mount(<App />);
// sinon.spy(console, 'warning');
// expect(console.warning.callCount).to.equal(1);
});
});
describe('Hooks', function() {
it('check setState hook', function() {
const App = () => {
const [fill, setColor] = React.useState('black');
return (
<Stage width={300} height={300}>
<Layer>
<Rect
fill={fill}
width={100}
height={100}
onMouseDown={() => {
setColor('red');
}}
/>
</Layer>
</Stage>
);
};
const wrapper = mount(<App />);
const instance = wrapper.instance();
const stage = Konva.stages[Konva.stages.length - 1];
expect(stage.findOne('Rect').fill()).to.equal('black');
stage.simulateMouseDown({ x: 50, y: 50 });
expect(stage.findOne('Rect').fill()).to.equal('red');
});
it('check useEffect hook', function(done) {
let callCount = 0;
const App = () => {
React.useEffect(() => {
callCount += 1;
});
return (
<Stage width={300} height={300}>
<Layer />
</Stage>
);
};
const wrapper = mount(<App />);
// not sure why timeouts are required
// are hooks async?
setTimeout(() => {
expect(callCount).to.equal(1);
wrapper.setProps({ randomProp: 1 });
setTimeout(() => {
expect(callCount).to.equal(2);
done();
}, 50);
}, 50);
});
it('check useEffect hook 2', function(done) {
let callCount = 0;
const MyRect = ({ name }) => {
React.useEffect(() => {
callCount += 1;
});
return <Rect name={name} />;
};
const App = () => {
const [name, setName] = React.useState('');
React.useEffect(() => {
setName('rect name');
}, []);
return (
<Stage width={300} height={300}>
<Layer>
<MyRect name={name} />
</Layer>
</Stage>
);
};
const wrapper = mount(<App />);
// not sure why timeouts are required
// are hooks async?
setTimeout(() => {
const stage = Konva.stages[Konva.stages.length - 1];
const rect = stage.findOne('Rect');
expect(rect.name()).to.equal('rect name');
expect(callCount).to.equal(2);
done();
}, 50);
});
it('check useImage hook', function(done) {
const url = 'https://konvajs.org/favicon-32x32.png';
const App = () => {
const [image, status] = useImage(url);
return (
<Stage width={300} height={300}>
<Layer>
<Image image={image} />
<Text text={status} />
</Layer>
</Stage>
);
};
const wrapper = mount(<App />);
const stage = Konva.stages[Konva.stages.length - 1];
// no image while loading
expect(stage.findOne('Image').image()).to.equal(undefined);
expect(stage.findOne('Text').text()).to.equal('loading');
const img = new window.Image();
img.onload = () => {
// here should hook trigger
setTimeout(() => {
expect(stage.findOne('Image').image()).not.to.equal(undefined);
expect(stage.findOne('Text').text()).to.equal('loaded');
done();
}, 50);
};
img.src = url;
});
it('unsubscribe on unmount', function(done) {
const url = 'https://konvajs.org/favicon-32x32.png';
const App = () => {
const [image, status] = useImage(url);
return (
<Stage width={300} height={300}>
<Layer>
<Image image={image} />
<Text text={status} />
</Layer>
</Stage>
);
};
const wrapper = mount(<App />);
const stage = Konva.stages[Konva.stages.length - 1];
// no image while loading
expect(stage.findOne('Image').image()).to.equal(undefined);
expect(stage.findOne('Text').text()).to.equal('loading');
wrapper.unmount();
const img = new window.Image();
img.onload = () => {
setTimeout(() => {
// image is loaded here
// if the hook is unsubscribed we should have no errors
// so just finish the test
done();
}, 50);
};
img.src = url;
});
});
describe('external', () => {
it('make sure node has _applyProps for react-spring integration', function() {
class App extends React.Component {
render() {
return (
<Stage ref={node => (this.stage = node)} width={300} height={300}>
<Layer>
<Rect fill="red" />
</Layer>
</Stage>
);
}
}
const wrapper = mount(<App />);
const instance = wrapper.instance();
const stage = instance.stage;
expect(typeof stage.findOne('Rect')._applyProps).to.equal('function');
});
});
|
App
|
knapsack.py
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Convert knapsack parameter instances into a Pauli list.
The parameters are a list of values, a list of weights, and a maximum weight of the knapsack.
In the Knapsack Problem we are given a list of objects that each has a weight and a value.
We are also given a maximum weight we can carry. We need to pick a subset of the objects
so as to maximize the total value without going over the maximum weight.
If we have the weights w[i], the values v[i] and the maximum weight W_max.
We express the solution as a binary array x[i]
where we have a 1 for the items we take in the solution set.
We need to maximize sum(x[i]*v[i]) while respecting W_max >= sum(x[i]*w[i])
"""
import logging
import math
import numpy as np
from qiskit.quantum_info import Pauli
from qiskit.aqua.operators import WeightedPauliOperator
logger = logging.getLogger(__name__)
def get_operator(values, weights, max_weight):
"""
Generate Hamiltonian for the knapsack problem.
Notes:
To build the cost function for the Hamiltonian we add a term S
that will vary with our solution. In order to make it change with the solution
we enhance X with a number of additional bits X' = [x_0,..x_{n-1},y_{n}..y_{n+m-1}].
The bits y[i] will be the binary representation of S.
In this way the optimizer will be able to optimize S as well as X.
The cost function is
$$C(X') = M(W_{max} - \\sum_{i=0}^{n-1} x_{i}w_{i} - S)^2 - \\sum_{i=0}^{n-1} x_{i}v_{i}$$
where S = sum(2**j * y[n+j]), with j going from 0 to log(W_max).
M is a number large enough to dominate the sum of values.
Because S can only be positive, when W_max >= sum(x[i]*w[i])
the optimizer can find an S (or better the y[j] that compose S)
so that it will take the first term to 0.
This way the function is dominated by the sum of values.
If W_max < sum(x[i]*w[i]) then the first term can never be 0
and, multiplied by a large M, will always dominate the function.
The minimum value of the function will be that where the constraint is respected
and the sum of values is maximized.
Args:
values (list of non-negative integers) : a list of values
weights (list of non-negative integers) : a list of weights
max_weight (non negative integer) : the maximum weight the knapsack can carry
Returns:
WeightedPauliOperator: operator for the Hamiltonian
float: a constant shift for the objective function.
Raises:
ValueError: values and weights have different lengths
ValueError: A value or a weight is negative
ValueError: All values are zero
ValueError: max_weight is negative
"""
if len(values) != len(weights):
raise ValueError("The values and weights must have the same length")
if any(v < 0 for v in values) or any(w < 0 for w in weights):
raise ValueError("The values and weights cannot be negative")
if all(v == 0 for v in values):
raise ValueError("The values cannot all be 0")
if max_weight < 0:
raise ValueError("max_weight cannot be negative")
y_size = int(math.log(max_weight, 2)) + 1 if max_weight > 0 else 1
n = len(values)
num_values = n + y_size
pauli_list = []
shift = 0
# pylint: disable=invalid-name
M = 10 * np.sum(values)
# term for sum(x_i*w_i)**2
for i in range(n):
for j in range(n):
coefficient = -1 * 0.25 * weights[i] * weights[j] * M
pauli_op = _get_pauli_op(num_values, [j])
pauli_list.append([coefficient, pauli_op])
shift -= coefficient
pauli_op = _get_pauli_op(num_values, [i])
pauli_list.append([coefficient, pauli_op])
shift -= coefficient
coefficient = -1 * coefficient
pauli_op = _get_pauli_op(num_values, [i, j])
pauli_list.append([coefficient, pauli_op])
shift -= coefficient
# term for sum(2**j*y_j)**2
for i in range(y_size):
for j in range(y_size):
coefficient = -1 * 0.25 * (2 ** i) * (2 ** j) * M
pauli_op = _get_pauli_op(num_values, [n + j])
pauli_list.append([coefficient, pauli_op])
shift -= coefficient
pauli_op = _get_pauli_op(num_values, [n + i])
pauli_list.append([coefficient, pauli_op])
shift -= coefficient
coefficient = -1 * coefficient
pauli_op = _get_pauli_op(num_values, [n + i, n + j])
pauli_list.append([coefficient, pauli_op])
shift -= coefficient
# term for -2*W_max*sum(x_i*w_i)
for i in range(n):
coefficient = max_weight * weights[i] * M
pauli_op = _get_pauli_op(num_values, [i])
pauli_list.append([coefficient, pauli_op])
shift -= coefficient
# term for -2*W_max*sum(2**j*y_j)
for j in range(y_size):
coefficient = max_weight * (2 ** j) * M
pauli_op = _get_pauli_op(num_values, [n + j])
pauli_list.append([coefficient, pauli_op])
shift -= coefficient
for i in range(n):
for j in range(y_size):
coefficient = -1 * 0.5 * weights[i] * (2 ** j) * M
pauli_op = _get_pauli_op(num_values, [n + j])
pauli_list.append([coefficient, pauli_op])
shift -= coefficient
pauli_op = _get_pauli_op(num_values, [i])
pauli_list.append([coefficient, pauli_op])
shift -= coefficient
coefficient = -1 * coefficient
pauli_op = _get_pauli_op(num_values, [i, n + j])
pauli_list.append([coefficient, pauli_op])
shift -= coefficient
# term for sum(x_i*v_i)
for i in range(n):
coefficient = 0.5 * values[i]
pauli_op = _get_pauli_op(num_values, [i])
pauli_list.append([coefficient, pauli_op])
shift -= coefficient
return WeightedPauliOperator(paulis=pauli_list), shift
def get_solution(x, values):
"""
Get the solution to the knapsack problem
from the bitstring that corresponds
to the ground state of the Hamiltonian
Args:
x (numpy.ndarray): the ground state of the Hamiltonian.
values (numpy.ndarray): the list of values
Returns:
numpy.ndarray: a bit string that has a '1' at the indexes
corresponding to values that have been taken in the knapsack.
i.e. if the solution has a '1' at index i then
the value values[i] has been taken in the knapsack
"""
return x[:len(values)]
def
|
(solution, values, weights):
"""
Get the total weight and value of the items taken in the knapsack.
Args:
solution (numpy.ndarray) : binary string that represents the solution to the problem.
values (numpy.ndarray) : the list of values
weights (numpy.ndarray) : the list of weights
Returns:
tuple: the total value and weight of the items in the knapsack
"""
value = np.sum(solution * values)
weight = np.sum(solution * weights)
return value, weight
def _get_pauli_op(num_values, indexes):
pauli_x = np.zeros(num_values, dtype=bool)
pauli_z = np.zeros(num_values, dtype=bool)
for i in indexes:
pauli_z[i] = not pauli_z[i]
return Pauli((pauli_z, pauli_x))
|
knapsack_value_weight
|
bu_average_speed_of_answer.py
|
# coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
import re
import json
from ..utils import sanitize_for_serialization
class BuAverageSpeedOfAnswer(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
BuAverageSpeedOfAnswer - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'include': 'bool',
'seconds': 'int'
}
self.attribute_map = {
'include': 'include',
'seconds': 'seconds'
}
self._include = None
self._seconds = None
@property
def include(self):
"""
Gets the include of this BuAverageSpeedOfAnswer.
Whether to include average speed of answer (ASA) in the associated configuration
:return: The include of this BuAverageSpeedOfAnswer.
:rtype: bool
"""
return self._include
@include.setter
def include(self, include):
"""
Sets the include of this BuAverageSpeedOfAnswer.
Whether to include average speed of answer (ASA) in the associated configuration
:param include: The include of this BuAverageSpeedOfAnswer.
:type: bool
"""
self._include = include
@property
def seconds(self):
"""
Gets the seconds of this BuAverageSpeedOfAnswer.
The target average speed of answer (ASA) in seconds. Required if include == true
:return: The seconds of this BuAverageSpeedOfAnswer.
:rtype: int
"""
return self._seconds
@seconds.setter
def seconds(self, seconds):
"""
Sets the seconds of this BuAverageSpeedOfAnswer.
The target average speed of answer (ASA) in seconds. Required if include == true
:param seconds: The seconds of this BuAverageSpeedOfAnswer.
:type: int
"""
self._seconds = seconds
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_json(self):
"""
Returns the model as raw JSON
"""
return json.dumps(sanitize_for_serialization(self.to_dict()))
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
|
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
return self.__dict__ == other.__dict__
|
ref-mut-struct.rs
|
#![feature(arbitrary_self_types)]
#![allow(non_snake_case)]
use std::pin::Pin;
struct Struct { }
impl Struct {
|
}
fn box_ref_Struct(self: Box<&mut Struct>, f: &u32) -> &u32 {
f //~ ERROR lifetime mismatch
}
fn pin_ref_Struct(self: Pin<&mut Struct>, f: &u32) -> &u32 {
f //~ ERROR lifetime mismatch
}
fn box_box_ref_Struct(self: Box<Box<&mut Struct>>, f: &u32) -> &u32 {
f //~ ERROR lifetime mismatch
}
fn box_pin_ref_Struct(self: Box<Pin<&mut Struct>>, f: &u32) -> &u32 {
f //~ ERROR lifetime mismatch
}
}
fn main() { }
|
// Test using `&mut Struct` explicitly:
fn ref_Struct(self: &mut Struct, f: &u32) -> &u32 {
f //~ ERROR lifetime mismatch
|
error.rs
|
use std::error::Error as StdError;
use std::fmt;
use hyper::Error as HyperError;
use tungstenite::Error as WsError;
use never::Never;
/// Errors that can happen inside warp.
pub struct Error(Box<Kind>);
impl fmt::Debug for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
// Skip showing worthless `Error { .. }` wrapper.
fmt::Debug::fmt(&self.0, f)
}
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self.0.as_ref() {
Kind::Hyper(ref e) => fmt::Display::fmt(e, f),
Kind::Ws(ref e) => fmt::Display::fmt(e, f),
}
}
}
impl StdError for Error {
fn
|
(&self) -> &str {
match self.0.as_ref() {
Kind::Hyper(ref e) => e.description(),
Kind::Ws(ref e) => e.description(),
}
}
#[allow(deprecated)]
fn cause(&self) -> Option<&StdError> {
match self.0.as_ref() {
Kind::Hyper(ref e) => e.cause(),
Kind::Ws(ref e) => e.cause(),
}
}
}
pub(crate) enum Kind {
Hyper(HyperError),
Ws(WsError),
}
impl fmt::Debug for Kind {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
Kind::Hyper(ref e) => fmt::Debug::fmt(e, f),
Kind::Ws(ref e) => fmt::Debug::fmt(e, f),
}
}
}
#[doc(hidden)]
impl From<Kind> for Error {
fn from(kind: Kind) -> Error {
Error(Box::new(kind))
}
}
impl From<Never> for Error {
fn from(never: Never) -> Error {
match never {}
}
}
#[test]
fn error_size_of() {
assert_eq!(::std::mem::size_of::<Error>(), ::std::mem::size_of::<usize>());
}
|
description
|
v2ray.go
|
/*
Copyright © 2019 HarryWang <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package cmd
import (
"github.com/spf13/cobra"
"log"
"translate/translate"
)
// VmessCmd represents the Vmess command
// TODO: improve the terminal prompts
var VmessCmd = &cobra.Command{
Use: "vmess",
Short: "订阅为vmess协议",
Long: `订阅为vmess协议,默认使用神机规则`,
Run: func(cmd *cobra.Command, args []string) {
if len(args) != 1 {
log.Fatal("args is error")
}
_args.Target = args[0]
err := translate.Run(cmd.CalledAs(), _args)
if err != nil {
log.Fatal(err)
}
},
}
func init() {
rootCmd.AddCommand(VmessCmd)
// Here yo
|
ll define your flags and configuration settings.
// Cobra supports Persistent Flags which will work for this command
// and all subcommands, e.g.:
// VmessCmd.PersistentFlags().String("foo", "", "A help for foo")
// Cobra supports local flags which will only run when this command
// is called directly, e.g.:
// VmessCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
}
|
u wi
|
manage_test.go
|
package manage
import (
"testing"
"time"
"github.com/PagerDuty/go-pagerduty"
"github.com/stretchr/testify/assert"
"gorm.io/gorm"
)
type mockAPIClient struct{}
type mockLister interface { //nolint:golint,deadcode,unused
ListI(opts pagerduty.ListIncidentsOptions) (*pagerduty.ListIncidentsResponse, error)
ListS() ([]pagerduty.Service, error)
}
func (c *mockAPIClient) ListS() ([]pagerduty.Service, error) {
return []pagerduty.Service{
pagerduty.Service{
Name: "TestS",
},
}, nil
}
func (c *mockAPIClient) ListI(opts pagerduty.ListIncidentsOptions) (*pagerduty.ListIncidentsResponse, error) {
return &pagerduty.ListIncidentsResponse{
Incidents: []pagerduty.Incident{
pagerduty.Incident{
Title: "TestT",
Service: pagerduty.APIObject{
Summary: "TestS",
},
Urgency: "High",
CreatedAt: time.Now().AddDate(0, 0, -2).Format("2006-01-02T15:04:05Z"),
Assignments: []pagerduty.Assignment{
pagerduty.Assignment{
Assignee: pagerduty.APIObject{
Summary: "TestA",
},
},
},
Teams: []pagerduty.APIObject{
pagerduty.APIObject{
Summary: "TestTeam",
},
},
},
},
}, nil
}
func
|
(t *testing.T) {
pdclient := &mockAPIClient{}
incidents := make(map[string]map[string][]Incident)
var db *gorm.DB
manager := Manage{pdclient, incidents, db}
err := manager.GetServices()
if err != nil {
t.Fatalf("an error '%s' was not expected when get services", err)
}
err = manager.GetIncidents()
if err != nil {
t.Fatalf("an error '%s' was not expected when get incidents", err)
}
assert.Equal(t, incidents["TestS"]["High"][0].Name, "TestT")
assert.Equal(t, incidents["TestS"]["High"][0].Assigne, "TestA")
assert.Equal(t, incidents["TestS"]["High"][0].CreatedAt, "2 days")
}
|
TestGetIncidents
|
__init__.py
|
import difflib
import re
from typing import Optional, Union
from discord.utils import escape_markdown
def
|
(value: str, *, block: Optional[Union[bool, str]] = None):
value = value.replace("`", "\u200b`\u200b")
value = value.replace("\u200b\u200b", "\u200b")
if block is None:
return "``" + value + "``"
lang = "" if block is True else block
return f"```{block}\n" + value + "\n```"
def escape(text: str):
return escape_markdown(re.sub(r"<(a?:\w+:\d+)>", "<\u200b\\1>", text))
def cut_words(text: str, max_len: int, *, end: str = "..."):
words = [""] + re.split(r"(\s+)", text)
result = ""
if len(words[1] + end) > max_len:
return words[1][: max_len - len(end)] + end
for last_sep, word in zip(words[::2], words[1::2]):
if len(result + last_sep + word + end) > max_len:
return result + end
result += last_sep + word
return result
def diff_message(
a: str,
b: str,
*,
max_len: Optional[int] = None,
group_sep: str = "**...**",
cutoff_end: str = " **... [cut off]**",
):
a_words = a.split()
b_words = b.split()
matcher = difflib.SequenceMatcher(autojunk=False)
matcher.set_seqs(a_words, b_words)
groups = []
start = f"{group_sep} "
end = f" {group_sep}"
for group in matcher.get_grouped_opcodes():
parts = []
for op, i1, i2, j1, j2 in group:
if min(i1, j1) == 0:
start = ""
if i2 == len(a_words) or j2 == len(b_words):
end = ""
if op == "delete" or op == "replace":
parts.append(f"~~{escape(' '.join(a_words[i1:i2]))}~~")
if op == "insert" or op == "replace":
parts.append(f"__{escape(' '.join(b_words[j1:j2]))}__")
if op == "equal":
parts.append(escape(" ".join(a_words[i1:i2])))
groups.append(" ".join(parts))
res = start + f" {group_sep} ".join(groups) + end
if max_len:
res = cut_words(res, max_len, end=cutoff_end)
return res
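# Illustrative example (assumed, not taken from the original module): diffing two
# edited messages word by word, with insertions underlined and deletions struck
# through in the returned Discord-flavoured markdown.
#
#   diff_message("the quick fox", "the quick brown fox")
#   # -> "the quick __brown__ fox"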
|
wrap_in_code
|
0007_article_issue.py
|
# Generated by Django 3.1.3 on 2021-05-12 18:49
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class
|
(migrations.Migration):
dependencies = [
('jdhapi', '0006_auto_20201127_1355'),
]
operations = [
migrations.CreateModel(
name='Issue',
fields=[
('id', models.AutoField(db_column='id', primary_key=True, serialize=False)),
('title', models.CharField(max_length=250)),
('description', models.TextField(blank=True, null=True)),
('creation_date', models.DateTimeField(default=django.utils.timezone.now)),
('publication_date', models.DateTimeField(blank=True, null=True)),
],
),
migrations.CreateModel(
name='Article',
fields=[
('abstract', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to='jdhapi.abstract')),
('repository_url', models.URLField(blank=True, max_length=254, null=True)),
('notebook_url', models.URLField(blank=True, max_length=254, null=True)),
('notebook_commit_hash', models.CharField(blank=True, default='', help_text='store the git hash', max_length=22)),
('status', models.CharField(choices=[('DRAFT', 'Draft'), ('INTERNAL_REVIEW', 'Internal_review'), ('EXTERNAL_REVIEW', 'External_review'), ('PUBLISHED', 'Published')], default='DRAFT', max_length=15)),
('repository_type', models.CharField(choices=[('GITHUB', 'Github'), ('GITLAB', 'Gitlab')], default='GITHUB', max_length=15)),
('issue', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='jdhapi.issue')),
],
),
]
|
Migration
|
pack.go
|
package jam
import (
"context"
"encoding/json"
|
"os/exec"
"time"
"github.com/gobuffalo/packr/v2/jam/parser"
"github.com/gobuffalo/packr/v2/jam/store"
"github.com/gobuffalo/packr/v2/plog"
"github.com/pkg/errors"
)
// PackOptions ...
type PackOptions struct {
IgnoreImports bool
Legacy bool
StoreCmd string
Roots []string
}
// Pack the roots given + PWD
func Pack(opts PackOptions) error {
pwd, err := os.Getwd()
if err != nil {
return errors.WithStack(err)
}
opts.Roots = append(opts.Roots, pwd)
if err := Clean(opts.Roots...); err != nil {
return errors.WithStack(err)
}
p, err := parser.NewFromRoots(opts.Roots, &parser.RootsOptions{
IgnoreImports: opts.IgnoreImports,
})
if err != nil {
return errors.WithStack(err)
}
boxes, err := p.Run()
if err != nil {
return errors.WithStack(err)
}
// reduce boxes - remove ones we don't want
// MB: current assumption is we want all these
	// boxes, just adding a comment suggesting there
// might be a reason to exclude some
plog.Logger.Debugf("found %d boxes", len(boxes))
if len(opts.StoreCmd) != 0 {
return ShellPack(opts, boxes)
}
var st store.Store = store.NewDisk("", "")
if opts.Legacy {
st = store.NewLegacy()
}
for _, b := range boxes {
if b.Name == store.DISK_GLOBAL_KEY {
continue
}
if err := st.Pack(b); err != nil {
return errors.WithStack(err)
}
}
if cl, ok := st.(io.Closer); ok {
return cl.Close()
}
return nil
}
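// A hypothetical caller sketch (the root path is made up); Pack always appends the
// working directory to opts.Roots before cleaning and packing:
//
//	err := Pack(PackOptions{
//		Roots:         []string{"./cmd/myapp"},
//		IgnoreImports: false,
//	})
//	if err != nil {
//		// handle the error
//	}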
// ShellPack ...
func ShellPack(opts PackOptions, boxes parser.Boxes) error {
b, err := json.Marshal(boxes)
if err != nil {
return errors.WithStack(err)
}
ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
defer cancel()
c := exec.CommandContext(ctx, opts.StoreCmd, string(b))
c.Stdout = os.Stdout
c.Stderr = os.Stderr
return c.Run()
}
// Clean ...
func Clean(args ...string) error {
pwd, err := os.Getwd()
if err != nil {
return errors.WithStack(err)
}
args = append(args, pwd)
for _, root := range args {
if err := store.Clean(root); err != nil {
return errors.WithStack(err)
}
}
return nil
}
|
"io"
"os"
|
model_payment_method_tokens.go
|
/*
* Gr4vy API
*
 * Welcome to the Gr4vy API reference documentation. Our API is still very much a work in progress and subject to change.
*
* API version: 1.1.0-beta
* Contact: [email protected]
*/
// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT.
package Openapi
import (
"encoding/json"
)
// PaymentMethodTokens A list of tokens for a payment method.
type PaymentMethodTokens struct {
// A list of stored tokens for payment methods.
Items *[]PaymentMethodToken `json:"items,omitempty"`
}
// NewPaymentMethodTokens instantiates a new PaymentMethodTokens object
// This constructor will assign default values to properties that have it defined,
// and makes sure properties required by API are set, but the set of arguments
// will change when the set of required properties is changed
func NewPaymentMethodTokens() *PaymentMethodTokens {
this := PaymentMethodTokens{}
return &this
}
// NewPaymentMethodTokensWithDefaults instantiates a new PaymentMethodTokens object
// This constructor will only assign default values to properties that have it defined,
// but it doesn't guarantee that properties required by API are set
func NewPaymentMethodTokensWithDefaults() *PaymentMethodTokens {
this := PaymentMethodTokens{}
return &this
}
// GetItems returns the Items field value if set, zero value otherwise.
func (o *PaymentMethodTokens) GetItems() []PaymentMethodToken {
if o == nil || o.Items == nil {
var ret []PaymentMethodToken
return ret
}
return *o.Items
}
// GetItemsOk returns a tuple with the Items field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *PaymentMethodTokens) GetItemsOk() (*[]PaymentMethodToken, bool) {
if o == nil || o.Items == nil {
return nil, false
}
return o.Items, true
}
// HasItems reports whether the Items field has been set.
func (o *PaymentMethodTokens) HasItems() bool {
if o != nil && o.Items != nil {
return true
}
return false
}
// SetItems gets a reference to the given []PaymentMethodToken and assigns it to the Items field.
func (o *PaymentMethodTokens) SetItems(v []PaymentMethodToken) {
o.Items = &v
}
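// A hypothetical usage sketch of the accessors above (not part of the generated file):
//
//	tokens := NewPaymentMethodTokens()
//	tokens.SetItems([]PaymentMethodToken{})
//	if tokens.HasItems() {
//		body, _ := json.Marshal(tokens) // serializes {"items":[]}
//		_ = body
//	}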
func (o PaymentMethodTokens) MarshalJSON() ([]byte, error) {
toSerialize := map[string]interface{}{}
if o.Items != nil
|
return json.Marshal(toSerialize)
}
type NullablePaymentMethodTokens struct {
value *PaymentMethodTokens
isSet bool
}
func (v NullablePaymentMethodTokens) Get() *PaymentMethodTokens {
return v.value
}
func (v *NullablePaymentMethodTokens) Set(val *PaymentMethodTokens) {
v.value = val
v.isSet = true
}
func (v NullablePaymentMethodTokens) IsSet() bool {
return v.isSet
}
func (v *NullablePaymentMethodTokens) Unset() {
v.value = nil
v.isSet = false
}
func NewNullablePaymentMethodTokens(val *PaymentMethodTokens) *NullablePaymentMethodTokens {
return &NullablePaymentMethodTokens{value: val, isSet: true}
}
func (v NullablePaymentMethodTokens) MarshalJSON() ([]byte, error) {
return json.Marshal(v.value)
}
func (v *NullablePaymentMethodTokens) UnmarshalJSON(src []byte) error {
v.isSet = true
return json.Unmarshal(src, &v.value)
}
|
{
toSerialize["items"] = o.Items
}
|
logplex.rs
|
use crate::{
event::{self, Event},
shutdown::ShutdownSignal,
sources::util::{ErrorMessage, HttpSource},
tls::TlsConfig,
topology::config::{DataType, GlobalOptions, SourceConfig},
};
use bytes05::{buf::BufExt, Bytes};
use chrono::{DateTime, Utc};
use futures01::sync::mpsc;
use serde::{Deserialize, Serialize};
use std::{
io::{BufRead, BufReader},
net::SocketAddr,
str::FromStr,
};
use warp::http::{HeaderMap, StatusCode};
#[derive(Deserialize, Serialize, Debug, Clone)]
pub struct LogplexConfig {
address: SocketAddr,
tls: Option<TlsConfig>,
}
#[derive(Clone, Default)]
struct LogplexSource {}
impl HttpSource for LogplexSource {
fn build_event(&self, body: Bytes, header_map: HeaderMap) -> Result<Vec<Event>, ErrorMessage> {
decode_message(body, header_map)
}
}
#[typetag::serde(name = "logplex")]
impl SourceConfig for LogplexConfig {
fn build(
&self,
_: &str,
_: &GlobalOptions,
shutdown: ShutdownSignal,
out: mpsc::Sender<Event>,
) -> crate::Result<super::Source> {
let source = LogplexSource::default();
source.run(self.address, "events", &self.tls, out, shutdown)
}
fn output_type(&self) -> DataType {
DataType::Log
}
fn source_type(&self) -> &'static str {
"logplex"
}
}
fn decode_message(body: Bytes, header_map: HeaderMap) -> Result<Vec<Event>, ErrorMessage> {
// Deal with headers
let msg_count = match usize::from_str(get_header(&header_map, "Logplex-Msg-Count")?) {
Ok(v) => v,
Err(e) => return Err(header_error_message("Logplex-Msg-Count", &e.to_string())),
};
let frame_id = get_header(&header_map, "Logplex-Frame-Id")?;
let drain_token = get_header(&header_map, "Logplex-Drain-Token")?;
info!(message = "Handling logplex request", %msg_count, %frame_id, %drain_token);
// Deal with body
let events = body_to_events(body);
if events.len() != msg_count {
let error_msg = format!(
"Parsed event count does not match message count header: {} vs {}",
events.len(),
msg_count
);
if cfg!(test) {
panic!(error_msg);
} else {
error!(message = error_msg.as_str());
}
return Err(header_error_message("Logplex-Msg-Count", &error_msg));
}
Ok(events)
}
fn get_header<'a>(header_map: &'a HeaderMap, name: &str) -> Result<&'a str, ErrorMessage> {
if let Some(header_value) = header_map.get(name) {
header_value
.to_str()
.map_err(|e| header_error_message(name, &e.to_string()))
} else {
Err(header_error_message(name, "Header does not exist"))
}
}
fn header_error_message(name: &str, msg: &str) -> ErrorMessage {
ErrorMessage::new(
StatusCode::BAD_REQUEST,
format!("Invalid request header {:?}: {:?}", name, msg),
)
}
fn body_to_events(body: Bytes) -> Vec<Event> {
let rdr = BufReader::new(body.reader());
rdr.lines()
.filter_map(|res| {
res.map_err(|error| error!(message = "Error reading request body", ?error))
.ok()
})
.filter(|s| !s.is_empty())
.map(line_to_event)
.collect()
}
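// The expected logplex line layout, illustrated with the router line from the tests below
// (splitn(8, ' ') yields: length, priority/version, timestamp, host, app, proc id, msgid, message):
//   "267 <158>1 2020-01-08T22:33:57.353034+00:00 host heroku router - at=info method=GET ..."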
fn line_to_event(line: String) -> Event {
let parts = line.splitn(8, ' ').collect::<Vec<&str>>();
let mut event = if parts.len() == 8 {
let timestamp = parts[2];
let hostname = parts[3];
let app_name = parts[4];
let proc_id = parts[5];
let message = parts[7];
let mut event = Event::from(message);
let log = event.as_mut_log();
if let Ok(ts) = timestamp.parse::<DateTime<Utc>>() {
log.insert(event::log_schema().timestamp_key().clone(), ts);
}
log.insert(event::log_schema().host_key().clone(), hostname);
log.insert("app_name", app_name);
log.insert("proc_id", proc_id);
event
} else {
warn!(
message = "Line didn't match expected logplex format. Forwarding raw message.",
fields = parts.len()
);
Event::from(line)
};
// Add source type
event
.as_mut_log()
.try_insert(event::log_schema().source_type_key(), "logplex");
event
}
#[cfg(test)]
mod tests {
use super::LogplexConfig;
use crate::shutdown::ShutdownSignal;
use crate::{
event::{self, Event},
runtime::Runtime,
test_util::{self, collect_n, runtime},
topology::config::{GlobalOptions, SourceConfig},
};
use chrono::{DateTime, Utc};
use futures::compat::Future01CompatExt;
use futures01::sync::mpsc;
use pretty_assertions::assert_eq;
use std::net::SocketAddr;
fn source(rt: &mut Runtime) -> (mpsc::Receiver<Event>, SocketAddr) {
test_util::trace_init();
let (sender, recv) = mpsc::channel(100);
let address = test_util::next_addr();
rt.spawn(
LogplexConfig { address, tls: None }
.build(
"default",
&GlobalOptions::default(),
ShutdownSignal::noop(),
sender,
)
.unwrap(),
);
(recv, address)
}
async fn send(address: SocketAddr, body: &str) -> u16 {
let len = body.lines().count();
reqwest::Client::new()
.post(&format!("http://{}/events", address))
.header("Logplex-Msg-Count", len)
.header("Logplex-Frame-Id", "frame-foo")
.header("Logplex-Drain-Token", "drain-bar")
.body(body.to_owned())
.send()
.await
.unwrap()
.status()
.as_u16()
}
#[test]
fn logplex_handles_router_log() {
let body = r#"267 <158>1 2020-01-08T22:33:57.353034+00:00 host heroku router - at=info method=GET path="/cart_link" host=lumberjack-store.timber.io request_id=05726858-c44e-4f94-9a20-37df73be9006 fwd="73.75.38.87" dyno=web.1 connect=1ms service=22ms status=304 bytes=656 protocol=http"#;
let mut rt = runtime();
let (rx, addr) = source(&mut rt);
rt.block_on_std(async move {
assert_eq!(200, send(addr, body).await);
let mut events = collect_n(rx, body.lines().count()).compat().await.unwrap();
let event = events.remove(0);
let log = event.as_log();
assert_eq!(
log[&event::log_schema().message_key()],
r#"at=info method=GET path="/cart_link" host=lumberjack-store.timber.io request_id=05726858-c44e-4f94-9a20-37df73be9006 fwd="73.75.38.87" dyno=web.1 connect=1ms service=22ms status=304 bytes=656 protocol=http"#.into()
);
assert_eq!(
log[&event::log_schema().timestamp_key()],
"2020-01-08T22:33:57.353034+00:00"
.parse::<DateTime<Utc>>()
.unwrap()
.into()
);
assert_eq!(log[&event::log_schema().host_key()], "host".into());
assert_eq!(log[event::log_schema().source_type_key()], "logplex".into());
});
}
#[test]
fn logplex_handles_normal_lines() {
let body = "267 <158>1 2020-01-08T22:33:57.353034+00:00 host heroku router - foo bar baz";
let event = super::line_to_event(body.into());
let log = event.as_log();
assert_eq!(
log[&event::log_schema().message_key()],
"foo bar baz".into()
);
assert_eq!(
log[&event::log_schema().timestamp_key()],
"2020-01-08T22:33:57.353034+00:00"
.parse::<DateTime<Utc>>()
.unwrap()
.into()
);
assert_eq!(log[&event::log_schema().host_key()], "host".into());
assert_eq!(log[event::log_schema().source_type_key()], "logplex".into());
}
#[test]
fn logplex_handles_malformed_lines()
|
#[test]
fn logplex_doesnt_blow_up_on_bad_framing() {
let body = "1000000 <158>1 2020-01-08T22:33:57.353034+00:00 host heroku router - i'm not that long";
let event = super::line_to_event(body.into());
let log = event.as_log();
assert_eq!(
log[&event::log_schema().message_key()],
"i'm not that long".into()
);
assert_eq!(
log[&event::log_schema().timestamp_key()],
"2020-01-08T22:33:57.353034+00:00"
.parse::<DateTime<Utc>>()
.unwrap()
.into()
);
assert_eq!(log[&event::log_schema().host_key()], "host".into());
assert_eq!(log[event::log_schema().source_type_key()], "logplex".into());
}
}
|
{
let body = "what am i doing here";
let event = super::line_to_event(body.into());
let log = event.as_log();
assert_eq!(
log[&event::log_schema().message_key()],
"what am i doing here".into()
);
assert!(log.get(&event::log_schema().timestamp_key()).is_some());
assert_eq!(log[event::log_schema().source_type_key()], "logplex".into());
}
|
lib.rs
|
// Copyright (c) The Libra Core Contributors
// SPDX-License-Identifier: Apache-2.0
use ir_to_bytecode::{compiler::compile_program, parser::ast};
use lazy_static::lazy_static;
use libra_config::config::{VMConfig, VMPublishingOption};
use libra_crypto::HashValue;
use libra_types::block_metadata::BlockMetadata;
use libra_types::{
account_address::AccountAddress,
byte_array::ByteArray,
transaction::{Script, TransactionArgument, SCRIPT_HASH_LENGTH},
};
use std::{collections::HashSet, iter::FromIterator};
use stdlib::{
stdlib_modules,
transaction_scripts::{
BLOCK_PROLOGUE_TXN_BODY, CREATE_ACCOUNT_TXN_BODY, MINT_TXN_BODY,
PEER_TO_PEER_TRANSFER_TXN_BODY, ROTATE_AUTHENTICATION_KEY_TXN_BODY,
ROTATE_CONSENSUS_PUBKEY_TXN_BODY,
},
};
#[cfg(any(test, feature = "fuzzing"))]
use vm::file_format::Bytecode;
lazy_static! {
static ref PEER_TO_PEER_TXN: Vec<u8> = { compile_script(&PEER_TO_PEER_TRANSFER_TXN_BODY) };
static ref CREATE_ACCOUNT_TXN: Vec<u8> = { compile_script(&CREATE_ACCOUNT_TXN_BODY) };
static ref ROTATE_AUTHENTICATION_KEY_TXN: Vec<u8> =
{ compile_script(&ROTATE_AUTHENTICATION_KEY_TXN_BODY) };
static ref ROTATE_CONSENSUS_PUBKEY_TXN: Vec<u8> =
{ compile_script(&ROTATE_CONSENSUS_PUBKEY_TXN_BODY) };
static ref MINT_TXN: Vec<u8> = { compile_script(&MINT_TXN_BODY) };
static ref BLOCK_PROLOGUE_TXN: Vec<u8> = { compile_script(&BLOCK_PROLOGUE_TXN_BODY) };
}
fn compile_script(body: &ast::Program) -> Vec<u8> {
let compiled_program =
compile_program(AccountAddress::default(), body.clone(), stdlib_modules())
.unwrap()
.0;
let mut script_bytes = vec![];
compiled_program
.script
.serialize(&mut script_bytes)
.unwrap();
script_bytes
}
/// Encode a program transferring `amount` coins from `sender` to `recipient`. Fails if there is no
/// account at the recipient address or if the sender's balance is lower than `amount`.
pub fn encode_transfer_script(recipient: &AccountAddress, amount: u64) -> Script {
Script::new(
PEER_TO_PEER_TXN.clone(),
vec![
TransactionArgument::Address(*recipient),
TransactionArgument::U64(amount),
],
)
}
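// A hypothetical usage sketch (the recipient address and amount are made up):
//   let recipient = AccountAddress::default();
//   let script = encode_transfer_script(&recipient, 1_000);
// The resulting script reuses the cached PEER_TO_PEER_TXN bytecode, so
// get_transaction_name() below maps it back to "peer_to_peer_transaction".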
/// Encode a program transferring `amount` coins from `sender` to `recipient` but pad the output
/// bytecode with unreachable instructions.
#[cfg(any(test, feature = "fuzzing"))]
pub fn encode_transfer_script_with_padding(
recipient: &AccountAddress,
amount: u64,
padding_size: u64,
) -> Script {
let mut script_mut = compile_program(
AccountAddress::default(),
PEER_TO_PEER_TRANSFER_TXN_BODY.clone(),
stdlib_modules(),
)
.unwrap()
.0
.script
.into_inner();
script_mut
.main
.code
.code
.extend(std::iter::repeat(Bytecode::Ret).take(padding_size as usize));
let mut script_bytes = vec![];
script_mut
.freeze()
.unwrap()
.serialize(&mut script_bytes)
.unwrap();
Script::new(
script_bytes,
vec![
TransactionArgument::Address(*recipient),
TransactionArgument::U64(amount),
],
)
}
/// Encode a program creating a fresh account at `account_address` with `initial_balance` coins
/// transferred from the sender's account balance. Fails if there is already an account at
/// `account_address` or if the sender's balance is lower than `initial_balance`.
pub fn encode_create_account_script(
account_address: &AccountAddress,
initial_balance: u64,
) -> Script {
Script::new(
|
CREATE_ACCOUNT_TXN.clone(),
vec![
TransactionArgument::Address(*account_address),
TransactionArgument::U64(initial_balance),
],
)
}
/// Encode a program that rotates the sender's consensus public key to `new_key`.
pub fn encode_rotate_consensus_pubkey_script(new_key: Vec<u8>) -> Script {
Script::new(
ROTATE_CONSENSUS_PUBKEY_TXN.clone(),
vec![TransactionArgument::ByteArray(ByteArray::new(new_key))],
)
}
/// Encode a program that rotates the sender's authentication key to `new_key`.
pub fn rotate_authentication_key_script(new_key: AccountAddress) -> Script {
Script::new(
ROTATE_AUTHENTICATION_KEY_TXN.clone(),
vec![TransactionArgument::ByteArray(ByteArray::new(
new_key.as_ref().to_vec(),
))],
)
}
// TODO: this should go away once we are no longer using it in tests
/// Encode a program creating `amount` coins for sender
pub fn encode_mint_script(sender: &AccountAddress, amount: u64) -> Script {
Script::new(
MINT_TXN.clone(),
vec![
TransactionArgument::Address(*sender),
TransactionArgument::U64(amount),
],
)
}
// TODO: this should go away once we are no longer using it in tests
/// Encode a program that runs the block prologue with the given block metadata
pub fn encode_block_prologue_script(block_metadata: BlockMetadata) -> Script {
let (id, timestamp, previous_vote, proposer) = block_metadata.into_inner().unwrap();
Script::new(
BLOCK_PROLOGUE_TXN.clone(),
vec![
TransactionArgument::U64(timestamp),
TransactionArgument::ByteArray(id),
TransactionArgument::ByteArray(previous_vote),
TransactionArgument::Address(proposer),
],
)
}
/// Returns a user-friendly mnemonic for the transaction type if the transaction is
/// for a known, whitelisted transaction.
pub fn get_transaction_name(code: &[u8]) -> String {
if code == &PEER_TO_PEER_TXN[..] {
return "peer_to_peer_transaction".to_string();
} else if code == &CREATE_ACCOUNT_TXN[..] {
return "create_account_transaction".to_string();
} else if code == &MINT_TXN[..] {
return "mint_transaction".to_string();
} else if code == &ROTATE_AUTHENTICATION_KEY_TXN[..] {
return "rotate_authentication_key_transaction".to_string();
}
"<unknown transaction>".to_string()
}
pub fn allowing_script_hashes() -> Vec<[u8; SCRIPT_HASH_LENGTH]> {
vec![
MINT_TXN.clone(),
PEER_TO_PEER_TXN.clone(),
ROTATE_AUTHENTICATION_KEY_TXN.clone(),
CREATE_ACCOUNT_TXN.clone(),
]
.into_iter()
.map(|s| *HashValue::from_sha3_256(&s).as_ref())
.collect()
}
pub fn default_config() -> VMConfig {
VMConfig {
publishing_options: VMPublishingOption::Locked(HashSet::from_iter(
allowing_script_hashes().into_iter(),
)),
}
}
| |
lib.rs
|
/*!
# typeck
The type checker is responsible for:
1. Determining the type of each expression.
2. Resolving methods and traits.
3. Guaranteeing that most type rules are met. ("Most?", you say, "why most?"
Well, dear reader, read on)
The main entry point is `check_crate()`. Type checking operates in
several major phases:
1. The collect phase first passes over all items and determines their
type, without examining their "innards".
2. Variance inference then runs to compute the variance of each parameter.
3. Coherence checks for overlapping or orphaned impls.
4. Finally, the check phase then checks function bodies and so forth.
Within the check phase, we check each function body one at a time
(bodies of function expressions are checked as part of the
containing function). Inference is used to supply types wherever
they are unknown. The actual checking of a function itself has
several phases (check, regionck, writeback), as discussed in the
documentation for the `check` module.
The type checker is defined into various submodules which are documented
independently:
- astconv: converts the AST representation of types
into the `ty` representation.
- collect: computes the types of each top-level item and enters them into
the `tcx.types` table for later use.
- coherence: enforces coherence rules, builds some tables.
- variance: variance inference
- outlives: outlives inference
- check: walks over function bodies and type checks them, inferring types for
local variables, type parameters, etc as necessary.
- infer: finds the types to use for each type variable such that
all subtyping and assignment constraints are met. In essence, the check
module specifies the constraints, and the infer module solves them.
## Note
This API is completely unstable and subject to change.
*/
#![doc(html_root_url = "https://doc.rust-lang.org/nightly/")]
#![allow(non_camel_case_types)]
#![feature(box_patterns)]
#![feature(box_syntax)]
#![feature(crate_visibility_modifier)]
#![feature(exhaustive_patterns)]
#![feature(in_band_lifetimes)]
#![feature(nll)]
#![feature(rustc_diagnostic_macros)]
#![feature(slice_patterns)]
#![feature(never_type)]
#![recursion_limit="256"]
#![deny(rust_2018_idioms)]
#![deny(internal)]
#![deny(unused_lifetimes)]
#![allow(explicit_outlives_requirements)]
#[macro_use] extern crate log;
#[macro_use] extern crate syntax;
#[macro_use] extern crate rustc;
// N.B., this module needs to be declared first so diagnostics are
// registered before they are used.
mod error_codes;
mod astconv;
mod check;
mod check_unused;
mod coherence;
mod collect;
mod constrained_generic_params;
mod structured_errors;
mod impl_wf_check;
mod namespace;
mod outlives;
mod variance;
use rustc_target::spec::abi::Abi;
use rustc::hir::{self, Node};
use rustc::hir::def_id::{DefId, LOCAL_CRATE};
use rustc::infer::InferOk;
use rustc::lint;
use rustc::middle;
use rustc::session;
use rustc::util::common::ErrorReported;
use rustc::session::config::{EntryFnType, nightly_options};
use rustc::traits::{ObligationCause, ObligationCauseCode, TraitEngine, TraitEngineExt};
use rustc::ty::subst::SubstsRef;
use rustc::ty::{self, Ty, TyCtxt};
use rustc::ty::query::Providers;
use rustc::util;
use syntax_pos::Span;
use util::common::time;
use std::iter;
use astconv::{AstConv, Bounds};
pub use collect::checked_type_of;
pub struct
|
<'tcx> {
substs: SubstsRef<'tcx>,
ty: Ty<'tcx>,
}
fn check_type_alias_enum_variants_enabled<'tcx>(tcx: TyCtxt<'tcx>, span: Span) {
if !tcx.features().type_alias_enum_variants {
let mut err = tcx.sess.struct_span_err(
span,
"enum variants on type aliases are experimental"
);
if nightly_options::is_nightly_build() {
help!(&mut err,
"add `#![feature(type_alias_enum_variants)]` to the \
crate attributes to enable");
}
err.emit();
}
}
fn require_c_abi_if_c_variadic(tcx: TyCtxt<'_>, decl: &hir::FnDecl, abi: Abi, span: Span) {
if decl.c_variadic && !(abi == Abi::C || abi == Abi::Cdecl) {
let mut err = struct_span_err!(tcx.sess, span, E0045,
"C-variadic function must have C or cdecl calling convention");
err.span_label(span, "C-variadics require C or cdecl calling convention").emit();
}
}
fn require_same_types<'tcx>(
tcx: TyCtxt<'tcx>,
cause: &ObligationCause<'tcx>,
expected: Ty<'tcx>,
actual: Ty<'tcx>,
) -> bool {
tcx.infer_ctxt().enter(|ref infcx| {
let param_env = ty::ParamEnv::empty();
let mut fulfill_cx = TraitEngine::new(infcx.tcx);
match infcx.at(&cause, param_env).eq(expected, actual) {
Ok(InferOk { obligations, .. }) => {
fulfill_cx.register_predicate_obligations(infcx, obligations);
}
Err(err) => {
infcx.report_mismatched_types(cause, expected, actual, err).emit();
return false;
}
}
match fulfill_cx.select_all_or_error(infcx) {
Ok(()) => true,
Err(errors) => {
infcx.report_fulfillment_errors(&errors, None, false);
false
}
}
})
}
fn check_main_fn_ty<'tcx>(tcx: TyCtxt<'tcx>, main_def_id: DefId) {
let main_id = tcx.hir().as_local_hir_id(main_def_id).unwrap();
let main_span = tcx.def_span(main_def_id);
let main_t = tcx.type_of(main_def_id);
match main_t.sty {
ty::FnDef(..) => {
if let Some(Node::Item(it)) = tcx.hir().find_by_hir_id(main_id) {
if let hir::ItemKind::Fn(.., ref generics, _) = it.node {
let mut error = false;
if !generics.params.is_empty() {
let msg = "`main` function is not allowed to have generic \
parameters".to_owned();
let label = "`main` cannot have generic parameters".to_string();
struct_span_err!(tcx.sess, generics.span, E0131, "{}", msg)
.span_label(generics.span, label)
.emit();
error = true;
}
if let Some(sp) = generics.where_clause.span() {
struct_span_err!(tcx.sess, sp, E0646,
"`main` function is not allowed to have a `where` clause")
.span_label(sp, "`main` cannot have a `where` clause")
.emit();
error = true;
}
if error {
return;
}
}
}
let actual = tcx.fn_sig(main_def_id);
let expected_return_type = if tcx.lang_items().termination().is_some() {
// we take the return type of the given main function, the real check is done
// in `check_fn`
actual.output().skip_binder()
} else {
// standard () main return type
tcx.mk_unit()
};
let se_ty = tcx.mk_fn_ptr(ty::Binder::bind(
tcx.mk_fn_sig(
iter::empty(),
expected_return_type,
false,
hir::Unsafety::Normal,
Abi::Rust
)
));
require_same_types(
tcx,
&ObligationCause::new(main_span, main_id, ObligationCauseCode::MainFunctionType),
se_ty,
tcx.mk_fn_ptr(actual));
}
_ => {
span_bug!(main_span,
"main has a non-function type: found `{}`",
main_t);
}
}
}
fn check_start_fn_ty<'tcx>(tcx: TyCtxt<'tcx>, start_def_id: DefId) {
let start_id = tcx.hir().as_local_hir_id(start_def_id).unwrap();
let start_span = tcx.def_span(start_def_id);
let start_t = tcx.type_of(start_def_id);
match start_t.sty {
ty::FnDef(..) => {
if let Some(Node::Item(it)) = tcx.hir().find_by_hir_id(start_id) {
if let hir::ItemKind::Fn(.., ref generics, _) = it.node {
let mut error = false;
if !generics.params.is_empty() {
struct_span_err!(tcx.sess, generics.span, E0132,
"start function is not allowed to have type parameters")
.span_label(generics.span,
"start function cannot have type parameters")
.emit();
error = true;
}
if let Some(sp) = generics.where_clause.span() {
struct_span_err!(tcx.sess, sp, E0647,
"start function is not allowed to have a `where` clause")
.span_label(sp, "start function cannot have a `where` clause")
.emit();
error = true;
}
if error {
return;
}
}
}
let se_ty = tcx.mk_fn_ptr(ty::Binder::bind(
tcx.mk_fn_sig(
[
tcx.types.isize,
tcx.mk_imm_ptr(tcx.mk_imm_ptr(tcx.types.u8))
].iter().cloned(),
tcx.types.isize,
false,
hir::Unsafety::Normal,
Abi::Rust
)
));
require_same_types(
tcx,
&ObligationCause::new(start_span, start_id, ObligationCauseCode::StartFunctionType),
se_ty,
tcx.mk_fn_ptr(tcx.fn_sig(start_def_id)));
}
_ => {
span_bug!(start_span,
"start has a non-function type: found `{}`",
start_t);
}
}
}
fn check_for_entry_fn<'tcx>(tcx: TyCtxt<'tcx>) {
match tcx.entry_fn(LOCAL_CRATE) {
Some((def_id, EntryFnType::Main)) => check_main_fn_ty(tcx, def_id),
Some((def_id, EntryFnType::Start)) => check_start_fn_ty(tcx, def_id),
_ => {}
}
}
pub fn provide(providers: &mut Providers<'_>) {
collect::provide(providers);
coherence::provide(providers);
check::provide(providers);
variance::provide(providers);
outlives::provide(providers);
impl_wf_check::provide(providers);
}
pub fn check_crate<'tcx>(tcx: TyCtxt<'tcx>) -> Result<(), ErrorReported> {
tcx.sess.profiler(|p| p.start_activity("type-check crate"));
// this ensures that later parts of type checking can assume that items
// have valid types and not error
tcx.sess.track_errors(|| {
time(tcx.sess, "type collecting", || {
for &module in tcx.hir().krate().modules.keys() {
tcx.ensure().collect_mod_item_types(tcx.hir().local_def_id(module));
}
});
})?;
if tcx.features().rustc_attrs {
tcx.sess.track_errors(|| {
time(tcx.sess, "outlives testing", ||
outlives::test::test_inferred_outlives(tcx));
})?;
}
tcx.sess.track_errors(|| {
time(tcx.sess, "impl wf inference", ||
impl_wf_check::impl_wf_check(tcx));
})?;
tcx.sess.track_errors(|| {
time(tcx.sess, "coherence checking", ||
coherence::check_coherence(tcx));
})?;
if tcx.features().rustc_attrs {
tcx.sess.track_errors(|| {
time(tcx.sess, "variance testing", ||
variance::test::test_variance(tcx));
})?;
}
time(tcx.sess, "wf checking", || check::check_wf_new(tcx))?;
time(tcx.sess, "item-types checking", || {
for &module in tcx.hir().krate().modules.keys() {
tcx.ensure().check_mod_item_types(tcx.hir().local_def_id(module));
}
});
time(tcx.sess, "item-bodies checking", || tcx.typeck_item_bodies(LOCAL_CRATE));
check_unused::check_crate(tcx);
check_for_entry_fn(tcx);
tcx.sess.profiler(|p| p.end_activity("type-check crate"));
if tcx.sess.err_count() == 0 {
Ok(())
} else {
Err(ErrorReported)
}
}
/// A quasi-deprecated helper used in rustdoc and clippy to get
/// the type from a HIR node.
pub fn hir_ty_to_ty<'tcx>(tcx: TyCtxt<'tcx>, hir_ty: &hir::Ty) -> Ty<'tcx> {
// In case there are any projections, etc., find the "environment"
// def-ID that will be used to determine the traits/predicates in
// scope. This is derived from the enclosing item-like thing.
let env_node_id = tcx.hir().get_parent_item(hir_ty.hir_id);
let env_def_id = tcx.hir().local_def_id_from_hir_id(env_node_id);
let item_cx = self::collect::ItemCtxt::new(tcx, env_def_id);
astconv::AstConv::ast_ty_to_ty(&item_cx, hir_ty)
}
pub fn hir_trait_to_predicates<'tcx>(
tcx: TyCtxt<'tcx>,
hir_trait: &hir::TraitRef,
) -> (ty::PolyTraitRef<'tcx>, Bounds<'tcx>) {
// In case there are any projections, etc., find the "environment"
// def-ID that will be used to determine the traits/predicates in
// scope. This is derived from the enclosing item-like thing.
let env_hir_id = tcx.hir().get_parent_item(hir_trait.hir_ref_id);
let env_def_id = tcx.hir().local_def_id_from_hir_id(env_hir_id);
let item_cx = self::collect::ItemCtxt::new(tcx, env_def_id);
let mut bounds = Bounds::default();
let (principal, _) = AstConv::instantiate_poly_trait_ref_inner(
&item_cx, hir_trait, tcx.types.err, &mut bounds, true
);
(principal, bounds)
}
__build_diagnostic_array! { librustc_typeck, DIAGNOSTICS }
|
TypeAndSubsts
|
fast.rs
|
// SPDX-License-Identifier: MIT
// Copyright (C) 2018-present iced project and contributors
pub(super) mod enums;
mod fmt_data;
mod fmt_tbl;
mod mem_size_tbl;
mod options;
mod pseudo_ops_fast;
mod regs;
#[cfg(test)]
mod tests;
mod trait_options;
mod trait_options_fast_fmt;
use crate::formatter::fast::enums::*;
use crate::formatter::fast::fmt_tbl::FMT_DATA;
use crate::formatter::fast::mem_size_tbl::MEM_SIZE_TBL;
pub use crate::formatter::fast::options::*;
use crate::formatter::fast::pseudo_ops_fast::get_pseudo_ops;
use crate::formatter::fast::regs::REGS_TBL;
pub use crate::formatter::fast::trait_options::*;
pub use crate::formatter::fast::trait_options_fast_fmt::*;
use crate::formatter::fmt_utils_all::*;
use crate::formatter::instruction_internal::{self, get_address_size_in_bytes};
use crate::formatter::*;
use crate::iced_constants::IcedConstants;
use crate::*;
use alloc::boxed::Box;
use alloc::vec::Vec;
use core::marker::PhantomData;
use core::{mem, ptr, slice};
use static_assertions::{const_assert, const_assert_eq};
// full fmt'd str = "prefixes mnemonic op0<decorators1>, op1, op2, op3, op4<decorators2>"
// prefixes = "es xacquire xrelease lock notrack repe repne "
// mnemonic = "prefetch_exclusive"
// op sep = ", "
// op = "fpustate108 ptr fs:[rax+zmm31*8+0x12345678]"
// - longest 'xxxx ptr' and longest memory operand
// op = "0x123456789ABCDEF0"
// op = "0x1234:0x12345678"
// op = "zmm31"
// op = "offset symbol (123456789ABCDEF0)"
// - symbol can have any length
// <decorators1> = "{k3}{z}"
// <decorators2> = "{rn-sae}"
// symbol = any length + optional address " (123456789ABCDEF0)"
// full = "es xacquire xrelease lock notrack repe repne prefetch_exclusive fpustate108 ptr fs:[rax+zmm31*8+0x12345678]{k3}{z}, fpustate108 ptr fs:[rax+zmm31*8+0x12345678], fpustate108 ptr fs:[rax+zmm31*8+0x12345678], fpustate108 ptr fs:[rax+zmm31*8+0x12345678], fpustate108 ptr fs:[rax+zmm31*8+0x12345678]{rn-sae}"
// - it's not possible to have 5 `fpustate108 ptr fs:[rax+zmm31*8+0x12345678]` operands
// so we'll never get a formatted string this long if there's no symbol resolver.
#[allow(dead_code)]
const MAX_FMT_INSTR_LEN: usize = {
const MAX_PREFIXES_LEN: usize = "es xacquire xrelease lock notrack repe repne ".len();
const MAX_OPERAND_LEN: usize = "fpustate108 ptr fs:[rax+zmm31*8+0x12345678]".len();
const MAX_DECORATOR1_LEN: usize = "{k3}{z}".len();
const MAX_DECORATOR2_LEN: usize = "{rn-sae}".len();
MAX_PREFIXES_LEN
+ crate::formatter::strings_data::MAX_STRING_LEN
+ MAX_DECORATOR1_LEN
+ (IcedConstants::MAX_OP_COUNT * (2/*", "*/ + MAX_OPERAND_LEN)) - 1/*','*/
+ MAX_DECORATOR2_LEN
};
const_assert_eq!(
MAX_FMT_INSTR_LEN,
// Max mnemonic len
crate::formatter::strings_data::MAX_STRING_LEN
+ "es xacquire xrelease lock notrack repe repne \
fpustate108 ptr fs:[rax+zmm31*8+0x12345678]{k3}{z}, \
fpustate108 ptr fs:[rax+zmm31*8+0x12345678], \
fpustate108 ptr fs:[rax+zmm31*8+0x12345678], \
fpustate108 ptr fs:[rax+zmm31*8+0x12345678], \
fpustate108 ptr fs:[rax+zmm31*8+0x12345678]{rn-sae}"
.len()
);
// Make sure it doesn't grow too much without us knowing about it (eg. if more operands are added)
const_assert!(MAX_FMT_INSTR_LEN < 350);
// Creates a fast string type. It contains one ptr to the len (u8) + valid utf8 string.
// The utf8 string has enough bytes following it (eg. padding or the next fast str instance)
// so it's possible to read up to Self::SIZE bytes without crashing or causing UB.
// Since the compiler knows that Self::SIZE is a constant, it can optimize the string copy,
// eg. if Self::SIZE == 8, it can read one unaligned u64 and write one unaligned u64.
macro_rules! mk_fast_str_ty {
($ty_name:ident, $size:literal) => {
#[repr(transparent)]
#[derive(Copy, Clone)]
struct $ty_name {
// offset 0: u8, length in bytes of utf8 string
// offset 1: [u8; SIZE] SIZE bytes can be read but only the first len() bytes are part of the string
len_data: *const u8,
}
impl $ty_name {
const SIZE: usize = $size;
#[allow(dead_code)]
fn new(len_data: *const u8) -> Self {
debug_assert!(unsafe { *len_data as usize <= <$ty_name>::SIZE });
Self { len_data }
}
fn len(self) -> usize {
unsafe { *self.len_data as usize }
}
fn utf8_data(self) -> *const u8 {
unsafe { self.len_data.add(1) }
}
#[allow(dead_code)]
fn get_slice(self) -> &'static [u8] {
unsafe { slice::from_raw_parts(self.utf8_data(), self.len()) }
}
}
// SAFETY: The ptr field points to a static immutable u8 array.
unsafe impl Send for $ty_name {}
unsafe impl Sync for $ty_name {}
};
}
// FastString2 isn't used since the code needs a 66h prefix (if target CPU is x86)
mk_fast_str_ty! {FastString4, 4} // ld 4
mk_fast_str_ty! {FastString8, 8} // ld 8
mk_fast_str_ty! {FastString12, 12} // ld 8 + ld 4
mk_fast_str_ty! {FastString16, 16} // ld 16
mk_fast_str_ty! {FastString20, 20} // ld 16 + ld 4
type FastStringMnemonic = FastString20;
type FastStringMemorySize = FastString16;
type FastStringRegister = FastString8;
// It doesn't seem to be possible to const-verify the arg (string literal) in a const fn so we create it with this macro
macro_rules! mk_const_fast_str {
// $fast_ty = FastStringN where N is some integer
// $str = padded string. First byte is the string len and the rest is the utf8 data
// of $fast_ty::SIZE bytes padded with any bytes if needed
($fast_ty:tt, $str:literal) => {{
const STR: &str = $str;
const_assert!(STR.len() == 1 + <$fast_ty>::SIZE);
const_assert!(STR.as_bytes()[0] as usize <= <$fast_ty>::SIZE);
$fast_ty { len_data: STR.as_ptr() }
}};
}
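// Layout of the padded literal, using the SCALE_NUMBERS entries below as an example:
// byte 0 holds the string length (e.g. 0x02 for "*1") and the remaining <$fast_ty>::SIZE
// bytes hold the utf8 data plus padding, so fixed-size reads never go out of bounds.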
macro_rules! verify_output_has_enough_bytes_left {
($dst:ident, $dst_next_p:ident, $num_bytes:expr) => {
// SAFETY: This is an opt out feature so if this returns `false`, they know what they're doing.
if unsafe { TraitOptions::verify_output_has_enough_bytes_left() } {
// Verify that there's enough bytes left. This should never fail (because we've called
// `$dst.reserve(MAX_FMT_INSTR_LEN)`).
iced_assert!($dst.capacity() - ($dst_next_p as usize - $dst.as_ptr() as usize) >= $num_bytes);
}
};
}
macro_rules! write_fast_str {
// $dst = dest vector (from output.as_mut_vec())
// $dst_next_p = next ptr to write in $dst
// $source_ty = source fast string type
// $source = source fast string instance, must be the same type as $source_ty (compiler will give an error if it's not the same type)
($dst:ident, $dst_next_p:ident, $source_ty:ty, $source:ident) => {{
const DATA_LEN: usize = <$source_ty>::SIZE;
verify_output_has_enough_bytes_left!($dst, $dst_next_p, DATA_LEN);
// SAFETY:
// - $source is a valid utf8 string and it points to DATA_LEN readable bytes
// ($source is never from user code)
// - $source is not in $dst ($source is static)
// - $dst is writable with at least DATA_LEN bytes left (see assert above)
// - $dst is at a valid utf8 char boundary (we're appending bytes)
unsafe {
ptr::copy_nonoverlapping(<$source_ty>::utf8_data($source), $dst_next_p, DATA_LEN);
}
debug_assert!(<$source_ty>::len($source) <= DATA_LEN);
// SAFETY:
// - $source.len() <= DATA_LEN so the new ptr is valid
$dst_next_p = unsafe { $dst_next_p.add(<$source_ty>::len($source)) };
}};
}
static HEX_GROUP2_UPPER: &str = "\
000102030405060708090A0B0C0D0E0F\
101112131415161718191A1B1C1D1E1F\
202122232425262728292A2B2C2D2E2F\
303132333435363738393A3B3C3D3E3F\
404142434445464748494A4B4C4D4E4F\
505152535455565758595A5B5C5D5E5F\
606162636465666768696A6B6C6D6E6F\
707172737475767778797A7B7C7D7E7F\
808182838485868788898A8B8C8D8E8F\
909192939495969798999A9B9C9D9E9F\
A0A1A2A3A4A5A6A7A8A9AAABACADAEAF\
B0B1B2B3B4B5B6B7B8B9BABBBCBDBEBF\
C0C1C2C3C4C5C6C7C8C9CACBCCCDCECF\
D0D1D2D3D4D5D6D7D8D9DADBDCDDDEDF\
E0E1E2E3E4E5E6E7E8E9EAEBECEDEEEF\
F0F1F2F3F4F5F6F7F8F9FAFBFCFDFEFF\
__"; // Padding so we can read 4 bytes at every index 0-0xFF inclusive
macro_rules! write_fast_hex2_rw_4bytes {
($dst:ident, $dst_next_p:ident, $value:ident, $lower_or_value:ident, $check_limit:literal) => {{
const DATA_LEN: usize = 4;
const REAL_LEN: usize = 2;
if $check_limit {
verify_output_has_enough_bytes_left!($dst, $dst_next_p, DATA_LEN);
}
// We'll read DATA_LEN (4) bytes so we must be able to access up to and including offset 0x201
debug_assert_eq!(HEX_GROUP2_UPPER.len(), 0xFF * REAL_LEN + DATA_LEN);
debug_assert!($value < 0x100);
// $lower_or_value == 0 if we should use uppercase hex digits or 0x2020_2020 to use lowercase hex digits.
// If LE, we need xxxx2020 and if BE, we need 2020xxxx.
debug_assert!($lower_or_value == 0 || $lower_or_value == 0x2020_2020);
// SAFETY:
// - HEX_GROUP2_UPPER is a valid utf8 string and every valid 2-digit hex number
// 0-0xFF can be used as an index * REAL_LEN (2) to read DATA_LEN (4) bytes.
// - $dst is writable with at least DATA_LEN bytes left (see assert above)
// - $dst is at a valid utf8 char boundary (we're appending bytes)
#[allow(trivial_numeric_casts)]
unsafe {
let src_ptr = HEX_GROUP2_UPPER.as_ptr().add(($value as usize) * REAL_LEN) as *const u32;
ptr::write_unaligned($dst_next_p as *mut u32, ptr::read_unaligned(src_ptr) | $lower_or_value);
}
const_assert!(REAL_LEN <= DATA_LEN);
// SAFETY:
// - REAL_LEN <= DATA_LEN so the new ptr is valid since there's at least DATA_LEN bytes available in $dst
$dst_next_p = unsafe { $dst_next_p.add(REAL_LEN) };
}};
}
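// Example of the $lower_or_value trick above: for $value == 0x4F the table read yields "4F"
// (0x34, 0x46); OR-ing with 0x2020_2020 gives "4f" (0x34, 0x66) since ASCII letters become
// lowercase when bit 0x20 is set, while digits already have that bit set and are unchanged.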
macro_rules! write_fast_ascii_char {
// $dst = dest vector (from output.as_mut_vec())
// $dst_next_p = next ptr to write in $dst
// $ch = char to write (must be ASCII)
($dst:ident, $dst_next_p:ident, $ch:expr, $check_limit:literal) => {{
const DATA_LEN: usize = 1;
if $check_limit {
verify_output_has_enough_bytes_left!($dst, $dst_next_p, DATA_LEN);
}
#[allow(trivial_numeric_casts)]
{
debug_assert!($ch as u32 <= 0x7F);
}
// SAFETY:
// - $ch is ASCII (valid 1-byte utf8 char)
// - $dst is writable with at least DATA_LEN bytes left (see assert above)
// - $dst is at a valid utf8 char boundary (we're appending bytes)
#[allow(trivial_numeric_casts)]
unsafe {
*$dst_next_p = $ch as u8;
}
// SAFETY: There's at least one byte left so the new ptr is valid
$dst_next_p = unsafe { $dst_next_p.add(1) };
}};
}
macro_rules! write_fast_ascii_char_lit {
// $dst = dest vector (from output.as_mut_vec())
// $dst_next_p = next ptr to write in $dst
// $ch = char to write (must be ASCII)
($dst:ident, $dst_next_p:ident, $ch:tt, $check_limit:literal) => {{
const_assert!($ch as u32 <= 0x7F);
write_fast_ascii_char!($dst, $dst_next_p, $ch, $check_limit);
}};
}
macro_rules! update_vec_len {
// $dst = dest vector (from output.as_mut_vec())
// $dst_next_p = next ptr to write in $dst
($dst:ident, $dst_next_p:ident) => {
// SAFETY:
// - we only write valid utf8 strings and ASCII chars to vec
// - We've written all chars up to but not including $dst_next_p so all visible data have been initialized
// - $dst_next_p points to a valid location inside the vec or at most 1 byte past the last valid byte
unsafe {
$dst.set_len($dst_next_p as usize - $dst.as_ptr() as usize);
}
};
}
macro_rules! use_dst_only_now {
// $dst = dest vector (from output.as_mut_vec())
// $dst_next_p = next ptr to write in $dst
($dst:ident, $dst_next_p:ident) => {
update_vec_len!($dst, $dst_next_p);
// Make sure we don't use it accidentally
#[allow(unused_variables)]
let $dst_next_p: () = ();
};
}
macro_rules! use_dst_next_p_now {
// $dst = dest vector (from output.as_mut_vec())
// $dst_next_p = next ptr to write in $dst
($dst:ident, $dst_next_p:ident) => {
// Need to make sure we have enough bytes available again because we could've
// written a very long symbol name.
$dst.reserve(MAX_FMT_INSTR_LEN);
// Restore variable
let mut $dst_next_p = unsafe { $dst.as_mut_ptr().add($dst.len()) };
};
}
// Macros to safely call the methods (make sure the return value is stored back in dst_next_p)
macro_rules! call_format_register {
($slf:ident, $dst:ident, $dst_next_p:ident, $reg:expr) => {{
$dst_next_p = $slf.format_register($dst, $dst_next_p, $reg);
}};
}
macro_rules! call_format_number {
($slf:ident, $dst:ident, $dst_next_p:ident, $imm:expr) => {{
$dst_next_p = $slf.format_number($dst, $dst_next_p, $imm);
}};
}
macro_rules! call_write_symbol {
($slf:ident, $dst:ident, $dst_next_p:ident, $imm:expr, $sym:expr) => {{
$dst_next_p = $slf.write_symbol($dst, $dst_next_p, $imm, $sym);
}};
}
macro_rules! call_write_symbol2 {
($slf:ident, $dst:ident, $dst_next_p:ident, $imm:expr, $sym:expr, $write_minus_if_signed:literal) => {{
$dst_next_p = $slf.write_symbol2($dst, $dst_next_p, $imm, $sym, $write_minus_if_signed);
}};
}
macro_rules! format_memory_else_block {
($slf:ident, $dst:ident, $dst_next_p:ident, $need_plus:ident, $displ_size:ident, $displ:ident, $addr_size:ident) => {
if !$need_plus || ($displ_size != 0 && $displ != 0) {
if $need_plus {
let c = if $addr_size == 8 {
if $displ < 0 {
$displ = $displ.wrapping_neg();
'-'
} else {
'+'
}
} else if $addr_size == 4 {
if ($displ as i32) < 0 {
$displ = ($displ as i32).wrapping_neg() as u32 as i64;
'-'
} else {
'+'
}
} else {
debug_assert_eq!($addr_size, 2);
if ($displ as i16) < 0 {
$displ = ($displ as i16).wrapping_neg() as u16 as i64;
'-'
} else {
'+'
}
};
write_fast_ascii_char!($dst, $dst_next_p, c, true);
}
call_format_number!($slf, $dst, $dst_next_p, $displ as u64);
}
};
}
// Only one caller has variable args starting from $seg_reg so this is a macro. The compiler is able
// to remove lots of code in all the other cases with literal macro args.
macro_rules! format_memory_code {
($slf:ident, $dst:ident, $dst_next_p:ident, $instruction:ident, $operand:expr, $seg_reg:expr, $base_reg:expr, $index_reg:expr,
$scale:expr, $displ_size:expr, $displ:expr, $addr_size:expr) => {
#[allow(trivial_numeric_casts)]
{
let mut base_reg = $base_reg;
let mut displ_size: u32 = $displ_size;
let mut displ: i64 = $displ;
debug_assert!(get_address_size_in_bytes(base_reg, $index_reg, displ_size, $instruction.code_size()) == $addr_size);
let abs_addr;
if base_reg == Register::RIP {
abs_addr = displ as u64;
if TraitOptions::rip_relative_addresses(&$slf.d.options) {
displ = displ.wrapping_sub($instruction.next_ip() as i64);
} else {
debug_assert_eq!($index_reg, Register::None);
base_reg = Register::None;
}
displ_size = 8;
} else if base_reg == Register::EIP {
abs_addr = displ as u32 as u64;
if TraitOptions::rip_relative_addresses(&$slf.d.options) {
displ = (displ as u32).wrapping_sub($instruction.next_ip32()) as i32 as i64;
} else {
debug_assert_eq!($index_reg, Register::None);
base_reg = Register::None;
}
displ_size = 4;
} else {
abs_addr = displ as u64;
}
let show_mem_size = TraitOptions::always_show_memory_size(&$slf.d.options) || {
let flags = $slf.d.code_flags[$instruction.code() as usize];
(flags & (FastFmtFlags::FORCE_MEM_SIZE as u8)) != 0 || $instruction.is_broadcast()
};
if show_mem_size {
let keywords = $slf.d.all_memory_sizes[$instruction.memory_size() as usize];
write_fast_str!($dst, $dst_next_p, FastStringMemorySize, keywords);
}
let seg_override;
if TraitOptions::always_show_segment_register(&$slf.d.options)
|| ({
seg_override = $instruction.segment_prefix();
seg_override != Register::None
} && !{
let notrack_prefix = seg_override == Register::DS && is_notrack_prefix_branch($instruction.code()) && {
let code_size = $instruction.code_size();
!((code_size == CodeSize::Code16 || code_size == CodeSize::Code32)
&& (base_reg == Register::BP || base_reg == Register::EBP || base_reg == Register::ESP))
};
notrack_prefix
} && (SpecializedFormatter::<TraitOptions>::SHOW_USELESS_PREFIXES
|| show_segment_prefix_bool(Register::None, $instruction, SpecializedFormatter::<TraitOptions>::SHOW_USELESS_PREFIXES)))
{
call_format_register!($slf, $dst, $dst_next_p, $seg_reg);
write_fast_ascii_char_lit!($dst, $dst_next_p, ':', true);
}
write_fast_ascii_char_lit!($dst, $dst_next_p, '[', true);
let mut need_plus = if base_reg != Register::None {
call_format_register!($slf, $dst, $dst_next_p, base_reg);
true
} else {
false
};
if $index_reg != Register::None {
if need_plus {
write_fast_ascii_char_lit!($dst, $dst_next_p, '+', true);
}
need_plus = true;
call_format_register!($slf, $dst, $dst_next_p, $index_reg);
// [rsi] = base reg, [rsi*1] = index reg
if $addr_size != 2 && ($scale != 0 || base_reg == Register::None) {
let scale_str = SCALE_NUMBERS[$scale as usize];
write_fast_str!($dst, $dst_next_p, FastString4, scale_str);
}
}
if TraitOptions::ENABLE_SYMBOL_RESOLVER {
// See OpKind::NearBranch16 in format() for why we clone the symbols
let mut vec: Vec<SymResTextPart<'_>> = Vec::new();
if let Some(ref symbol) = if let Some(ref mut symbol_resolver) = $slf.symbol_resolver {
to_owned(symbol_resolver.symbol($instruction, $operand, Some($operand), abs_addr, $addr_size), &mut vec)
} else {
None
} {
if need_plus {
let c = if (symbol.flags & SymbolFlags::SIGNED) != 0 { '-' } else { '+' };
write_fast_ascii_char!($dst, $dst_next_p, c, true);
} else if (symbol.flags & SymbolFlags::SIGNED) != 0 {
write_fast_ascii_char_lit!($dst, $dst_next_p, '-', true);
}
call_write_symbol2!($slf, $dst, $dst_next_p, abs_addr, symbol, false);
} else {
let addr_size = $addr_size;
format_memory_else_block!($slf, $dst, $dst_next_p, need_plus, displ_size, displ, addr_size);
}
} else {
let addr_size = $addr_size;
format_memory_else_block!($slf, $dst, $dst_next_p, need_plus, displ_size, displ, addr_size);
}
write_fast_ascii_char_lit!($dst, $dst_next_p, ']', true);
}
};
}
macro_rules! call_format_memory {
($slf:ident, $dst:ident, $dst_next_p:ident, $instruction:ident, $operand:ident, $seg_reg:expr, $base_reg:tt,
$index_reg:tt, $scale:tt, $displ_size:tt, $displ:tt, $addr_size:tt $(,)?) => {
// This speeds up SpecializedFormatter but slows down FastFormatter so detect which
// formatter it is. Both paths are tested (same tests).
// This is fugly but the whole point of this formatter is to be fast which can result in ugly code.
{
if TraitOptions::__IS_FAST_FORMATTER {
// Less code: call a method
$dst_next_p = $slf.format_memory(
$dst,
$dst_next_p,
$instruction,
$operand,
$seg_reg,
$base_reg,
$index_reg,
$scale,
$displ_size,
$displ,
$addr_size,
)
} else {
// The options are all most likely hard coded so inline and specialize the 'method call'
format_memory_code!(
$slf,
$dst,
$dst_next_p,
$instruction,
$operand,
$seg_reg,
$base_reg,
$index_reg,
$scale,
$displ_size,
$displ,
$addr_size
)
}
}
};
}
static SCALE_NUMBERS: [FastString4; 4] = [
mk_const_fast_str!(FastString4, "\x02*1 "),
mk_const_fast_str!(FastString4, "\x02*2 "),
mk_const_fast_str!(FastString4, "\x02*4 "),
mk_const_fast_str!(FastString4, "\x02*8 "),
];
const_assert_eq!(RoundingControl::None as u32, 0);
const_assert_eq!(RoundingControl::RoundToNearest as u32, 1);
const_assert_eq!(RoundingControl::RoundDown as u32, 2);
const_assert_eq!(RoundingControl::RoundUp as u32, 3);
const_assert_eq!(RoundingControl::RoundTowardZero as u32, 4);
static RC_STRINGS: [FastString8; 5] = [
mk_const_fast_str!(FastString8, "\x00 "),
mk_const_fast_str!(FastString8, "\x08{rn-sae}"),
mk_const_fast_str!(FastString8, "\x08{rd-sae}"),
mk_const_fast_str!(FastString8, "\x08{ru-sae}"),
mk_const_fast_str!(FastString8, "\x08{rz-sae}"),
];
struct FmtTableData {
mnemonics: Box<[FastStringMnemonic; IcedConstants::CODE_ENUM_COUNT]>,
flags: Box<[u8; IcedConstants::CODE_ENUM_COUNT]>, // FastFmtFlags
}
/// Fast specialized formatter with less formatting options and with a masm-like syntax.
/// Use it if formatting speed is more important than being able to re-assemble formatted instructions.
///
/// The `TraitOptions` generic parameter is a [`SpecializedFormatterTraitOptions`] trait. It can
/// be used to hard code options so the compiler can create a smaller and faster formatter.
/// See also [`FastFormatter`] which allows changing the options at runtime at the cost of
/// being a little bit slower and using a little bit more code.
///
/// This formatter is ~3.3x faster than the gas/intel/masm/nasm formatters (the time includes decoding + formatting).
///
/// [`SpecializedFormatterTraitOptions`]: trait.SpecializedFormatterTraitOptions.html
/// [`FastFormatter`]: type.FastFormatter.html
///
/// # Examples
///
/// ```
/// use iced_x86::*;
///
/// let bytes = b"\x62\xF2\x4F\xDD\x72\x50\x01";
/// let mut decoder = Decoder::new(64, bytes, DecoderOptions::NONE);
/// let instr = decoder.decode();
///
/// // If you like the default options, you can also use DefaultSpecializedFormatterTraitOptions
/// // instead of impl the options trait.
/// struct MyTraitOptions;
/// impl SpecializedFormatterTraitOptions for MyTraitOptions {
/// fn space_after_operand_separator(_options: &FastFormatterOptions) -> bool {
/// // We hard code the value to `true` which means it's not possible to
/// // change this option at runtime, i.e., this will do nothing:
/// // formatter.options_mut().set_space_after_operand_separator(false);
/// true
/// }
/// fn rip_relative_addresses(options: &FastFormatterOptions) -> bool {
/// // Since we return the input, we can change this value at runtime, i.e.,
/// // this works:
/// // formatter.options_mut().set_rip_relative_addresses(false);
/// options.rip_relative_addresses()
/// }
/// }
/// type MyFormatter = SpecializedFormatter<MyTraitOptions>;
///
/// let mut output = String::new();
/// let mut formatter = MyFormatter::new();
/// formatter.format(&instr, &mut output);
/// assert_eq!(output, "vcvtne2ps2bf16 zmm2{k5}{z}, zmm6, dword bcst [rax+0x4]");
/// ```
///
/// # Fastest possible disassembly
///
/// For fastest possible disassembly, you should *not* enable the `db` feature (or you should set [`ENABLE_DB_DW_DD_DQ`] to `false`)
/// and you should also override the unsafe [`verify_output_has_enough_bytes_left()`] and return `false`.
///
/// [`ENABLE_DB_DW_DD_DQ`]: trait.SpecializedFormatterTraitOptions.html#associatedconstant.ENABLE_DB_DW_DD_DQ
/// [`verify_output_has_enough_bytes_left()`]: trait.SpecializedFormatterTraitOptions.html#method.verify_output_has_enough_bytes_left
///
/// ```
/// use iced_x86::*;
///
/// struct MyTraitOptions;
/// impl SpecializedFormatterTraitOptions for MyTraitOptions {
/// // If you never create a db/dw/dd/dq 'instruction', we don't need this feature.
/// const ENABLE_DB_DW_DD_DQ: bool = false;
/// // For a few percent faster code, you can also override `verify_output_has_enough_bytes_left()` and return `false`
/// // unsafe fn verify_output_has_enough_bytes_left() -> bool {
/// // false
/// // }
/// }
/// type MyFormatter = SpecializedFormatter<MyTraitOptions>;
///
/// // Assume this is a big slice and not just one instruction
/// let bytes = b"\x62\xF2\x4F\xDD\x72\x50\x01";
/// let mut decoder = Decoder::new(64, bytes, DecoderOptions::NONE);
///
/// let mut output = String::new();
/// let mut instruction = Instruction::default();
/// let mut formatter = MyFormatter::new();
/// while decoder.can_decode() {
/// decoder.decode_out(&mut instruction);
/// output.clear();
/// formatter.format(&instruction, &mut output);
/// // do something with 'output' here, eg.:
/// // println!("{}", output);
/// }
/// ```
///
/// Also add this to your `Cargo.toml` file:
///
/// ```toml
/// [profile.release]
/// codegen-units = 1
/// lto = true
/// opt-level = 3
/// ```
///
/// # Using a symbol resolver
///
/// The symbol resolver is disabled by default, but it's easy to enable it (or you can just use [`FastFormatter`])
///
/// ```
/// use iced_x86::*;
/// use std::collections::HashMap;
///
/// let bytes = b"\x48\x8B\x8A\xA5\x5A\xA5\x5A";
/// let mut decoder = Decoder::new(64, bytes, DecoderOptions::NONE);
/// let instr = decoder.decode();
///
/// struct MyTraitOptions;
/// impl SpecializedFormatterTraitOptions for MyTraitOptions {
/// const ENABLE_SYMBOL_RESOLVER: bool = true;
/// }
/// type MyFormatter = SpecializedFormatter<MyTraitOptions>;
///
/// struct MySymbolResolver { map: HashMap<u64, String> }
/// impl SymbolResolver for MySymbolResolver {
/// fn symbol(&mut self, _instruction: &Instruction, _operand: u32, _instruction_operand: Option<u32>,
/// address: u64, _address_size: u32) -> Option<SymbolResult> {
/// if let Some(symbol_string) = self.map.get(&address) {
/// // The 'address' arg is the address of the symbol and doesn't have to be identical
/// // to the 'address' arg passed to symbol(). If it's different from the input
/// // address, the formatter will add +N or -N, eg. '[rax+symbol+123]'
/// Some(SymbolResult::with_str(address, symbol_string.as_str()))
/// } else {
/// None
/// }
/// }
/// }
///
/// // Hard code the symbols, it's just an example!😄
/// let mut sym_map: HashMap<u64, String> = HashMap::new();
/// sym_map.insert(0x5AA55AA5, String::from("my_data"));
///
/// let mut output = String::new();
/// let resolver = Box::new(MySymbolResolver { map: sym_map });
/// let mut formatter = MyFormatter::try_with_options(Some(resolver)).unwrap();
/// formatter.format(&instr, &mut output);
/// assert_eq!("mov rcx,[rdx+my_data]", output);
/// ```
#[allow(missing_debug_implementations)]
pub struct SpecializedFormatter<TraitOptions: SpecializedFormatterTraitOptions> {
d: SelfData,
symbol_resolver: Option<Box<dyn SymbolResolver>>,
_required_by_rustc: PhantomData<fn() -> TraitOptions>,
}
impl<TraitOptions: SpecializedFormatterTraitOptions> Default for SpecializedFormatter<TraitOptions> {
#[must_use]
#[inline]
fn default() -> Self {
SpecializedFormatter::<TraitOptions>::new()
}
}
// Read-only data which is needed a couple of times due to borrow checker
struct SelfData {
options: FastFormatterOptions,
all_registers: &'static [FastStringRegister; IcedConstants::REGISTER_ENUM_COUNT],
code_mnemonics: &'static [FastStringMnemonic; IcedConstants::CODE_ENUM_COUNT],
code_flags: &'static [u8; IcedConstants::CODE_ENUM_COUNT],
all_memory_sizes: &'static [FastStringMemorySize; IcedConstants::MEMORY_SIZE_ENUM_COUNT],
}
impl<TraitOptions: SpecializedFormatterTraitOptions> SpecializedFormatter<TraitOptions> {
const SHOW_USELESS_PREFIXES: bool = true;
/// Creates a new instance of this formatter
#[must_use]
#[inline]
#[allow(clippy::unwrap_used)]
pub fn new() -> Self {
// This never panics
SpecializedFormatter::<TraitOptions>::try_with_options(None).unwrap()
}
/// Creates a new instance of this formatter
///
/// # Errors
///
/// Fails if [`TraitOptions::ENABLE_SYMBOL_RESOLVER`] is `false` and `symbol_resolver.is_some()`
///
/// [`TraitOptions::ENABLE_SYMBOL_RESOLVER`]: trait.SpecializedFormatterTraitOptions.html#associatedconstant.ENABLE_SYMBOL_RESOLVER
///
/// # Arguments
///
/// - `symbol_resolver`: Symbol resolver or `None`
#[allow(clippy::missing_inline_in_public_items)]
pub fn try_with_options(symbol_resolver: Option<Box<dyn SymbolResolver>>) -> Result<Self, IcedError> {
if !TraitOptions::ENABLE_SYMBOL_RESOLVER && symbol_resolver.is_some() {
Err(IcedError::new(concat!(stringify!(TraitOptions::ENABLE_SYMBOL_RESOLVER), " is disabled so symbol resolvers aren't supported")))
} else {
Ok(Self {
d: SelfData {
options: FastFormatterOptions::new(),
all_registers: &*REGS_TBL,
code_mnemonics: &FMT_DATA.mnemonics,
code_flags: &FMT_DATA.flags,
all_memory_sizes: &*MEM_SIZE_TBL,
},
symbol_resolver,
_required_by_rustc: PhantomData,
})
}
}
/// Gets the formatter options (immutable)
///
/// Note that the `TraitOptions` generic parameter can override any option and hard code them,
/// see [`SpecializedFormatterTraitOptions`]
///
/// [`SpecializedFormatterTraitOptions`]: trait.SpecializedFormatterTraitOptions.html
#[must_use]
#[inline]
pub fn opt
|
elf) -> &FastFormatterOptions {
&self.d.options
}
/// Gets the formatter options (mutable)
///
/// Note that the `TraitOptions` generic parameter can override any option and hard code them,
/// see [`SpecializedFormatterTraitOptions`]
///
/// [`SpecializedFormatterTraitOptions`]: trait.SpecializedFormatterTraitOptions.html
#[must_use]
#[inline]
pub fn options_mut(&mut self) -> &mut FastFormatterOptions {
&mut self.d.options
}
/// Formats the whole instruction: prefixes, mnemonic, operands
///
/// # Arguments
///
/// - `instruction`: Instruction
/// - `output`: Output
#[allow(clippy::missing_inline_in_public_items)]
#[allow(clippy::let_unit_value)]
#[allow(clippy::useless_let_if_seq)]
pub fn format(&mut self, instruction: &Instruction, output: &mut String) {
// SAFETY: We only append data that come from a `&str`, a `String` or ASCII chars so the data is always valid utf8
let dst = unsafe { output.as_mut_vec() };
// The code assumes there's enough bytes (or it will panic) so reserve enough bytes here
dst.reserve(MAX_FMT_INSTR_LEN);
// SAFETY:
// - ptr is in bounds (after last valid byte)
// - it's reloaded when using 'dst' to write to the vector
let mut dst_next_p = unsafe { dst.as_mut_ptr().add(dst.len()) };
let code = instruction.code();
let mut mnemonic = self.d.code_mnemonics[code as usize];
let mut op_count = instruction.op_count();
if TraitOptions::use_pseudo_ops(&self.d.options) {
let flags = self.d.code_flags[code as usize];
let pseudo_ops_num = flags >> FastFmtFlags::PSEUDO_OPS_KIND_SHIFT;
if pseudo_ops_num != 0 && instruction.op_kind(op_count - 1) == OpKind::Immediate8 {
let mut index = instruction.immediate8() as usize;
// SAFETY: the generator generates only valid values (1-based)
let pseudo_ops_kind: PseudoOpsKind = unsafe { mem::transmute(pseudo_ops_num - 1) };
let pseudo_ops = get_pseudo_ops(pseudo_ops_kind);
if pseudo_ops_kind == PseudoOpsKind::pclmulqdq || pseudo_ops_kind == PseudoOpsKind::vpclmulqdq {
if index <= 1 {
// nothing
} else if index == 0x10 {
index = 2;
} else if index == 0x11 {
index = 3;
} else {
index = usize::MAX;
}
}
if let Some(&pseudo_op_mnemonic) = pseudo_ops.get(index) {
mnemonic = pseudo_op_mnemonic;
op_count -= 1;
}
}
}
let prefix_seg = instruction_internal::internal_segment_prefix_raw(instruction);
const_assert_eq!(Register::None as u32, 0);
if prefix_seg < 6 || instruction_internal::internal_has_any_of_lock_rep_repne_prefix(instruction) != 0 {
const DS_REG: u32 = Register::DS as u32 - Register::ES as u32;
let has_notrack_prefix = prefix_seg == DS_REG && is_notrack_prefix_branch(code);
if !has_notrack_prefix && prefix_seg < 6 && SpecializedFormatter::<TraitOptions>::show_segment_prefix(instruction, op_count) {
let prefix_seg = unsafe { mem::transmute((Register::ES as u32 + prefix_seg) as RegisterUnderlyingType) };
call_format_register!(self, dst, dst_next_p, prefix_seg);
write_fast_ascii_char_lit!(dst, dst_next_p, ' ', true);
}
let mut has_xacquire_xrelease = false;
if instruction.has_xacquire_prefix() {
const FAST_STR: FastString12 = mk_const_fast_str!(FastString12, "\x09xacquire ");
write_fast_str!(dst, dst_next_p, FastString12, FAST_STR);
has_xacquire_xrelease = true;
}
if instruction.has_xrelease_prefix() {
const FAST_STR: FastString12 = mk_const_fast_str!(FastString12, "\x09xrelease ");
write_fast_str!(dst, dst_next_p, FastString12, FAST_STR);
has_xacquire_xrelease = true;
}
if instruction.has_lock_prefix() {
const FAST_STR: FastString8 = mk_const_fast_str!(FastString8, "\x05lock ");
write_fast_str!(dst, dst_next_p, FastString8, FAST_STR);
}
if has_notrack_prefix {
const FAST_STR: FastString8 = mk_const_fast_str!(FastString8, "\x08notrack ");
write_fast_str!(dst, dst_next_p, FastString8, FAST_STR);
}
if !has_xacquire_xrelease {
if instruction.has_repe_prefix()
&& (SpecializedFormatter::<TraitOptions>::SHOW_USELESS_PREFIXES
|| show_rep_or_repe_prefix_bool(code, SpecializedFormatter::<TraitOptions>::SHOW_USELESS_PREFIXES))
{
if is_repe_or_repne_instruction(code) {
const FAST_STR: FastString8 = mk_const_fast_str!(FastString8, "\x05repe ");
write_fast_str!(dst, dst_next_p, FastString8, FAST_STR);
} else {
const FAST_STR: FastString4 = mk_const_fast_str!(FastString4, "\x04rep ");
write_fast_str!(dst, dst_next_p, FastString4, FAST_STR);
}
}
if instruction.has_repne_prefix() {
if (Code::Retnw_imm16 <= code && code <= Code::Retnq)
|| (Code::Call_rel16 <= code && code <= Code::Jmp_rel32_64)
|| (Code::Call_rm16 <= code && code <= Code::Call_rm64)
|| (Code::Jmp_rm16 <= code && code <= Code::Jmp_rm64)
|| code.is_jcc_short_or_near()
{
const FAST_STR: FastString4 = mk_const_fast_str!(FastString4, "\x04bnd ");
write_fast_str!(dst, dst_next_p, FastString4, FAST_STR);
} else if SpecializedFormatter::<TraitOptions>::SHOW_USELESS_PREFIXES
|| show_repne_prefix_bool(code, SpecializedFormatter::<TraitOptions>::SHOW_USELESS_PREFIXES)
{
const FAST_STR: FastString8 = mk_const_fast_str!(FastString8, "\x06repne ");
write_fast_str!(dst, dst_next_p, FastString8, FAST_STR);
}
}
}
}
write_fast_str!(dst, dst_next_p, FastStringMnemonic, mnemonic);
let is_declare_data;
let declare_data_kind = if !TraitOptions::ENABLE_DB_DW_DD_DQ {
is_declare_data = false;
OpKind::Register
} else if (code as u32).wrapping_sub(Code::DeclareByte as u32) <= (Code::DeclareQword as u32 - Code::DeclareByte as u32) {
op_count = instruction.declare_data_len() as u32;
is_declare_data = true;
match code {
Code::DeclareByte => OpKind::Immediate8,
Code::DeclareWord => OpKind::Immediate16,
Code::DeclareDword => OpKind::Immediate32,
_ => {
debug_assert_eq!(code, Code::DeclareQword);
OpKind::Immediate64
}
}
} else {
is_declare_data = false;
OpKind::Register
};
if op_count > 0 {
write_fast_ascii_char_lit!(dst, dst_next_p, ' ', true);
let mut operand = 0;
loop {
let imm8;
let imm16;
let imm32;
let imm64;
let imm_size;
let op_kind = if TraitOptions::ENABLE_DB_DW_DD_DQ && is_declare_data { declare_data_kind } else { instruction.op_kind(operand) };
// Share as much code as possible so put these in macros
macro_rules! fmt_near_branch {
($slf:ident, $dst:ident, $dst_next_p:ident, $instruction:ident, $imm_size:expr, $imm:ident) => {{
if TraitOptions::ENABLE_SYMBOL_RESOLVER {
// PERF: Symbols should be rare when using fast fmt with a symbol resolver so clone
// the symbol (forced by borrowck).
// This results in slightly faster code when we do NOT support a symbol resolver since
// we don't need to pass in the options to various methods and can instead pass in &Self
// (i.e., use a method instead of a func).
let mut vec: Vec<SymResTextPart<'_>> = Vec::new();
if let Some(ref symbol) = if let Some(ref mut symbol_resolver) = $slf.symbol_resolver {
to_owned(symbol_resolver.symbol($instruction, operand, Some(operand), $imm, $imm_size), &mut vec)
} else {
None
} {
call_write_symbol!($slf, $dst, $dst_next_p, $imm, symbol);
} else {
call_format_number!($slf, $dst, $dst_next_p, $imm);
}
} else {
call_format_number!($slf, $dst, $dst_next_p, $imm);
}
}};
}
macro_rules! fmt_far_branch {
($slf:ident, $dst:ident, $dst_next_p:ident, $instruction:ident, $op_kind:ident, $imm_size_ident:ident, $imm64:ident) => {{
if $op_kind == OpKind::FarBranch32 {
$imm_size_ident = 4;
$imm64 = $instruction.far_branch32() as u64;
} else {
$imm_size_ident = 2;
$imm64 = $instruction.far_branch16() as u64;
}
if TraitOptions::ENABLE_SYMBOL_RESOLVER {
// See fmt_near_branch!() above for why we clone the symbols
let mut vec: Vec<SymResTextPart<'_>> = Vec::new();
let mut vec2: Vec<SymResTextPart<'_>> = Vec::new();
if let Some(ref symbol) = if let Some(ref mut symbol_resolver) = $slf.symbol_resolver {
to_owned(
symbol_resolver.symbol($instruction, operand, Some(operand), $imm64 as u32 as u64, $imm_size_ident),
&mut vec,
)
} else {
None
} {
debug_assert!(operand + 1 == 1);
let selector_symbol = if let Some(ref mut symbol_resolver) = $slf.symbol_resolver {
to_owned(
symbol_resolver.symbol(
$instruction,
operand + 1,
Some(operand),
$instruction.far_branch_selector() as u64,
2,
),
&mut vec2,
)
} else {
None
};
if let Some(ref selector_symbol) = selector_symbol {
call_write_symbol!($slf, $dst, $dst_next_p, $instruction.far_branch_selector() as u64, selector_symbol);
} else {
call_format_number!($slf, $dst, $dst_next_p, $instruction.far_branch_selector() as u64);
}
write_fast_ascii_char_lit!(dst, dst_next_p, ':', true);
call_write_symbol!($slf, $dst, $dst_next_p, $imm64, symbol);
} else {
call_format_number!($slf, $dst, $dst_next_p, $instruction.far_branch_selector() as u64);
write_fast_ascii_char_lit!(dst, dst_next_p, ':', true);
call_format_number!($slf, $dst, $dst_next_p, $imm64);
}
} else {
call_format_number!($slf, $dst, $dst_next_p, $instruction.far_branch_selector() as u64);
write_fast_ascii_char_lit!(dst, dst_next_p, ':', true);
call_format_number!($slf, $dst, $dst_next_p, $imm64);
}
}};
}
macro_rules! fmt_imm {
($slf:ident, $dst:ident, $dst_next_p:ident, $instruction:ident, $imm:ident, $imm_size:literal) => {
#[allow(trivial_numeric_casts)]
{
if TraitOptions::ENABLE_SYMBOL_RESOLVER {
// See fmt_near_branch!() above for why we clone the symbols
let mut vec: Vec<SymResTextPart<'_>> = Vec::new();
if let Some(ref symbol) = if let Some(ref mut symbol_resolver) = $slf.symbol_resolver {
to_owned(symbol_resolver.symbol($instruction, operand, Some(operand), $imm as u64, $imm_size), &mut vec)
} else {
None
} {
if (symbol.flags & SymbolFlags::RELATIVE) == 0 {
const FAST_STR: FastString8 = mk_const_fast_str!(FastString8, "\x07offset ");
write_fast_str!($dst, $dst_next_p, FastString8, FAST_STR);
}
call_write_symbol!($slf, $dst, $dst_next_p, $imm as u64, symbol);
} else {
call_format_number!($slf, $dst, $dst_next_p, $imm as u64);
}
} else {
call_format_number!($slf, $dst, $dst_next_p, $imm as u64);
}
}
};
}
macro_rules! fmt_register {
() => {{
call_format_register!(self, dst, dst_next_p, instruction_internal::internal_op_register(instruction, operand))
}};
}
macro_rules! fmt_far_br_16_32 {
() => {{
fmt_far_branch!(self, dst, dst_next_p, instruction, op_kind, imm_size, imm64)
}};
}
macro_rules! fmt_memory_seg_si {
() => {{
call_format_memory!(
self,
dst,
dst_next_p,
instruction,
operand,
(instruction.memory_segment()),
(Register::SI),
(Register::None),
0,
0,
0,
2,
)
}};
}
macro_rules! fmt_memory_seg_esi {
() => {{
call_format_memory!(
self,
dst,
dst_next_p,
instruction,
operand,
(instruction.memory_segment()),
(Register::ESI),
(Register::None),
0,
0,
0,
4,
)
}};
}
macro_rules! fmt_memory_seg_rsi {
() => {{
call_format_memory!(
self,
dst,
dst_next_p,
instruction,
operand,
(instruction.memory_segment()),
(Register::RSI),
(Register::None),
0,
0,
0,
8,
)
}};
}
macro_rules! fmt_memory_seg_di {
() => {{
call_format_memory!(
self,
dst,
dst_next_p,
instruction,
operand,
(instruction.memory_segment()),
(Register::DI),
(Register::None),
0,
0,
0,
2,
)
}};
}
macro_rules! fmt_memory_seg_edi {
() => {{
call_format_memory!(
self,
dst,
dst_next_p,
instruction,
operand,
(instruction.memory_segment()),
(Register::EDI),
(Register::None),
0,
0,
0,
4,
)
}};
}
macro_rules! fmt_memory_seg_rdi {
() => {{
call_format_memory!(
self,
dst,
dst_next_p,
instruction,
operand,
(instruction.memory_segment()),
(Register::RDI),
(Register::None),
0,
0,
0,
8,
)
}};
}
macro_rules! fmt_memory_es_di {
() => {{
call_format_memory!(self, dst, dst_next_p, instruction, operand, (Register::ES), (Register::DI), (Register::None), 0, 0, 0, 2)
}};
}
macro_rules! fmt_memory_es_edi {
() => {{
call_format_memory!(
self,
dst,
dst_next_p,
instruction,
operand,
(Register::ES),
(Register::EDI),
(Register::None),
0,
0,
0,
4
)
}};
}
macro_rules! fmt_memory_es_rdi {
() => {{
call_format_memory!(
self,
dst,
dst_next_p,
instruction,
operand,
(Register::ES),
(Register::RDI),
(Register::None),
0,
0,
0,
8
)
}};
}
macro_rules! fmt_memory {
() => {{
let displ_size = instruction.memory_displ_size();
let base_reg = instruction.memory_base();
let mut index_reg = instruction.memory_index();
let addr_size = get_address_size_in_bytes(base_reg, index_reg, displ_size, instruction.code_size());
let displ =
if addr_size == 8 { instruction.memory_displacement64() as i64 } else { instruction.memory_displacement32() as i64 };
if code == Code::Xlat_m8 {
index_reg = Register::None;
}
call_format_memory!(
self,
dst,
dst_next_p,
instruction,
operand,
(instruction.memory_segment()),
base_reg,
index_reg,
(instruction_internal::internal_get_memory_index_scale(instruction)),
displ_size,
displ,
addr_size,
);
}};
}
// This speeds up SpecializedFormatter since every option is hard coded, but makes FastFormatter
// slower because every option is dynamic (more code). Detect FastFormatter and generate smaller
// and faster code. Both paths are tested (same tests).
// The whole point of this formatter is to be fast so unfortunately it can result in fugly code...
if TraitOptions::__IS_FAST_FORMATTER {
match op_kind {
OpKind::Register => fmt_register!(),
OpKind::NearBranch16 | OpKind::NearBranch32 | OpKind::NearBranch64 => {
if op_kind == OpKind::NearBranch64 {
imm_size = 8;
imm64 = instruction.near_branch64();
} else if op_kind == OpKind::NearBranch32 {
imm_size = 4;
imm64 = instruction.near_branch32() as u64;
} else {
imm_size = 2;
imm64 = instruction.near_branch16() as u64;
}
fmt_near_branch!(self, dst, dst_next_p, instruction, imm_size, imm64);
}
OpKind::FarBranch16 | OpKind::FarBranch32 => fmt_far_br_16_32!(),
OpKind::Immediate8 | OpKind::Immediate8_2nd => {
if TraitOptions::ENABLE_DB_DW_DD_DQ && is_declare_data {
imm8 = instruction.get_declare_byte_value(operand as usize);
} else if op_kind == OpKind::Immediate8 {
imm8 = instruction.immediate8();
} else {
debug_assert_eq!(op_kind, OpKind::Immediate8_2nd);
imm8 = instruction.immediate8_2nd();
}
fmt_imm!(self, dst, dst_next_p, instruction, imm8, 1);
}
OpKind::Immediate16 | OpKind::Immediate8to16 => {
if TraitOptions::ENABLE_DB_DW_DD_DQ && is_declare_data {
imm16 = instruction.get_declare_word_value(operand as usize);
} else if op_kind == OpKind::Immediate16 {
imm16 = instruction.immediate16();
} else {
debug_assert_eq!(op_kind, OpKind::Immediate8to16);
imm16 = instruction.immediate8to16() as u16;
}
fmt_imm!(self, dst, dst_next_p, instruction, imm16, 2)
}
OpKind::Immediate32 | OpKind::Immediate8to32 => {
if TraitOptions::ENABLE_DB_DW_DD_DQ && is_declare_data {
imm32 = instruction.get_declare_dword_value(operand as usize);
} else if op_kind == OpKind::Immediate32 {
imm32 = instruction.immediate32();
} else {
debug_assert_eq!(op_kind, OpKind::Immediate8to32);
imm32 = instruction.immediate8to32() as u32;
}
fmt_imm!(self, dst, dst_next_p, instruction, imm32, 4)
}
OpKind::Immediate64 | OpKind::Immediate8to64 | OpKind::Immediate32to64 => {
if TraitOptions::ENABLE_DB_DW_DD_DQ && is_declare_data {
imm64 = instruction.get_declare_qword_value(operand as usize);
} else if op_kind == OpKind::Immediate32to64 {
imm64 = instruction.immediate32to64() as u64;
} else if op_kind == OpKind::Immediate8to64 {
imm64 = instruction.immediate8to64() as u64;
} else {
debug_assert_eq!(op_kind, OpKind::Immediate64);
imm64 = instruction.immediate64();
}
fmt_imm!(self, dst, dst_next_p, instruction, imm64, 8)
}
OpKind::MemorySegSI => fmt_memory_seg_si!(),
OpKind::MemorySegESI => fmt_memory_seg_esi!(),
OpKind::MemorySegRSI => fmt_memory_seg_rsi!(),
OpKind::MemorySegDI => fmt_memory_seg_di!(),
OpKind::MemorySegEDI => fmt_memory_seg_edi!(),
OpKind::MemorySegRDI => fmt_memory_seg_rdi!(),
OpKind::MemoryESDI => fmt_memory_es_di!(),
OpKind::MemoryESEDI => fmt_memory_es_edi!(),
OpKind::MemoryESRDI => fmt_memory_es_rdi!(),
OpKind::Memory => fmt_memory!(),
}
} else {
match op_kind {
OpKind::Register => fmt_register!(),
OpKind::NearBranch16 => {
imm64 = instruction.near_branch16() as u64;
fmt_near_branch!(self, dst, dst_next_p, instruction, 2, imm64);
}
OpKind::NearBranch32 => {
imm64 = instruction.near_branch32() as u64;
fmt_near_branch!(self, dst, dst_next_p, instruction, 4, imm64);
}
OpKind::NearBranch64 => {
imm64 = instruction.near_branch64();
fmt_near_branch!(self, dst, dst_next_p, instruction, 8, imm64);
}
OpKind::FarBranch16 | OpKind::FarBranch32 => fmt_far_br_16_32!(),
OpKind::Immediate8 => {
imm8 = if TraitOptions::ENABLE_DB_DW_DD_DQ && is_declare_data {
instruction.get_declare_byte_value(operand as usize)
} else {
instruction.immediate8()
};
fmt_imm!(self, dst, dst_next_p, instruction, imm8, 1);
}
OpKind::Immediate8_2nd => {
imm8 = instruction.immediate8_2nd();
fmt_imm!(self, dst, dst_next_p, instruction, imm8, 1);
}
OpKind::Immediate16 => {
imm16 = if TraitOptions::ENABLE_DB_DW_DD_DQ && is_declare_data {
instruction.get_declare_word_value(operand as usize)
} else {
instruction.immediate16()
};
fmt_imm!(self, dst, dst_next_p, instruction, imm16, 2)
}
OpKind::Immediate8to16 => {
imm16 = instruction.immediate8to16() as u16;
fmt_imm!(self, dst, dst_next_p, instruction, imm16, 2)
}
OpKind::Immediate32 => {
imm32 = if TraitOptions::ENABLE_DB_DW_DD_DQ && is_declare_data {
instruction.get_declare_dword_value(operand as usize)
} else {
instruction.immediate32()
};
fmt_imm!(self, dst, dst_next_p, instruction, imm32, 4)
}
OpKind::Immediate8to32 => {
imm32 = instruction.immediate8to32() as u32;
fmt_imm!(self, dst, dst_next_p, instruction, imm32, 4)
}
OpKind::Immediate64 => {
imm64 = if TraitOptions::ENABLE_DB_DW_DD_DQ && is_declare_data {
instruction.get_declare_qword_value(operand as usize)
} else {
instruction.immediate64()
};
fmt_imm!(self, dst, dst_next_p, instruction, imm64, 8)
}
OpKind::Immediate8to64 => {
imm64 = instruction.immediate8to64() as u64;
fmt_imm!(self, dst, dst_next_p, instruction, imm64, 8)
}
OpKind::Immediate32to64 => {
imm64 = instruction.immediate32to64() as u64;
fmt_imm!(self, dst, dst_next_p, instruction, imm64, 8)
}
OpKind::MemorySegSI => fmt_memory_seg_si!(),
OpKind::MemorySegESI => fmt_memory_seg_esi!(),
OpKind::MemorySegRSI => fmt_memory_seg_rsi!(),
OpKind::MemorySegDI => fmt_memory_seg_di!(),
OpKind::MemorySegEDI => fmt_memory_seg_edi!(),
OpKind::MemorySegRDI => fmt_memory_seg_rdi!(),
OpKind::MemoryESDI => fmt_memory_es_di!(),
OpKind::MemoryESEDI => fmt_memory_es_edi!(),
OpKind::MemoryESRDI => fmt_memory_es_rdi!(),
OpKind::Memory => fmt_memory!(),
}
}
if operand == 0 && instruction_internal::internal_has_op_mask_or_zeroing_masking(instruction) {
if instruction.has_op_mask() {
write_fast_ascii_char_lit!(dst, dst_next_p, '{', true);
call_format_register!(self, dst, dst_next_p, instruction.op_mask());
write_fast_ascii_char_lit!(dst, dst_next_p, '}', true);
}
if instruction.zeroing_masking() {
const FAST_STR: FastString4 = mk_const_fast_str!(FastString4, "\x03{z} ");
write_fast_str!(dst, dst_next_p, FastString4, FAST_STR);
}
}
operand += 1;
if operand >= op_count {
break;
}
if TraitOptions::space_after_operand_separator(&self.d.options) {
const FAST_STR: FastString4 = mk_const_fast_str!(FastString4, "\x02, ");
write_fast_str!(dst, dst_next_p, FastString4, FAST_STR);
} else {
write_fast_ascii_char_lit!(dst, dst_next_p, ',', true);
}
}
if instruction_internal::internal_has_rounding_control_or_sae(instruction) {
let rc = instruction.rounding_control();
if rc != RoundingControl::None {
let fast_str = RC_STRINGS[rc as usize];
write_fast_str!(dst, dst_next_p, FastString8, fast_str);
} else {
debug_assert!(instruction.suppress_all_exceptions());
const FAST_STR: FastString8 = mk_const_fast_str!(FastString8, "\x05{sae} ");
write_fast_str!(dst, dst_next_p, FastString8, FAST_STR);
}
}
}
update_vec_len!(dst, dst_next_p);
}
// Only one caller so inline it
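// Returns true if an explicit segment-override prefix should be written before the mnemonic:
// if any operand is a memory operand, the segment is already printed as part of that operand,
// so this returns false in that case.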
#[must_use]
#[inline]
fn show_segment_prefix(instruction: &Instruction, op_count: u32) -> bool {
for i in 0..op_count {
match instruction.op_kind(i) {
OpKind::Register
| OpKind::NearBranch16
| OpKind::NearBranch32
| OpKind::NearBranch64
| OpKind::FarBranch16
| OpKind::FarBranch32
| OpKind::Immediate8
| OpKind::Immediate8_2nd
| OpKind::Immediate16
| OpKind::Immediate32
| OpKind::Immediate64
| OpKind::Immediate8to16
| OpKind::Immediate8to32
| OpKind::Immediate8to64
| OpKind::Immediate32to64
| OpKind::MemoryESDI
| OpKind::MemoryESEDI
| OpKind::MemoryESRDI => {}
OpKind::MemorySegSI
| OpKind::MemorySegESI
| OpKind::MemorySegRSI
| OpKind::MemorySegDI
| OpKind::MemorySegEDI
| OpKind::MemorySegRDI
| OpKind::Memory => return false,
}
}
SpecializedFormatter::<TraitOptions>::SHOW_USELESS_PREFIXES
}
#[inline]
#[must_use]
fn format_register(&self, dst: &mut Vec<u8>, mut dst_next_p: *mut u8, register: Register) -> *mut u8 {
let reg_str = self.d.all_registers[register as usize];
write_fast_str!(dst, dst_next_p, FastStringRegister, reg_str);
dst_next_p
}
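// Writes `value` as a hexadecimal number, honoring the uppercase-hex and hex-prefix options
// (e.g. `0x12AB` when the prefix is enabled, `12ABh` with the `h` suffix otherwise).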
#[must_use]
fn format_number(&self, dst: &mut Vec<u8>, mut dst_next_p: *mut u8, value: u64) -> *mut u8 {
macro_rules! format_number_impl {
($dst:ident, $dst_next_p:ident, $value:ident, $uppercase_hex:literal, $use_hex_prefix:literal) => {{
if $use_hex_prefix {
const FAST_STR: FastString4 = mk_const_fast_str!(FastString4, "\x020x ");
write_fast_str!($dst, $dst_next_p, FastString4, FAST_STR);
}
if $value < 0x10 {
if $use_hex_prefix {
let hex_table = if $uppercase_hex { b"0123456789ABCDEF" } else { b"0123456789abcdef" };
// SAFETY: 0<=$value<=0xF and hex_table.len() == 0x10
let c = unsafe { *hex_table.get_unchecked($value as usize) };
write_fast_ascii_char!($dst, $dst_next_p, c, true);
$dst_next_p
} else {
// 1 (possible '0' prefix) + 1 (hex digit) + 1 ('h' suffix)
verify_output_has_enough_bytes_left!($dst, $dst_next_p, 1 + 1 + 1);
if $value > 9 {
write_fast_ascii_char_lit!($dst, $dst_next_p, '0', false);
}
let hex_table = if $uppercase_hex { b"0123456789ABCDEF" } else { b"0123456789abcdef" };
// SAFETY: 0<=$value<=0xF and hex_table.len() == 0x10
let c = unsafe { *hex_table.get_unchecked($value as usize) };
write_fast_ascii_char!($dst, $dst_next_p, c, false);
write_fast_ascii_char_lit!($dst, $dst_next_p, 'h', false);
$dst_next_p
}
} else if $value < 0x100 {
if $use_hex_prefix {
let lower_or_value = if $uppercase_hex { 0 } else { 0x2020_2020 };
write_fast_hex2_rw_4bytes!($dst, $dst_next_p, $value, lower_or_value, true);
$dst_next_p
} else {
// 1 (possible '0' prefix) + 2 (hex digits) + 2 since
// write_fast_hex2_rw_4bytes!() reads/writes 4 bytes and not 2.
// '+2' also includes the 'h' suffix.
verify_output_has_enough_bytes_left!($dst, $dst_next_p, 1 + 2 + 2);
if $value > 0x9F {
write_fast_ascii_char_lit!($dst, $dst_next_p, '0', false);
}
let lower_or_value = if $uppercase_hex { 0 } else { 0x2020_2020 };
write_fast_hex2_rw_4bytes!($dst, $dst_next_p, $value, lower_or_value, false);
write_fast_ascii_char_lit!($dst, $dst_next_p, 'h', false);
$dst_next_p
}
} else {
let mut rshift = ((64 - u64::leading_zeros($value) + 3) & !3) as usize;
// The first '1' is an optional '0' prefix.
// `rshift / 4` == number of hex digits to copy. The last `+ 2` is the extra padding needed
// since the write_fast_hex2_rw_4bytes!() macro reads and writes 4 bytes (2 hex digits + 2 bytes padding).
// '+2' also includes the 'h' suffix.
verify_output_has_enough_bytes_left!($dst, $dst_next_p, 1 + rshift / 4 + 2);
if !$use_hex_prefix && (($value >> (rshift - 4)) & 0xF) > 9 {
write_fast_ascii_char_lit!($dst, $dst_next_p, '0', false);
}
// If odd number of hex digits
if (rshift & 4) != 0 {
rshift -= 4;
let hex_table = if $uppercase_hex { b"0123456789ABCDEF" } else { b"0123456789abcdef" };
let digit = (($value >> rshift) & 0xF) as usize;
// SAFETY: 0<=digit<=0xF and hex_table.len() == 0x10
let c = unsafe { *hex_table.get_unchecked(digit) };
write_fast_ascii_char!($dst, $dst_next_p, c, false);
}
// If we're here, $value >= 0x100 so rshift >= 8
debug_assert!(rshift >= 8);
let lower_or_value = if $uppercase_hex { 0 } else { 0x2020_2020 };
loop {
rshift -= 8;
let digits2 = (($value >> rshift) & 0xFF) as usize;
write_fast_hex2_rw_4bytes!($dst, $dst_next_p, digits2, lower_or_value, false);
if rshift == 0 {
break;
}
}
if !$use_hex_prefix {
// We've verified that `dst` had `1 + rshift / 4 + 2` bytes left (see above).
// The last `+2` is the padding that needed to be there. That's where
// this 'h' gets written so we don't need to verify the vec len here
// because it has at least 2 more bytes left.
write_fast_ascii_char_lit!($dst, $dst_next_p, 'h', false);
}
$dst_next_p
}
}};
}
if TraitOptions::uppercase_hex(&self.d.options) {
if TraitOptions::use_hex_prefix(&self.d.options) {
// 0x12AB
format_number_impl!(dst, dst_next_p, value, true, true)
} else {
// 12ABh
format_number_impl!(dst, dst_next_p, value, true, false)
}
} else {
if TraitOptions::use_hex_prefix(&self.d.options) {
// 0x12ab
format_number_impl!(dst, dst_next_p, value, false, true)
} else {
// 12abh
format_number_impl!(dst, dst_next_p, value, false, false)
}
}
}
#[inline]
#[must_use]
fn write_symbol(&self, dst: &mut Vec<u8>, mut dst_next_p: *mut u8, address: u64, symbol: &SymbolResult<'_>) -> *mut u8 {
call_write_symbol2!(self, dst, dst_next_p, address, symbol, true);
dst_next_p
}
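// Slow path: writes the symbol text, an optional +N/-N displacement when `address` differs
// from the symbol's address and, if the option is enabled, the symbol's address in parentheses.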
#[cold]
#[must_use]
fn write_symbol2(
&self, dst: &mut Vec<u8>, mut dst_next_p: *mut u8, address: u64, symbol: &SymbolResult<'_>, write_minus_if_signed: bool,
) -> *mut u8 {
let mut displ = address.wrapping_sub(symbol.address) as i64;
if (symbol.flags & SymbolFlags::SIGNED) != 0 {
if write_minus_if_signed {
write_fast_ascii_char_lit!(dst, dst_next_p, '-', true);
}
displ = displ.wrapping_neg();
}
// Write the symbol. The symbol can be any length and is a `&'a str` so we must
// write using `dst`. The macro will invalidate `dst_next_p` and will restore
// it after the match statement.
use_dst_only_now!(dst, dst_next_p);
match symbol.text {
SymResTextInfo::Text(ref part) => {
let s = match &part.text {
&SymResString::Str(s) => s,
&SymResString::String(ref s) => s.as_str(),
};
dst.extend_from_slice(s.as_bytes());
}
SymResTextInfo::TextVec(v) => {
for part in v.iter() {
let s = match &part.text {
&SymResString::Str(s) => s,
&SymResString::String(ref s) => s.as_str(),
};
dst.extend_from_slice(s.as_bytes());
}
}
}
use_dst_next_p_now!(dst, dst_next_p);
if displ != 0 {
let c = if displ < 0 {
displ = displ.wrapping_neg();
'-'
} else {
'+'
};
write_fast_ascii_char!(dst, dst_next_p, c, true);
call_format_number!(self, dst, dst_next_p, displ as u64);
}
if TraitOptions::show_symbol_address(&self.d.options) {
const FAST_STR: FastString4 = mk_const_fast_str!(FastString4, "\x02 ( ");
write_fast_str!(dst, dst_next_p, FastString4, FAST_STR);
call_format_number!(self, dst, dst_next_p, address);
write_fast_ascii_char_lit!(dst, dst_next_p, ')', true);
}
dst_next_p
}
#[must_use]
fn format_memory(
&mut self, dst: &mut Vec<u8>, mut dst_next_p: *mut u8, instruction: &Instruction, operand: u32, seg_reg: Register, base_reg: Register,
index_reg: Register, scale: u32, displ_size: u32, displ: i64, addr_size: u32,
) -> *mut u8 {
format_memory_code!(self, dst, dst_next_p, instruction, operand, seg_reg, base_reg, index_reg, scale, displ_size, displ, addr_size);
dst_next_p
}
}
/// Fast formatter with less formatting options and with a masm-like syntax.
/// Use it if formatting speed is more important than being able to re-assemble formatted instructions.
///
/// This is a variant of [`SpecializedFormatter<TraitOptions>`] and allows changing the
/// formatter options at runtime and the use of a symbol resolver. For fastest possible
/// disassembly and smallest code, the options should be hard coded, so see [`SpecializedFormatter<TraitOptions>`].
///
/// This formatter is ~2.8x faster than the gas/intel/masm/nasm formatters (the time includes decoding + formatting).
///
/// [`SpecializedFormatter<TraitOptions>`]: struct.SpecializedFormatter.html
///
/// # Examples
///
/// ```
/// use iced_x86::*;
///
/// let bytes = b"\x62\xF2\x4F\xDD\x72\x50\x01";
/// let mut decoder = Decoder::new(64, bytes, DecoderOptions::NONE);
/// let instr = decoder.decode();
///
/// let mut output = String::new();
/// let mut formatter = FastFormatter::new();
/// formatter.options_mut().set_space_after_operand_separator(true);
/// formatter.format(&instr, &mut output);
/// assert_eq!(output, "vcvtne2ps2bf16 zmm2{k5}{z}, zmm6, dword bcst [rax+4h]");
/// ```
///
/// # Using a symbol resolver
///
/// ```
/// use iced_x86::*;
/// use std::collections::HashMap;
///
/// let bytes = b"\x48\x8B\x8A\xA5\x5A\xA5\x5A";
/// let mut decoder = Decoder::new(64, bytes, DecoderOptions::NONE);
/// let instr = decoder.decode();
///
/// struct MySymbolResolver { map: HashMap<u64, String> }
/// impl SymbolResolver for MySymbolResolver {
/// fn symbol(&mut self, _instruction: &Instruction, _operand: u32, _instruction_operand: Option<u32>,
/// address: u64, _address_size: u32) -> Option<SymbolResult> {
/// if let Some(symbol_string) = self.map.get(&address) {
/// // The 'address' arg is the address of the symbol and doesn't have to be identical
/// // to the 'address' arg passed to symbol(). If it's different from the input
/// // address, the formatter will add +N or -N, eg. '[rax+symbol+123]'
/// Some(SymbolResult::with_str(address, symbol_string.as_str()))
/// } else {
/// None
/// }
/// }
/// }
///
/// // Hard code the symbols, it's just an example!😄
/// let mut sym_map: HashMap<u64, String> = HashMap::new();
/// sym_map.insert(0x5AA55AA5, String::from("my_data"));
///
/// let mut output = String::new();
/// let resolver = Box::new(MySymbolResolver { map: sym_map });
/// let mut formatter = FastFormatter::try_with_options(Some(resolver)).unwrap();
/// formatter.format(&instr, &mut output);
/// assert_eq!("mov rcx,[rdx+my_data]", output);
/// ```
pub type FastFormatter = SpecializedFormatter<DefaultFastFormatterTraitOptions>;
/// Default [`SpecializedFormatter<TraitOptions>`] options. It doesn't override any `const` or `fn`
///
/// [`SpecializedFormatter<TraitOptions>`]: struct.SpecializedFormatter.html
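///
/// To hard code an option at compile time, define an empty struct and override the trait's
/// `const`s instead, as in the symbol-resolver example above (only `ENABLE_SYMBOL_RESOLVER`
/// is shown here; this is a sketch, not the full list of overridable items):
///
/// ```ignore
/// struct MyTraitOptions;
/// impl SpecializedFormatterTraitOptions for MyTraitOptions {
///     const ENABLE_SYMBOL_RESOLVER: bool = true;
/// }
/// type MyFormatter = SpecializedFormatter<MyTraitOptions>;
/// ```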
#[allow(missing_copy_implementations)]
#[allow(missing_debug_implementations)]
pub struct DefaultSpecializedFormatterTraitOptions;
impl SpecializedFormatterTraitOptions for DefaultSpecializedFormatterTraitOptions {}
|
ions(&s
|
fr_RE_test.go
|
package fr_RE
import (
"testing"
"time"
"github.com/jinycoo/jinygo/text/i18n/locales"
"github.com/jinycoo/jinygo/text/i18n/locales/currency"
)
func TestLocale(t *testing.T) {
trans := New()
expected := "fr_RE"
if trans.Locale() != expected {
t.Errorf("Expected '%s' Got '%s'", expected, trans.Locale())
}
}
func TestPluralsRange(t *testing.T) {
trans := New()
tests := []struct {
expected locales.PluralRule
}{
// {
// expected: locales.PluralRuleOther,
// },
}
rules := trans.PluralsRange()
// expected := 1
// if len(rules) != expected {
// t.Errorf("Expected '%d' Got '%d'", expected, len(rules))
// }
for _, tt := range tests {
r := locales.PluralRuleUnknown
for i := 0; i < len(rules); i++ {
if rules[i] == tt.expected {
r = rules[i]
break
}
}
if r == locales.PluralRuleUnknown {
t.Errorf("Expected '%s' Got '%s'", tt.expected, r)
}
}
}
func TestPluralsOrdinal(t *testing.T) {
trans := New()
tests := []struct {
expected locales.PluralRule
}{
// {
// expected: locales.PluralRuleOne,
// },
// {
// expected: locales.PluralRuleTwo,
// },
// {
// expected: locales.PluralRuleFew,
// },
// {
// expected: locales.PluralRuleOther,
// },
}
rules := trans.PluralsOrdinal()
// expected := 4
// if len(rules) != expected {
// t.Errorf("Expected '%d' Got '%d'", expected, len(rules))
// }
for _, tt := range tests {
r := locales.PluralRuleUnknown
for i := 0; i < len(rules); i++ {
if rules[i] == tt.expected {
r = rules[i]
break
}
}
if r == locales.PluralRuleUnknown {
t.Errorf("Expected '%s' Got '%s'", tt.expected, r)
}
}
}
func TestPluralsCardinal(t *testing.T) {
trans := New()
tests := []struct {
expected locales.PluralRule
}{
// {
// expected: locales.PluralRuleOne,
// },
// {
// expected: locales.PluralRuleOther,
// },
}
rules := trans.PluralsCardinal()
// expected := 2
// if len(rules) != expected {
// t.Errorf("Expected '%d' Got '%d'", expected, len(rules))
// }
for _, tt := range tests {
r := locales.PluralRuleUnknown
for i := 0; i < len(rules); i++ {
if rules[i] == tt.expected {
r = rules[i]
break
}
}
if r == locales.PluralRuleUnknown {
t.Errorf("Expected '%s' Got '%s'", tt.expected, r)
}
}
}
func TestRangePlurals(t *testing.T) {
trans := New()
tests := []struct {
num1 float64
v1 uint64
num2 float64
v2 uint64
expected locales.PluralRule
}{
// {
// num1: 1,
// v1: 1,
// num2: 2,
// v2: 2,
// expected: locales.PluralRuleOther,
// },
}
for _, tt := range tests {
rule := trans.RangePluralRule(tt.num1, tt.v1, tt.num2, tt.v2)
if rule != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, rule)
}
}
}
func TestOrdinalPlurals(t *testing.T) {
trans := New()
tests := []struct {
num float64
v uint64
expected locales.PluralRule
}{
// {
// num: 1,
// v: 0,
// expected: locales.PluralRuleOne,
// },
// {
// num: 2,
// v: 0,
// expected: locales.PluralRuleTwo,
// },
// {
// num: 3,
// v: 0,
// expected: locales.PluralRuleFew,
// },
// {
// num: 4,
// v: 0,
// expected: locales.PluralRuleOther,
// },
}
for _, tt := range tests {
rule := trans.OrdinalPluralRule(tt.num, tt.v)
if rule != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, rule)
}
}
}
func TestCardinalPlurals(t *testing.T) {
trans := New()
tests := []struct {
num float64
v uint64
expected locales.PluralRule
}{
// {
// num: 1,
// v: 0,
// expected: locales.PluralRuleOne,
// },
// {
// num: 4,
// v: 0,
// expected: locales.PluralRuleOther,
// },
}
for _, tt := range tests {
rule := trans.CardinalPluralRule(tt.num, tt.v)
if rule != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, rule)
}
}
}
|
days := trans.WeekdaysAbbreviated()
for i, day := range days {
s := trans.WeekdayAbbreviated(time.Weekday(i))
if s != day {
t.Errorf("Expected '%s' Got '%s'", day, s)
}
}
tests := []struct {
idx int
expected string
}{
// {
// idx: 0,
// expected: "Sun",
// },
// {
// idx: 1,
// expected: "Mon",
// },
// {
// idx: 2,
// expected: "Tue",
// },
// {
// idx: 3,
// expected: "Wed",
// },
// {
// idx: 4,
// expected: "Thu",
// },
// {
// idx: 5,
// expected: "Fri",
// },
// {
// idx: 6,
// expected: "Sat",
// },
}
for _, tt := range tests {
s := trans.WeekdayAbbreviated(time.Weekday(tt.idx))
if s != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, s)
}
}
}
func TestDaysNarrow(t *testing.T) {
trans := New()
days := trans.WeekdaysNarrow()
for i, day := range days {
s := trans.WeekdayNarrow(time.Weekday(i))
if s != day {
t.Errorf("Expected '%s' Got '%s'", string(day), s)
}
}
tests := []struct {
idx int
expected string
}{
// {
// idx: 0,
// expected: "S",
// },
// {
// idx: 1,
// expected: "M",
// },
// {
// idx: 2,
// expected: "T",
// },
// {
// idx: 3,
// expected: "W",
// },
// {
// idx: 4,
// expected: "T",
// },
// {
// idx: 5,
// expected: "F",
// },
// {
// idx: 6,
// expected: "S",
// },
}
for _, tt := range tests {
s := trans.WeekdayNarrow(time.Weekday(tt.idx))
if s != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, s)
}
}
}
func TestDaysShort(t *testing.T) {
trans := New()
days := trans.WeekdaysShort()
for i, day := range days {
s := trans.WeekdayShort(time.Weekday(i))
if s != day {
t.Errorf("Expected '%s' Got '%s'", day, s)
}
}
tests := []struct {
idx int
expected string
}{
// {
// idx: 0,
// expected: "Su",
// },
// {
// idx: 1,
// expected: "Mo",
// },
// {
// idx: 2,
// expected: "Tu",
// },
// {
// idx: 3,
// expected: "We",
// },
// {
// idx: 4,
// expected: "Th",
// },
// {
// idx: 5,
// expected: "Fr",
// },
// {
// idx: 6,
// expected: "Sa",
// },
}
for _, tt := range tests {
s := trans.WeekdayShort(time.Weekday(tt.idx))
if s != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, s)
}
}
}
func TestDaysWide(t *testing.T) {
trans := New()
days := trans.WeekdaysWide()
for i, day := range days {
s := trans.WeekdayWide(time.Weekday(i))
if s != day {
t.Errorf("Expected '%s' Got '%s'", day, s)
}
}
tests := []struct {
idx int
expected string
}{
// {
// idx: 0,
// expected: "Sunday",
// },
// {
// idx: 1,
// expected: "Monday",
// },
// {
// idx: 2,
// expected: "Tuesday",
// },
// {
// idx: 3,
// expected: "Wednesday",
// },
// {
// idx: 4,
// expected: "Thursday",
// },
// {
// idx: 5,
// expected: "Friday",
// },
// {
// idx: 6,
// expected: "Saturday",
// },
}
for _, tt := range tests {
s := trans.WeekdayWide(time.Weekday(tt.idx))
if s != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, s)
}
}
}
func TestMonthsAbbreviated(t *testing.T) {
trans := New()
months := trans.MonthsAbbreviated()
for i, month := range months {
s := trans.MonthAbbreviated(time.Month(i + 1))
if s != month {
t.Errorf("Expected '%s' Got '%s'", month, s)
}
}
tests := []struct {
idx int
expected string
}{
// {
// idx: 1,
// expected: "Jan",
// },
// {
// idx: 2,
// expected: "Feb",
// },
// {
// idx: 3,
// expected: "Mar",
// },
// {
// idx: 4,
// expected: "Apr",
// },
// {
// idx: 5,
// expected: "May",
// },
// {
// idx: 6,
// expected: "Jun",
// },
// {
// idx: 7,
// expected: "Jul",
// },
// {
// idx: 8,
// expected: "Aug",
// },
// {
// idx: 9,
// expected: "Sep",
// },
// {
// idx: 10,
// expected: "Oct",
// },
// {
// idx: 11,
// expected: "Nov",
// },
// {
// idx: 12,
// expected: "Dec",
// },
}
for _, tt := range tests {
s := trans.MonthAbbreviated(time.Month(tt.idx))
if s != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, s)
}
}
}
func TestMonthsNarrow(t *testing.T) {
trans := New()
months := trans.MonthsNarrow()
for i, month := range months {
s := trans.MonthNarrow(time.Month(i + 1))
if s != month {
t.Errorf("Expected '%s' Got '%s'", month, s)
}
}
tests := []struct {
idx int
expected string
}{
// {
// idx: 1,
// expected: "J",
// },
// {
// idx: 2,
// expected: "F",
// },
// {
// idx: 3,
// expected: "M",
// },
// {
// idx: 4,
// expected: "A",
// },
// {
// idx: 5,
// expected: "M",
// },
// {
// idx: 6,
// expected: "J",
// },
// {
// idx: 7,
// expected: "J",
// },
// {
// idx: 8,
// expected: "A",
// },
// {
// idx: 9,
// expected: "S",
// },
// {
// idx: 10,
// expected: "O",
// },
// {
// idx: 11,
// expected: "N",
// },
// {
// idx: 12,
// expected: "D",
// },
}
for _, tt := range tests {
s := trans.MonthNarrow(time.Month(tt.idx))
if s != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, s)
}
}
}
func TestMonthsWide(t *testing.T) {
trans := New()
months := trans.MonthsWide()
for i, month := range months {
s := trans.MonthWide(time.Month(i + 1))
if s != month {
t.Errorf("Expected '%s' Got '%s'", month, s)
}
}
tests := []struct {
idx int
expected string
}{
// {
// idx: 1,
// expected: "January",
// },
// {
// idx: 2,
// expected: "February",
// },
// {
// idx: 3,
// expected: "March",
// },
// {
// idx: 4,
// expected: "April",
// },
// {
// idx: 5,
// expected: "May",
// },
// {
// idx: 6,
// expected: "June",
// },
// {
// idx: 7,
// expected: "July",
// },
// {
// idx: 8,
// expected: "August",
// },
// {
// idx: 9,
// expected: "September",
// },
// {
// idx: 10,
// expected: "October",
// },
// {
// idx: 11,
// expected: "November",
// },
// {
// idx: 12,
// expected: "December",
// },
}
for _, tt := range tests {
s := string(trans.MonthWide(time.Month(tt.idx)))
if s != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, s)
}
}
}
func TestFmtTimeFull(t *testing.T) {
// loc, err := time.LoadLocation("America/Toronto")
// if err != nil {
// t.Errorf("Expected '<nil>' Got '%s'", err)
// }
// fixed := time.FixedZone("OTHER", -4)
tests := []struct {
t time.Time
expected string
}{
// {
// t: time.Date(2016, 02, 03, 9, 5, 1, 0, loc),
// expected: "9:05:01 am Eastern Standard Time",
// },
// {
// t: time.Date(2016, 02, 03, 20, 5, 1, 0, fixed),
// expected: "8:05:01 pm OTHER",
// },
}
trans := New()
for _, tt := range tests {
s := trans.FmtTimeFull(tt.t)
if s != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, s)
}
}
}
func TestFmtTimeLong(t *testing.T) {
// loc, err := time.LoadLocation("America/Toronto")
// if err != nil {
// t.Errorf("Expected '<nil>' Got '%s'", err)
// }
tests := []struct {
t time.Time
expected string
}{
// {
// t: time.Date(2016, 02, 03, 9, 5, 1, 0, loc),
// expected: "9:05:01 am EST",
// },
// {
// t: time.Date(2016, 02, 03, 20, 5, 1, 0, loc),
// expected: "8:05:01 pm EST",
// },
}
trans := New()
for _, tt := range tests {
s := trans.FmtTimeLong(tt.t)
if s != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, s)
}
}
}
func TestFmtTimeMedium(t *testing.T) {
tests := []struct {
t time.Time
expected string
}{
// {
// t: time.Date(2016, 02, 03, 9, 5, 1, 0, time.UTC),
// expected: "9:05:01 am",
// },
// {
// t: time.Date(2016, 02, 03, 20, 5, 1, 0, time.UTC),
// expected: "8:05:01 pm",
// },
}
trans := New()
for _, tt := range tests {
s := trans.FmtTimeMedium(tt.t)
if s != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, s)
}
}
}
func TestFmtTimeShort(t *testing.T) {
tests := []struct {
t time.Time
expected string
}{
// {
// t: time.Date(2016, 02, 03, 9, 5, 1, 0, time.UTC),
// expected: "9:05 am",
// },
// {
// t: time.Date(2016, 02, 03, 20, 5, 1, 0, time.UTC),
// expected: "8:05 pm",
// },
}
trans := New()
for _, tt := range tests {
s := trans.FmtTimeShort(tt.t)
if s != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, s)
}
}
}
func TestFmtDateFull(t *testing.T) {
tests := []struct {
t time.Time
expected string
}{
// {
// t: time.Date(2016, 02, 03, 9, 0, 1, 0, time.UTC),
// expected: "Wednesday, February 3, 2016",
// },
}
trans := New()
for _, tt := range tests {
s := trans.FmtDateFull(tt.t)
if s != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, s)
}
}
}
func TestFmtDateLong(t *testing.T) {
tests := []struct {
t time.Time
expected string
}{
// {
// t: time.Date(2016, 02, 03, 9, 0, 1, 0, time.UTC),
// expected: "February 3, 2016",
// },
}
trans := New()
for _, tt := range tests {
s := trans.FmtDateLong(tt.t)
if s != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, s)
}
}
}
func TestFmtDateMedium(t *testing.T) {
tests := []struct {
t time.Time
expected string
}{
// {
// t: time.Date(2016, 02, 03, 9, 0, 1, 0, time.UTC),
// expected: "Feb 3, 2016",
// },
}
trans := New()
for _, tt := range tests {
s := trans.FmtDateMedium(tt.t)
if s != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, s)
}
}
}
func TestFmtDateShort(t *testing.T) {
tests := []struct {
t time.Time
expected string
}{
// {
// t: time.Date(2016, 02, 03, 9, 0, 1, 0, time.UTC),
// expected: "2/3/16",
// },
// {
// t: time.Date(-500, 02, 03, 9, 0, 1, 0, time.UTC),
// expected: "2/3/500",
// },
}
trans := New()
for _, tt := range tests {
s := trans.FmtDateShort(tt.t)
if s != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, s)
}
}
}
func TestFmtNumber(t *testing.T) {
tests := []struct {
num float64
v uint64
expected string
}{
// {
// num: 1123456.5643,
// v: 2,
// expected: "1,123,456.56",
// },
// {
// num: 1123456.5643,
// v: 1,
// expected: "1,123,456.6",
// },
// {
// num: 221123456.5643,
// v: 3,
// expected: "221,123,456.564",
// },
// {
// num: -221123456.5643,
// v: 3,
// expected: "-221,123,456.564",
// },
// {
// num: -221123456.5643,
// v: 3,
// expected: "-221,123,456.564",
// },
// {
// num: 0,
// v: 2,
// expected: "0.00",
// },
// {
// num: -0,
// v: 2,
// expected: "0.00",
// },
// {
// num: -0,
// v: 2,
// expected: "0.00",
// },
}
trans := New()
for _, tt := range tests {
s := trans.FmtNumber(tt.num, tt.v)
if s != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, s)
}
}
}
func TestFmtCurrency(t *testing.T) {
tests := []struct {
num float64
v uint64
currency currency.Type
expected string
}{
// {
// num: 1123456.5643,
// v: 2,
// currency: currency.USD,
// expected: "$1,123,456.56",
// },
// {
// num: 1123456.5643,
// v: 1,
// currency: currency.USD,
// expected: "$1,123,456.60",
// },
// {
// num: 221123456.5643,
// v: 3,
// currency: currency.USD,
// expected: "$221,123,456.564",
// },
// {
// num: -221123456.5643,
// v: 3,
// currency: currency.USD,
// expected: "-$221,123,456.564",
// },
// {
// num: -221123456.5643,
// v: 3,
// currency: currency.CAD,
// expected: "-CAD 221,123,456.564",
// },
// {
// num: 0,
// v: 2,
// currency: currency.USD,
// expected: "$0.00",
// },
// {
// num: -0,
// v: 2,
// currency: currency.USD,
// expected: "$0.00",
// },
// {
// num: -0,
// v: 2,
// currency: currency.CAD,
// expected: "CAD 0.00",
// },
// {
// num: 1.23,
// v: 0,
// currency: currency.USD,
// expected: "$1.00",
// },
}
trans := New()
for _, tt := range tests {
s := trans.FmtCurrency(tt.num, tt.v, tt.currency)
if s != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, s)
}
}
}
func TestFmtAccounting(t *testing.T) {
tests := []struct {
num float64
v uint64
currency currency.Type
expected string
}{
// {
// num: 1123456.5643,
// v: 2,
// currency: currency.USD,
// expected: "$1,123,456.56",
// },
// {
// num: 1123456.5643,
// v: 1,
// currency: currency.USD,
// expected: "$1,123,456.60",
// },
// {
// num: 221123456.5643,
// v: 3,
// currency: currency.USD,
// expected: "$221,123,456.564",
// },
// {
// num: -221123456.5643,
// v: 3,
// currency: currency.USD,
// expected: "($221,123,456.564)",
// },
// {
// num: -221123456.5643,
// v: 3,
// currency: currency.CAD,
// expected: "(CAD 221,123,456.564)",
// },
// {
// num: -0,
// v: 2,
// currency: currency.USD,
// expected: "$0.00",
// },
// {
// num: -0,
// v: 2,
// currency: currency.CAD,
// expected: "CAD 0.00",
// },
// {
// num: 1.23,
// v: 0,
// currency: currency.USD,
// expected: "$1.00",
// },
}
trans := New()
for _, tt := range tests {
s := trans.FmtAccounting(tt.num, tt.v, tt.currency)
if s != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, s)
}
}
}
func TestFmtPercent(t *testing.T) {
tests := []struct {
num float64
v uint64
expected string
}{
// {
// num: 15,
// v: 0,
// expected: "15%",
// },
// {
// num: 15,
// v: 2,
// expected: "15.00%",
// },
// {
// num: 434.45,
// v: 0,
// expected: "434%",
// },
// {
// num: 34.4,
// v: 2,
// expected: "34.40%",
// },
// {
// num: -34,
// v: 0,
// expected: "-34%",
// },
}
trans := New()
for _, tt := range tests {
s := trans.FmtPercent(tt.num, tt.v)
if s != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, s)
}
}
}
|
func TestDaysAbbreviated(t *testing.T) {
trans := New()
|
procps.go
|
// Copyright 2021 Northern.tech AS
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procps
import (
"errors"
"os"
"os/exec"
"strconv"
"syscall"
"time"
)
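// ProcessExists reports whether a process with the given pid currently exists, using the
// standard trick of sending signal 0, which performs only the existence/permission check.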
func ProcessExists(pid int) bool {
p, err := os.FindProcess(pid)
if err != nil {
return false
}
err = p.Signal(syscall.Signal(0))
return err == nil
}
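// TerminateAndWait stops the process identified by pid: it sends SIGTERM, waits a short
// grace period, escalates to SIGKILL, and then waits for the wrapped command to exit,
// returning an error if that takes longer than waitTimeout.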
func TerminateAndWait(pid int, command *exec.Cmd, waitTimeout time.Duration) (err error) {
p, _ := os.FindProcess(pid)
p.Signal(syscall.SIGTERM)
time.Sleep(2 * time.Second)
p.Signal(syscall.SIGKILL)
time.Sleep(2 * time.Second)
done := make(chan error, 1)
go func() {
done <- command.Wait()
}()
select {
case err := <-done:
if err != nil && err.Error() != "signal: killed" && err.Error() != "signal: hangup" && err.Error() != "exit status 130"
|
case <-time.After(waitTimeout):
return errors.New("waiting for pid " + strconv.Itoa(pid) + " timeout. the process will remain as zombie.")
}
return nil
}
|
{
return errors.New("error waiting for the process: " + err.Error())
}
|
func_translator.rs
|
//! Stand-alone WebAssembly to Cranelift IR translator.
//!
//! This module defines the `FuncTranslator` type which can translate a single WebAssembly
//! function to Cranelift IR guided by a `FuncEnvironment` which provides information about the
//! WebAssembly module and the runtime environment.
use crate::code_translator::{bitcast_arguments, translate_operator};
use crate::environ::{FuncEnvironment, ReturnMode, WasmResult};
use crate::state::{FuncTranslationState, ModuleTranslationState};
use crate::translation_utils::get_vmctx_value_label;
use crate::wasm_unsupported;
use cranelift_codegen::entity::EntityRef;
use cranelift_codegen::ir::{self, Ebb, InstBuilder, ValueLabel};
use cranelift_codegen::timing;
use cranelift_frontend::{FunctionBuilder, FunctionBuilderContext, Variable};
use log::info;
use wasmparser::{self, BinaryReader};
/// WebAssembly to Cranelift IR function translator.
///
/// A `FuncTranslator` is used to translate a binary WebAssembly function into Cranelift IR guided
/// by a `FuncEnvironment` object. A single translator instance can be reused to translate multiple
/// functions which will reduce heap allocation traffic.
pub struct FuncTranslator {
func_ctx: FunctionBuilderContext,
state: FuncTranslationState,
}
impl FuncTranslator {
/// Create a new translator.
pub fn new() -> Self {
Self {
func_ctx: FunctionBuilderContext::new(),
state: FuncTranslationState::new(),
}
}
/// Translate a binary WebAssembly function.
///
/// The `code` slice contains the binary WebAssembly *function code* as it appears in the code
/// section of a WebAssembly module, not including the initial size of the function code. The
/// slice is expected to contain two parts:
///
/// - The declaration of *locals*, and
/// - The function *body* as an expression.
///
/// See [the WebAssembly specification][wasm].
///
/// [wasm]: https://webassembly.github.io/spec/core/binary/modules.html#code-section
///
/// The Cranelift IR function `func` should be completely empty except for the `func.signature`
/// and `func.name` fields. The signature may contain special-purpose arguments which are not
/// regarded as WebAssembly local variables. Any signature arguments marked as
/// `ArgumentPurpose::Normal` are made accessible as WebAssembly local variables.
///
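/// A minimal usage sketch, mirroring the `small1` test at the bottom of this file (the
/// `runtime` environment, `module_translation_state` and `ctx` are stand-ins set up as in
/// that test and are used here only for illustration):
///
/// ```ignore
/// let mut trans = FuncTranslator::new();
/// let module_translation_state = ModuleTranslationState::new();
/// // `body` is the raw function code: local declarations followed by the body expression.
/// trans.translate(&module_translation_state, &body, 0, &mut ctx.func, &mut runtime.func_env())?;
/// ```
///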
pub fn translate<FE: FuncEnvironment + ?Sized>(
&mut self,
module_translation_state: &ModuleTranslationState,
code: &[u8],
code_offset: usize,
func: &mut ir::Function,
environ: &mut FE,
) -> WasmResult<()> {
self.translate_from_reader(
module_translation_state,
BinaryReader::new_with_offset(code, code_offset),
func,
environ,
)
}
/// Translate a binary WebAssembly function from a `BinaryReader`.
pub fn translate_from_reader<FE: FuncEnvironment + ?Sized>(
&mut self,
module_translation_state: &ModuleTranslationState,
mut reader: BinaryReader,
func: &mut ir::Function,
environ: &mut FE,
) -> WasmResult<()> {
let _tt = timing::wasm_translate_function();
info!(
"translate({} bytes, {}{})",
reader.bytes_remaining(),
func.name,
func.signature
);
debug_assert_eq!(func.dfg.num_ebbs(), 0, "Function must be empty");
debug_assert_eq!(func.dfg.num_insts(), 0, "Function must be empty");
// This clears the `FunctionBuilderContext`.
let mut builder = FunctionBuilder::new(func, &mut self.func_ctx);
builder.set_srcloc(cur_srcloc(&reader));
let entry_block = builder.create_ebb();
builder.append_ebb_params_for_function_params(entry_block);
builder.switch_to_block(entry_block); // This also creates values for the arguments.
builder.seal_block(entry_block); // Declare all predecessors known.
// Make sure the entry block is inserted in the layout before we make any callbacks to
// `environ`. The callback functions may need to insert things in the entry block.
builder.ensure_inserted_ebb();
let num_params = declare_wasm_parameters(&mut builder, entry_block, environ);
// Set up the translation state with a single pushed control block representing the whole
// function and its return values.
let exit_block = builder.create_ebb();
builder.append_ebb_params_for_function_returns(exit_block);
self.state.initialize(&builder.func.signature, exit_block);
parse_local_decls(&mut reader, &mut builder, num_params, environ)?;
parse_function_body(
module_translation_state,
reader,
&mut builder,
&mut self.state,
environ,
)?;
builder.finalize();
Ok(())
}
}
/// Declare local variables for the signature parameters that correspond to WebAssembly locals.
///
/// Return the number of local variables declared.
fn declare_wasm_parameters<FE: FuncEnvironment + ?Sized>(
builder: &mut FunctionBuilder,
entry_block: Ebb,
environ: &FE,
) -> usize {
let sig_len = builder.func.signature.params.len();
let mut next_local = 0;
for i in 0..sig_len {
let param_type = builder.func.signature.params[i];
// There may be additional special-purpose parameters in addition to the normal WebAssembly
// signature parameters. For example, a `vmctx` pointer.
if environ.is_wasm_parameter(&builder.func.signature, i) {
// This is a normal WebAssembly signature parameter, so create a local for it.
let local = Variable::new(next_local);
builder.declare_var(local, param_type.value_type);
next_local += 1;
let param_value = builder.ebb_params(entry_block)[i];
builder.def_var(local, param_value);
}
if param_type.purpose == ir::ArgumentPurpose::VMContext {
let param_value = builder.ebb_params(entry_block)[i];
builder.set_val_label(param_value, get_vmctx_value_label());
}
}
next_local
}
/// Parse the local variable declarations that precede the function body.
///
/// Declare local variables, starting from `num_params`.
fn parse_local_decls<FE: FuncEnvironment + ?Sized>(
reader: &mut BinaryReader,
builder: &mut FunctionBuilder,
num_params: usize,
environ: &mut FE,
) -> WasmResult<()> {
let mut next_local = num_params;
let local_count = reader.read_local_count()?;
let mut locals_total = 0;
for _ in 0..local_count {
builder.set_srcloc(cur_srcloc(reader));
let (count, ty) = reader.read_local_decl(&mut locals_total)?;
declare_locals(builder, count, ty, &mut next_local, environ)?;
}
Ok(())
}
/// Declare `count` local variables of the same type, starting from `next_local`.
///
/// Fail if too many locals are declared in the function, or if the type is not valid for a local.
fn
|
<FE: FuncEnvironment + ?Sized>(
builder: &mut FunctionBuilder,
count: u32,
wasm_type: wasmparser::Type,
next_local: &mut usize,
environ: &mut FE,
) -> WasmResult<()> {
// All locals are initialized to 0.
use wasmparser::Type::*;
let zeroval = match wasm_type {
I32 => builder.ins().iconst(ir::types::I32, 0),
I64 => builder.ins().iconst(ir::types::I64, 0),
F32 => builder.ins().f32const(ir::immediates::Ieee32::with_bits(0)),
F64 => builder.ins().f64const(ir::immediates::Ieee64::with_bits(0)),
V128 => {
let constant_handle = builder.func.dfg.constants.insert([0; 16].to_vec().into());
builder.ins().vconst(ir::types::I8X16, constant_handle)
}
AnyRef => builder.ins().null(environ.reference_type()),
AnyFunc => builder.ins().null(environ.reference_type()),
ty => return Err(wasm_unsupported!("unsupported local type {:?}", ty)),
};
let ty = builder.func.dfg.value_type(zeroval);
for _ in 0..count {
let local = Variable::new(*next_local);
builder.declare_var(local, ty);
builder.def_var(local, zeroval);
builder.set_val_label(zeroval, ValueLabel::new(*next_local));
*next_local += 1;
}
Ok(())
}
/// Parse the function body in `reader`.
///
/// This assumes that the local variable declarations have already been parsed and function
/// arguments and locals are declared in the builder.
fn parse_function_body<FE: FuncEnvironment + ?Sized>(
module_translation_state: &ModuleTranslationState,
mut reader: BinaryReader,
builder: &mut FunctionBuilder,
state: &mut FuncTranslationState,
environ: &mut FE,
) -> WasmResult<()> {
// The control stack is initialized with a single block representing the whole function.
debug_assert_eq!(state.control_stack.len(), 1, "State not initialized");
// Keep going until the final `End` operator which pops the outermost block.
while !state.control_stack.is_empty() {
builder.set_srcloc(cur_srcloc(&reader));
let op = reader.read_operator()?;
environ.before_translate_operator(&op, builder, state)?;
translate_operator(module_translation_state, &op, builder, state, environ)?;
environ.after_translate_operator(&op, builder, state)?;
}
// The final `End` operator left us in the exit block where we need to manually add a return
// instruction.
//
// If the exit block is unreachable, it may not have the correct arguments, so we would
// generate a return instruction that doesn't match the signature.
if state.reachable {
debug_assert!(builder.is_pristine());
if !builder.is_unreachable() {
match environ.return_mode() {
ReturnMode::NormalReturns => {
let return_types = &builder.func.signature.return_types();
bitcast_arguments(&mut state.stack, &return_types, builder);
builder.ins().return_(&state.stack)
}
ReturnMode::FallthroughReturn => builder.ins().fallthrough_return(&state.stack),
};
}
}
// Discard any remaining values on the stack. Either we just returned them,
// or the end of the function is unreachable.
state.stack.clear();
debug_assert!(reader.eof());
Ok(())
}
/// Get the current source location from a reader.
fn cur_srcloc(reader: &BinaryReader) -> ir::SourceLoc {
// We record source locations as byte code offsets relative to the beginning of the file.
// This will wrap around if byte code is larger than 4 GB.
ir::SourceLoc::new(reader.original_position() as u32)
}
#[cfg(test)]
mod tests {
use super::{FuncTranslator, ReturnMode};
use crate::environ::DummyEnvironment;
use crate::ModuleTranslationState;
use cranelift_codegen::ir::types::I32;
use cranelift_codegen::{ir, isa, settings, Context};
use log::debug;
use target_lexicon::PointerWidth;
#[test]
fn small1() {
// Implicit return.
//
// (func $small1 (param i32) (result i32)
// (i32.add (get_local 0) (i32.const 1))
// )
const BODY: [u8; 7] = [
0x00, // local decl count
0x20, 0x00, // get_local 0
0x41, 0x01, // i32.const 1
0x6a, // i32.add
0x0b, // end
];
let mut trans = FuncTranslator::new();
let flags = settings::Flags::new(settings::builder());
let runtime = DummyEnvironment::new(
isa::TargetFrontendConfig {
default_call_conv: isa::CallConv::Fast,
pointer_width: PointerWidth::U64,
},
ReturnMode::NormalReturns,
false,
);
let module_translation_state = ModuleTranslationState::new();
let mut ctx = Context::new();
ctx.func.name = ir::ExternalName::testcase("small1");
ctx.func.signature.params.push(ir::AbiParam::new(I32));
ctx.func.signature.returns.push(ir::AbiParam::new(I32));
trans
.translate(
&module_translation_state,
&BODY,
0,
&mut ctx.func,
&mut runtime.func_env(),
)
.unwrap();
debug!("{}", ctx.func.display(None));
ctx.verify(&flags).unwrap();
}
#[test]
fn small2() {
// Same as above, but with an explicit return instruction.
//
// (func $small2 (param i32) (result i32)
// (return (i32.add (get_local 0) (i32.const 1)))
// )
const BODY: [u8; 8] = [
0x00, // local decl count
0x20, 0x00, // get_local 0
0x41, 0x01, // i32.const 1
0x6a, // i32.add
0x0f, // return
0x0b, // end
];
let mut trans = FuncTranslator::new();
let flags = settings::Flags::new(settings::builder());
let runtime = DummyEnvironment::new(
isa::TargetFrontendConfig {
default_call_conv: isa::CallConv::Fast,
pointer_width: PointerWidth::U64,
},
ReturnMode::NormalReturns,
false,
);
let module_translation_state = ModuleTranslationState::new();
let mut ctx = Context::new();
ctx.func.name = ir::ExternalName::testcase("small2");
ctx.func.signature.params.push(ir::AbiParam::new(I32));
ctx.func.signature.returns.push(ir::AbiParam::new(I32));
trans
.translate(
&module_translation_state,
&BODY,
0,
&mut ctx.func,
&mut runtime.func_env(),
)
.unwrap();
debug!("{}", ctx.func.display(None));
ctx.verify(&flags).unwrap();
}
#[test]
fn infloop() {
// An infinite loop, no return instructions.
//
// (func $infloop (result i32)
// (local i32)
// (loop (result i32)
// (i32.add (get_local 0) (i32.const 1))
// (set_local 0)
// (br 0)
// )
// )
const BODY: [u8; 16] = [
0x01, // 1 local decl.
0x01, 0x7f, // 1 i32 local.
0x03, 0x7f, // loop i32
0x20, 0x00, // get_local 0
0x41, 0x01, // i32.const 1
0x6a, // i32.add
0x21, 0x00, // set_local 0
0x0c, 0x00, // br 0
0x0b, // end
0x0b, // end
];
let mut trans = FuncTranslator::new();
let flags = settings::Flags::new(settings::builder());
let runtime = DummyEnvironment::new(
isa::TargetFrontendConfig {
default_call_conv: isa::CallConv::Fast,
pointer_width: PointerWidth::U64,
},
ReturnMode::NormalReturns,
false,
);
let module_translation_state = ModuleTranslationState::new();
let mut ctx = Context::new();
ctx.func.name = ir::ExternalName::testcase("infloop");
ctx.func.signature.returns.push(ir::AbiParam::new(I32));
trans
.translate(
&module_translation_state,
&BODY,
0,
&mut ctx.func,
&mut runtime.func_env(),
)
.unwrap();
debug!("{}", ctx.func.display(None));
ctx.verify(&flags).unwrap();
}
}
|
declare_locals
|
kafka_source.rs
|
use timely::dataflow::operators::Inspect;
use rdkafka::config::ClientConfig;
use rdkafka::consumer::{Consumer, BaseConsumer, DefaultConsumerContext};
fn
|
() {
let mut args = ::std::env::args();
args.next();
// Extract Kafka topic.
let topic = args.next().expect("Must specify a Kafka topic");
let brokers = "localhost:9092";
// Create Kafka consumer configuration.
// Feel free to change parameters here.
let mut consumer_config = ClientConfig::new();
consumer_config
.set("produce.offset.report", "true")
.set("auto.offset.reset", "smallest")
.set("group.id", "example")
.set("enable.auto.commit", "false")
.set("enable.partition.eof", "false")
.set("auto.offset.reset", "earliest")
.set("session.timeout.ms", "6000")
.set("bootstrap.servers", &brokers);
timely::execute_from_args(args, move |worker| {
// A dataflow for producing spans.
worker.dataflow::<u64,_,_>(|scope| {
// Create a Kafka consumer.
let consumer : BaseConsumer<DefaultConsumerContext> = consumer_config.create().expect("Couldn't create consumer");
consumer.subscribe(&[&topic]).expect("Failed to subscribe to topic");
let strings =
kafkaesque::source(scope, "KafkaStringSource", consumer, |bytes, capability, output| {
// If the bytes are utf8, convert to string and send.
if let Ok(text) = std::str::from_utf8(bytes) {
output
.session(capability)
.give(text.to_string());
}
// We need some rule to advance timestamps ...
let time = *capability.time();
capability.downgrade(&(time + 1));
// Indicate that we are not yet done.
false
});
strings.inspect(|x| println!("Observed: {:?}", x));
});
}).expect("Timely computation failed somehow");
println!("Hello, world!");
}
|
main
|
webrtc.js
|
navigator.getUserMedia = (navigator.getUserMedia || navigator.webkitGetUserMedia || navigator.mozGetUserMedia || navigator.msGetUserMedia);
function getUserMedia(constraints, onSuccess, onError) {
navigator.getUserMedia(constraints, onSuccess, onError);
}
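// Returns true when the ICE candidate string contains a private 192.x.x.x address.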
function isInnerCandidate(candidate){
var innerIP = /192\.\d+\.\d+\.\d+/ig;
return innerIP.test(candidate);
}
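// Creates an RTCPeerConnection configured with a public Google STUN server.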
function createPeerConnection(){
var servers = {
iceServers: [
{url: "stun:stun.l.google.com:19302"}
]
};
var pcConstraint = null;
return new RTCPeerConnection(servers, pcConstraint);
}
function
|
(peerConnection, type, onopen, onclose, onmessage){
if(!(peerConnection instanceof RTCPeerConnection)){
throw new Error('PeerConnection is not valid');
}
var dataConstraint = null;
var dataChannel = peerConnection.createDataChannel(type, dataConstraint);
dataChannel.onopen = onopen;
dataChannel.onclose = onclose;
dataChannel.onmessage = onmessage;
return dataChannel;
}
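// Creates an SDP offer; receiving audio/video defaults to enabled when not specified.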
function createOffer(peerConnection, enableAudio, enableVideo){
if(!(peerConnection instanceof RTCPeerConnection)){
throw new Error('PeerConnection is not valid');
}
var options = {
offerToReceiveAudio: typeof enableAudio === 'undefined' ? 1 : enableAudio,
offerToReceiveVideo: typeof enableVideo === 'undefined' ? 1 : enableVideo
};
return peerConnection.createOffer(options);
}
function createAnswer(peerConnection){
if(!(peerConnection instanceof RTCPeerConnection)){
throw new Error('PeerConnection is not valid');
}
return peerConnection.createAnswer();
}
|
createDataChannel
|
manage.py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def
|
():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'blogsrc.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
main
|
tracker.rs
|
use crate::{config::Config, store::Store};
use carapax::{
context::Context,
core::{methods::GetChatMember, types::Update, Api},
HandlerFuture, HandlerResult,
};
use futures::Future;
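/// Handles an incoming update: when it originates from the configured chat,
/// fetches the sender's chat member info and caches the user in the store.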
pub fn
|
(context: &mut Context, update: Update) -> HandlerFuture {
let user_id = update.get_user().map(|x| x.id);
let chat_id = update.get_chat_id();
if let (Some(user_id), Some(chat_id)) = (user_id, chat_id) {
let config = context.get::<Config>();
if config.chat_id == chat_id {
let api = context.get::<Api>();
let store = context.get::<Store>().clone();
return HandlerFuture::new(api.execute(GetChatMember::new(chat_id, user_id)).and_then(
move |member| {
store
.set_user(member.user().clone())
.map(|()| HandlerResult::Continue)
},
));
}
}
HandlerResult::Continue.into()
}
|
handle_update
|
0014_auto_20171121_1804.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-11-21 21:04
from __future__ import unicode_literals
from django.db import migrations, models
|
dependencies = [
('core', '0013_auto_20171106_1017'),
]
operations = [
migrations.AlterField(
model_name='project',
name='description',
field=models.CharField(blank=True, max_length=250),
),
migrations.AlterField(
model_name='project',
name='location',
field=models.CharField(blank=True, max_length=150),
),
]
|
class Migration(migrations.Migration):
|
auth.e2e-spec.ts
|
import { Test, TestingModule } from '@nestjs/testing';
import { INestApplication } from '@nestjs/common';
import * as request from 'supertest';
import { AppModule } from '../src/app.module';
describe('Authentication system', () => {
let app: INestApplication;
beforeEach(async () => {
const moduleFixture: TestingModule = await Test.createTestingModule({
imports: [AppModule],
}).compile();
app = moduleFixture.createNestApplication();
await app.init();
});
it('auth/signup', () => {
const email = '[email protected]';
return request(app.getHttpServer())
.post('/auth/signup')
.send({ email: email, password: '123456'})
.expect(201)
.then((res) => {
const {id, email: returnedEmail} = res.body;
expect(id).toBeDefined();
expect(returnedEmail).toEqual(email)
})
});
it('auth/signup then whoami', async () => {
|
.send({ email: email, password: '123456'})
.expect(201);
const cookie = res.get('Set-Cookie');
const {body} = await request(app.getHttpServer())
.get('/auth/whoami')
.set('Cookie', cookie)
.expect(200)
expect(body.email).toEqual(email);
});
});
|
const email = '[email protected]';
const res = await request(app.getHttpServer())
.post('/auth/signup')
|
passthrough.go
|
/*
*
* Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// Package passthrough implements a pass-through resolver. It sends the target
// name without scheme back to gRPC as the resolved address.
package passthrough
import "github.com/publica-project/grpc/resolver"
const scheme = "passthrough"
type passthroughBuilder struct{}
func (*passthroughBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) {
r := &passthroughResolver{
target: target,
cc: cc,
}
r.start()
return r, nil
}
func (*passthroughBuilder) Scheme() string {
return scheme
}
type passthroughResolver struct {
target resolver.Target
cc resolver.ClientConn
}
func (r *passthroughResolver) start() {
r.cc.NewAddress([]resolver.Address{{Addr: r.target.Endpoint}})
}
func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOption) {}
func (*passthroughResolver) Close() {}
func
|
() {
resolver.Register(&passthroughBuilder{})
}
|
init
|
test_dxl_comms.py
|
# Logging level must be set before importing any stretch_body class
import stretch_body.robot_params
#stretch_body.robot_params.RobotParams.set_logging_level("DEBUG")
import unittest
import stretch_body.device
import stretch_body.robot as robot
import numpy as np
class TestTimingStats(unittest.TestCase):
|
def test_thread_starvation_group_sync_read(self):
robot = stretch_body.robot.Robot()
robot.end_of_arm.params['use_group_sync_read']=1
print(robot.end_of_arm.joints)
print('Starting test_thread_starvation')
print('Latency timer of %f'%robot.end_of_arm.params['dxl_latency_timer'])
print('Testing on tool %s'%robot.params['tool'])
robot.startup()
try:
for itr in range(100): # Make a large CPU load
x = np.random.rand(3, 1000, 1000)
x.tolist()
except (IndexError, IOError) as e:
self.fail("IndexError or IOError failure in comms")
self.assertTrue(robot.end_of_arm.comm_errors.status['n_rx']<2)
robot.end_of_arm.comm_errors.pretty_print()
robot.stop()
|
|
_container_registry_management_client.py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Optional, TYPE_CHECKING
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core import AsyncARMPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
from ._configuration import ContainerRegistryManagementClientConfiguration
from .operations import ConnectedRegistriesOperations
from .operations import ExportPipelinesOperations
from .operations import RegistriesOperations
from .operations import ImportPipelinesOperations
from .operations import Operations
from .operations import PipelineRunsOperations
from .operations import PrivateEndpointConnectionsOperations
from .operations import ReplicationsOperations
from .operations import ScopeMapsOperations
from .operations import TokensOperations
from .operations import WebhooksOperations
from .. import models
class ContainerRegistryManagementClient(object):
"""ContainerRegistryManagementClient.
:ivar connected_registries: ConnectedRegistriesOperations operations
:vartype connected_registries: azure.mgmt.containerregistry.v2020_11_01_preview.aio.operations.ConnectedRegistriesOperations
:ivar export_pipelines: ExportPipelinesOperations operations
:vartype export_pipelines: azure.mgmt.containerregistry.v2020_11_01_preview.aio.operations.ExportPipelinesOperations
:ivar registries: RegistriesOperations operations
:vartype registries: azure.mgmt.containerregistry.v2020_11_01_preview.aio.operations.RegistriesOperations
:ivar import_pipelines: ImportPipelinesOperations operations
:vartype import_pipelines: azure.mgmt.containerregistry.v2020_11_01_preview.aio.operations.ImportPipelinesOperations
:ivar operations: Operations operations
:vartype operations: azure.mgmt.containerregistry.v2020_11_01_preview.aio.operations.Operations
:ivar pipeline_runs: PipelineRunsOperations operations
:vartype pipeline_runs: azure.mgmt.containerregistry.v2020_11_01_preview.aio.operations.PipelineRunsOperations
:ivar private_endpoint_connections: PrivateEndpointConnectionsOperations operations
:vartype private_endpoint_connections: azure.mgmt.containerregistry.v2020_11_01_preview.aio.operations.PrivateEndpointConnectionsOperations
:ivar replications: ReplicationsOperations operations
:vartype replications: azure.mgmt.containerregistry.v2020_11_01_preview.aio.operations.ReplicationsOperations
:ivar scope_maps: ScopeMapsOperations operations
:vartype scope_maps: azure.mgmt.containerregistry.v2020_11_01_preview.aio.operations.ScopeMapsOperations
:ivar tokens: TokensOperations operations
:vartype tokens: azure.mgmt.containerregistry.v2020_11_01_preview.aio.operations.TokensOperations
:ivar webhooks: WebhooksOperations operations
:vartype webhooks: azure.mgmt.containerregistry.v2020_11_01_preview.aio.operations.WebhooksOperations
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: The Microsoft Azure subscription ID.
:type subscription_id: str
:param str base_url: Service URL
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
"""
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
base_url: Optional[str] = None,
**kwargs: Any
) -> None:
|
async def _send_request(self, http_request: HttpRequest, **kwargs: Any) -> AsyncHttpResponse:
"""Runs the network request through the client's chained policies.
:param http_request: The network request you want to make. Required.
:type http_request: ~azure.core.pipeline.transport.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to True.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.pipeline.transport.AsyncHttpResponse
"""
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
http_request.url = self._client.format_url(http_request.url, **path_format_arguments)
stream = kwargs.pop("stream", True)
pipeline_response = await self._client._pipeline.run(http_request, stream=stream, **kwargs)
return pipeline_response.http_response
async def close(self) -> None:
await self._client.close()
async def __aenter__(self) -> "ContainerRegistryManagementClient":
await self._client.__aenter__()
return self
async def __aexit__(self, *exc_details) -> None:
await self._client.__aexit__(*exc_details)
|
if not base_url:
base_url = 'https://management.azure.com'
self._config = ContainerRegistryManagementClientConfiguration(credential, subscription_id, **kwargs)
self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._serialize.client_side_validation = False
self._deserialize = Deserializer(client_models)
self.connected_registries = ConnectedRegistriesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.export_pipelines = ExportPipelinesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.registries = RegistriesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.import_pipelines = ImportPipelinesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.operations = Operations(
self._client, self._config, self._serialize, self._deserialize)
self.pipeline_runs = PipelineRunsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.private_endpoint_connections = PrivateEndpointConnectionsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.replications = ReplicationsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.scope_maps = ScopeMapsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.tokens = TokensOperations(
self._client, self._config, self._serialize, self._deserialize)
self.webhooks = WebhooksOperations(
self._client, self._config, self._serialize, self._deserialize)
|
Grid.js
|
import Cell from './Cell';
import * as bitmask from 'bitwise-mask';
const TOP = 1 << 0;
const RIGHT = 1 << 1;
const BOTTOM = 1 << 2;
const LEFT = 1 << 3;
const TOP_RIGHT = TOP | RIGHT;
const BOTTOM_RIGHT = BOTTOM | RIGHT;
const BOTTOM_LEFT = BOTTOM | LEFT;
const TOP_LEFT = TOP | LEFT;
/**
* @class Grid
*/
export default class Grid {
/**
* @constructs Grid
* @param {uint} [dx = 0]
* @param {uint} [dy = 0]
*/
constructor(dx = 0, dy = 0) {
this.dx = dx;
this.dy = dy;
this.cells = this.getCells(dx, dy);
}
/**
* @method getCells
* @private
* @param {uint} dx
* @param {uint} dy
* @returns {Cell[][]}
*/
getCells(dx, dy) {
const cells = new Array(dx);
for(let x = 0; x < dx; x++) {
cells[x] = new Array(dy);
for(let y = 0; y < dy; y++) {
cells[x][y] = new Cell(x, y);
}
}
return cells;
}
/**
* @method contains
* @public
* @param {uint} x
* @param {uint} y
*/
contains(x, y) {
return x >= 0 && x < this.dx && y >= 0 && y < this.dy;
}
/**
* @method getCellAt
* @public
* @param {uint} x
* @param {uint} y
* @returns {Cell}
*/
getCellAt(x, y) {
return this.cells[x][y];
}
/**
|
* @param {uint} x
* @param {uint} y
* @param {boolean} [diagonal = false]
* @returns {Cell[]}
*/
getNeighborsAt(x, y, diagonal = false) {
const neighbors = [];
let mask = bitmask.create();
// ↑
if(this.contains(x, y - 1)) {
const cell = this.getCellAt(x, y - 1);
if(cell.isWalkable) {
mask = bitmask.add(mask, TOP);
neighbors.push(cell);
}
}
// →
if(this.contains(x + 1, y)) {
const cell = this.getCellAt(x + 1, y);
if(cell.isWalkable) {
mask = bitmask.add(mask, RIGHT);
neighbors.push(cell);
}
}
// ↓
if(this.contains(x, y + 1)) {
const cell = this.getCellAt(x, y + 1);
if(cell.isWalkable) {
mask = bitmask.add(mask, BOTTOM);
neighbors.push(cell);
}
}
// ←
if(this.contains(x - 1, y)) {
const cell = this.getCellAt(x - 1, y);
if(cell.isWalkable) {
mask = bitmask.add(mask, LEFT);
neighbors.push(cell);
}
}
if(diagonal) {
// ↗
if(this.contains(x + 1, y - 1)) {
const cell = this.getCellAt(x + 1, y - 1);
if(cell.isWalkable && bitmask.contains(mask, TOP_RIGHT)) {
neighbors.push(cell);
}
}
// ↘
if(this.contains(x + 1, y + 1)) {
const cell = this.getCellAt(x + 1, y + 1);
if(cell.isWalkable && bitmask.contains(mask, BOTTOM_RIGHT)) {
neighbors.push(cell);
}
}
// ↙
if(this.contains(x - 1, y + 1)) {
const cell = this.getCellAt(x - 1, y + 1);
if(cell.isWalkable && bitmask.contains(mask, BOTTOM_LEFT)) {
neighbors.push(cell);
}
}
// ↖
if(this.contains(x - 1, y - 1)) {
const cell = this.getCellAt(x - 1, y - 1);
if(cell.isWalkable && bitmask.contains(mask, TOP_LEFT)) {
neighbors.push(cell);
}
}
}
return neighbors;
}
/**
* @method dispose
* @public
*/
dispose() {
for(let column of this.cells) {
for(let cell of column) {
cell.dispose();
}
}
this.cells = null;
}
}
|
* @method getNeighborsAt
* @public
|
sale.component.ts
|
import { Component, OnInit } from '@angular/core';
@Component({
selector: 'app-sale',
templateUrl: './sale.component.html'
})
export class
|
implements OnInit {
constructor() { }
ngOnInit() {
}
}
|
SaleComponent
|
matrix.rs
|
//! An efficient representation of a 2D matrix.
use enso_prelude::default;
use std::ops::Index;
use std::ops::IndexMut;
// ============
// == Matrix ==
// ============
/// An efficient 2D matrix implemented on top of [`std::vec::Vec`].
#[derive(Clone,Debug,Default,PartialEq,Eq)]
pub struct Matrix<T> {
/// The number of rows in the matrix.
rows: usize,
/// The number of columns in the matrix.
columns: usize,
/// The matrix.
matrix: Vec<T>,
}
impl<T:Default> Matrix<T> {
/// Constructs a matrix with the dimensions given by `rows` and `columns`.
pub fn new(rows:usize, columns:usize) -> Self {
let mut matrix = Vec::with_capacity(rows*columns);
for _ in 0..matrix.capacity() {
matrix.push(default())
}
Self{rows,columns,matrix}
}
/// Adds a new row to the matrix `self`, filled with default values.
pub fn new_row(&mut self) {
|
for _ in 0..self.columns {
self.matrix.push(default());
}
self.rows += 1;
}
}
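// A minimal usage sketch (indexing is row-major, as (row, column)):
//
// let mut m: Matrix<u8> = Matrix::new(2, 3);
// m[(1, 2)] = 7;
// assert_eq!(m[(1, 2)], 7);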
// === Trait Impls ===
impl<T> Index<(usize,usize)> for Matrix<T> {
type Output = T;
fn index(&self, index:(usize,usize)) -> &T {
&self.matrix[index.0*self.columns+index.1]
}
}
impl<T> IndexMut<(usize,usize)> for Matrix<T> {
fn index_mut(&mut self, index:(usize,usize)) -> &mut T {
&mut self.matrix[index.0*self.columns+index.1]
}
}
| |
main_test.go
|
//=============================================================================
// File: main_test.go
// Contents: SETUP and TEARDOWN for tests
//=============================================================================
package figtree_test
import (
"fmt"
"os"
"testing"
)
func TestMain(m *testing.M)
|
{
fmt.Println("=== SETUP")
os.Remove("testdata/actual/include-figtree")
os.Remove("testdata/actual/include-internal")
os.Remove("testdata/actual/sample-figtree")
os.Remove("testdata/actual/sample-internal")
rc := m.Run()
fmt.Println("=== TEARDOWN")
os.Exit(rc)
}
|
|
conversations.spec.js
|
import conversationsAPI from '../conversations';
import ApiClient from '../ApiClient';
import describeWithAPIMock from './apiSpecHelper';
describe('#ConversationApi', () => {
it('creates correct instance', () => {
expect(conversationsAPI).toBeInstanceOf(ApiClient);
expect(conversationsAPI).toHaveProperty('get');
expect(conversationsAPI).toHaveProperty('show');
expect(conversationsAPI).toHaveProperty('create');
expect(conversationsAPI).toHaveProperty('update');
expect(conversationsAPI).toHaveProperty('delete');
expect(conversationsAPI).toHaveProperty('getLabels');
expect(conversationsAPI).toHaveProperty('updateLabels');
});
describeWithAPIMock('API calls', context => {
it('#getLabels', () => {
conversationsAPI.getLabels(1);
expect(context.axiosMock.get).toHaveBeenCalledWith(
'/api/v1/conversations/1/labels'
);
});
it('#updateLabels', () => {
const labels = ['support-query'];
conversationsAPI.updateLabels(1, labels);
expect(context.axiosMock.post).toHaveBeenCalledWith(
'/api/v1/conversations/1/labels',
{
labels,
}
);
});
it('#pinConverstation', () => {
const converstationID = ['support-query'];
conversationsAPI.pinConverstation(1, converstationID);
expect(context.axiosMock.post).toHaveBeenCalledWith(
'/api/v1/conversations/1/pinned',
{
converstationID,
}
);
|
});
|
});
});
|
mumps.ts
|
export class MumpsVirtualDocument {
static readonly schemes = {
compiled: 'compiledMumps',
coverage: 'coverageMumps',
};
readonly parsedDocument: ParsedDocument;
constructor(
readonly routineName: string,
readonly sourceCode: string,
/**
* Uri with scheme in `mumpsSchemes`
*/
readonly uri: Uri,
) {
this.parsedDocument = parseText(sourceCode);
virtualDocuments.set(uri.toString(), this);
}
}
export class MumpsDocumentProvider implements TextDocumentContentProvider {
provideTextDocumentContent(uri: Uri): string {
return getVirtualDocument(uri).sourceCode;
}
}
export function getVirtualDocument(uri: Uri) {
return virtualDocuments.get(uri.toString());
}
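/** Returns true when the given Uri uses one of the virtual MUMPS schemes. */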
function isScheme(uri: Uri) {
return Object.values(MumpsVirtualDocument.schemes).indexOf(uri.scheme) > -1;
}
/**
* Virtual documents keyed by the string representation of their `Uri`s
*/
const virtualDocuments = new Map<string, MumpsVirtualDocument>();
const _onDidDeleteVirtualMumps = new EventEmitter<Uri>();
export const onDidDeleteVirtualMumps = _onDidDeleteVirtualMumps.event;
workspace.onDidCloseTextDocument(textDocument => {
const uri = textDocument.uri;
if (isScheme(uri)) {
virtualDocuments.delete(uri.toString());
_onDidDeleteVirtualMumps.fire(uri);
}
});
|
import { EventEmitter, TextDocumentContentProvider, Uri, workspace } from 'vscode';
import { ParsedDocument, parseText } from '../parser';
|
|
item.rs
|
use super::{
comment::comment, statement::statement, template::whitespace, writ::writ, Res, Span, Statement,
Static, Writ,
};
use nom::branch::alt;
use nom::bytes::complete::tag;
use nom::character::complete::char;
use nom::combinator::{cut, opt};
use nom::error::VerboseError;
use nom::sequence::tuple;
use proc_macro2::TokenStream;
use quote::{quote, ToTokens, TokenStreamExt};
#[derive(Debug, PartialEq, Eq)]
pub enum Item<'a> {
Comment,
Writ(Writ<'a>),
Statement(Statement<'a>),
Static(Static),
CompileError(String),
}
impl ToTokens for Item<'_> {
fn to_tokens(&self, tokens: &mut TokenStream) {
tokens.append_all(match self {
Item::Comment => quote! {},
Item::Writ(writ) => quote! { #writ },
Item::Statement(statement) => quote! { #statement },
Item::Static(text) => quote! { #text },
Item::CompileError(text) => quote! { compile_error!(#text); },
});
}
}
#[derive(Debug)]
pub enum TagOpen {
Writ,
Statement,
Comment,
}
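/// Parses a single template tag: a writ (`{{`), statement (`{%`) or comment (`{#`),
/// returning the parsed tag together with any surrounding whitespace items.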
pub fn parse_tag(input: Span) -> Res<&str, Vec<Item>> {
let (input, (leading_whitespace, open)) = tag_start(input)?;
let parser = match open {
TagOpen::Writ => writ,
TagOpen::Statement => statement,
TagOpen::Comment => comment,
};
let (input, (tag, trailing_whitespace)) = cut(parser)(input)?;
let mut items = vec![];
if let Some(leading_whitespace) = leading_whitespace {
items.push(leading_whitespace.into());
}
items.push(tag);
if let Some(trailing_whitespace) = trailing_whitespace {
|
Ok((input, items))
}
pub fn tag_start(input: Span) -> Res<&str, (Option<Static>, TagOpen)> {
let (input, (whitespace, open, command)) = tuple((
// Whitespace is optional, but tracked because it could be altered by the tag.
opt(whitespace),
// Check if this is actually a tag; if it's not, that's fine, just return early.
tag_open,
// Whitespace control characters are optional.
opt(alt((collapse_whitespace_command, trim_whitespace_command))),
))(input)?;
let whitespace = match command {
// Collapse to a single space if there's any leading whitespace.
Some('_') => whitespace.map(|_| Static(" ".to_string())),
// Remove any leading whitespace.
Some('-') => None,
Some(_) => unreachable!("Only - or _ should be matched"),
// Convert any leading whitespace to `Static()` without adjusting.
None => whitespace.map(|whitespace| Static(whitespace.to_string())),
};
Ok((input, (whitespace, open)))
}
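// Parses a tag close delimiter, optionally preceded by a whitespace-control
// character: `_` collapses the whitespace that follows the tag to a single
// space, `-` removes it entirely.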
pub fn tag_end(tag_close: &str) -> impl Fn(Span) -> Res<&str, Option<Static>> + '_ {
move |input| {
if let Ok((input, _tag)) = tag::<_, _, VerboseError<_>>(tag_close)(input) {
return Ok((input, None));
}
let (input, (command, _, whitespace)) = tuple((
alt((collapse_whitespace_command, trim_whitespace_command)),
tag(tag_close),
opt(whitespace),
))(input)?;
let whitespace = match command {
'_' => whitespace.map(|_| Static(" ".to_string())),
'-' => None,
_ => unreachable!("Only - or _ should be matched"),
};
Ok((input, whitespace))
}
}
pub fn tag_open(input: Span) -> Res<&str, TagOpen> {
let (input, output) = alt((
tag("{{"), // writ
tag("{%"), // statement
tag("{#"), // comment
))(input)?;
match output {
"{{" => Ok((input, TagOpen::Writ)),
"{%" => Ok((input, TagOpen::Statement)),
"{#" => Ok((input, TagOpen::Comment)),
_ => panic!("This should never happen"),
}
}
fn collapse_whitespace_command(input: Span) -> Res<&str, char> {
char('_')(input)
}
fn trim_whitespace_command(input: Span) -> Res<&str, char> {
char('-')(input)
}
|
items.push(trailing_whitespace.into());
}
|
fsshell.go
|
package webhdfs
import "os"
import "bytes"
import "fmt"
import "io"
import "path"
import "io/ioutil"
const MAX_UP_CHUNK int64 = 1 * (1024 * 1024) * 1024 // 1 GB.
const MAX_DOWN_CHUNK int64 = 500 * (1024 * 1024) // 500 MB
type FsShell struct {
FileSystem *FileSystem
WorkingPath string
}
// Appends the specified list of local files to the HDFS path.
func (shell FsShell) AppendToFile(filePaths []string, hdfsPath string) (bool, error) {
for _, path := range filePaths {
file, err := os.Open(path)
if err != nil {
return false, err
}
defer file.Close()
data, _, err := slirpLocalFile(*file, 0)
if err != nil {
return false, err
}
_, err = shell.FileSystem.Append(bytes.NewBuffer(data), Path{Name: hdfsPath}, 0)
if err != nil {
return false, err
}
}
return true, nil
}
// Returns a writer with the content of the specified files.
func (shell FsShell) Cat(hdfsPaths []string, writr io.Writer) error {
for _, path := range hdfsPaths {
stat, err := shell.FileSystem.GetFileStatus(Path{Name: path})
if err != nil {
return err
}
//TODO add code to chunk super large files.
if stat.Length < MAX_DOWN_CHUNK {
readr, err := shell.FileSystem.Open(Path{Name: path}, 0, stat.Length, 4096)
if err != nil {
return err
}
io.Copy(writr, readr)
}
}
return nil
}
// Changes the group association of the given hdfs paths.
func (shell FsShell) Chgrp(hdfsPaths []string, grpName string) (bool, error) {
for _, path := range hdfsPaths {
_, err := shell.FileSystem.SetOwner(Path{Name: path}, "", grpName)
if err != nil {
return false, err
}
}
return true, nil
}
// Changes the owner of the specified hdfs paths.
func (shell FsShell) Chown(hdfsPaths []string, owner string) (bool, error) {
for _, path := range hdfsPaths {
_, err := shell.FileSystem.SetOwner(Path{Name: path}, owner, "")
if err != nil {
return false, err
}
}
return true, nil
}
// Changes the filemode of the provided hdfs paths.
func (shell FsShell) Chmod(hdfsPaths []string, perm os.FileMode) (bool, error) {
for _, path := range hdfsPaths {
_, err := shell.FileSystem.SetPermission(Path{Name: path}, perm)
if err != nil {
|
}
// Tests the existence of a remote HDFS file/directory.
func (shell FsShell) Exists(hdfsPath string) (bool, error) {
_, err := shell.FileSystem.GetFileStatus(Path{Name: hdfsPath})
if err != nil {
if remoteErr, ok := err.(RemoteException); ok && remoteErr.JavaClassName == "java.io.FileNotFoundException" {
return false, nil
} else {
return false, err /* a different err */
}
}
return true, nil
}
// Copies one specified local file to the remote HDFS server.
// Uses default permission, blocksize, and replication.
func (shell FsShell) Put(localFile string, hdfsPath string, overwrite bool) (bool, error) {
if _, err := os.Stat(localFile); os.IsNotExist(err) {
return false, fmt.Errorf("File %v not found.", localFile)
}
file, err := os.Open(localFile)
if err != nil {
return false, err
}
defer file.Close()
// put as a new remote file
_, err = shell.FileSystem.Create(
file,
Path{Name: hdfsPath + "/" + µ(path.Split(localFile))[1].(string)},
overwrite,
134217728,
3,
0644,
4096)
if err != nil {
return false, err
}
return true, nil
}
// Copies specified local files to the remote HDFS server.
// The hdfsPath must be a directory (created if it does not exist).
// Uses default permission, blocksize, and replication.
func (shell FsShell) PutMany(files []string, hdfsPath string, overwrite bool) (bool, error) {
// if multiple files, put in remote directory
if len(files) > 1 {
stat, err := shell.FileSystem.GetFileStatus(Path{Name: hdfsPath})
// if the remote dir is missing, create it.
if remoteErr, ok := err.(RemoteException); ok && remoteErr.JavaClassName == "java.io.FileNotFoundException" {
if _, err := shell.FileSystem.MkDirs(Path{Name: hdfsPath}, 0700); err != nil {
return false, err
}
}
if stat.Type == "FILE" {
return false, fmt.Errorf("HDFS resource %s must be a directory in this context.", hdfsPath)
}
}
for _, file := range files {
shell.Put(file, hdfsPath+"/"+µ(path.Split(file))[1].(string), overwrite)
}
return true, nil
}
// Retrieves a remote HDFS file and saves as the specified local file.
func (shell FsShell) Get(hdfsPath, localFile string) (bool, error) {
file, err := os.Create(localFile)
if err != nil {
return false, err
}
defer file.Close()
reader, err := shell.FileSystem.Open(Path{Name: hdfsPath}, 0, 0, 0)
if err != nil {
return false, err
}
data, err := ioutil.ReadAll(reader)
if err != nil {
return false, err
}
defer reader.Close()
_, err = file.Write(data)
if err != nil {
return false, err
}
file.Sync()
return true, nil
}
// Merges content of remote HDFS path into a single local file.
// func (shell FsShell) GetMerge(hdfsPath, localFile string)(bool, error){
// return false, fmt.Errorf("Function is unimplemented.")
// }
// Copies a local file to the remote destination, then removes the local file.
func (shell FsShell) MoveFromLocal(localFile, hdfsPath string, overwrite bool) (bool, error) {
ok, err := shell.Put(localFile, hdfsPath, overwrite)
// validate operation, then remove local
if ok && err == nil {
hdfStat, err := shell.FileSystem.GetFileStatus(Path{Name: hdfsPath})
if err != nil {
return false, fmt.Errorf("Unable to verify remote file. ", err.Error())
}
file, err := os.Open(localFile)
if err != nil {
return false, fmt.Errorf("Unable to validate operation. ", err.Error())
}
if hdfStat.Length != µ(file.Stat())[0].(os.FileInfo).Size() {
return false, fmt.Errorf("Remote and local file size mismatch.")
}
file.Close() // close now.
err = os.Remove(localFile) // remove it.
if err != nil {
return false, err
}
} else {
return false, err
}
return true, nil
}
// Copies remote HDFS file locally. The remote file is then removed.
func (shell FsShell) MoveToLocal(hdfsPath, localFile string) (bool, error) {
hdfStat, err := shell.FileSystem.GetFileStatus(Path{Name: hdfsPath})
_, err = shell.Get(hdfsPath, localFile)
if err != nil {
return false, err
}
file, err := os.Open(localFile)
if err != nil {
return false, fmt.Errorf("Unable to access local file %s: %s", localFile, err.Error())
}
defer file.Close()
fileStat, err := file.Stat()
if err != nil {
return false, fmt.Errorf("Unable to access local file %s: %s", localFile, err.Error())
}
// ensure file was copied all the way
if hdfStat.Length != fileStat.Size() {
return false, fmt.Errorf("Local file size does not match remote file size. Aborting.")
}
// remove remote File
ok, err := shell.FileSystem.Delete(Path{Name: hdfsPath}, false)
if err != nil {
return false, fmt.Errorf("Unable to remove remote %s file: %s", hdfsPath, err.Error())
}
return ok, nil
}
// Removes the specified HDFS source.
func (shell FsShell) Rm(hdfsPath string) (bool, error) {
return false, fmt.Errorf("Function is unimplemented.")
}
// TODO: slirp file in x Gbyte chunks when file.Stat() >> X.
// this is to avoid blowing up memory on large files.
func slirpLocalFile(file os.File, offset int64) ([]byte, int64, error) {
stat, err := file.Stat()
if err != nil {
return nil, 0, err
}
if stat.Size() < MAX_UP_CHUNK {
data, err := ioutil.ReadFile(file.Name())
if err != nil {
return nil, 0, err
}
return data, 0, nil
} // else chunk it
return nil, 0, nil
}
//TODO: slirp file in X GByte chunks from server to avoid blowing up the network.
// func slirpRemoteFile (hdfsPath string, offset int64, totalSize int64)([]byte, int64, error) {
// }
|
return false, err
}
}
return true, nil
|
oar1.rs
|
#[doc = r" Value read from the register"]
pub struct R {
bits: u32,
}
#[doc = r" Value to write to the register"]
pub struct W {
bits: u32,
}
impl super::OAR1 {
#[doc = r" Modifies the contents of the register"]
#[inline]
pub fn modify<F>(&self, f: F)
where
for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W,
{
let bits = self.register.get();
let r = R { bits: bits };
let mut w = W { bits: bits };
f(&r, &mut w);
self.register.set(w.bits);
}
#[doc = r" Reads the contents of the register"]
#[inline]
pub fn read(&self) -> R {
R {
bits: self.register.get(),
}
}
#[doc = r" Writes to the register"]
#[inline]
pub fn write<F>(&self, f: F)
where
F: FnOnce(&mut W) -> &mut W,
{
let mut w = W::reset_value();
f(&mut w);
self.register.set(w.bits);
}
#[doc = r" Writes the reset value to the register"]
#[inline]
pub fn reset(&self) {
self.write(|w| w)
}
}
#[doc = r" Value of the field"]
pub struct OA1R {
bits: u16,
}
impl OA1R {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bits(&self) -> u16 {
self.bits
}
}
#[doc = r" Value of the field"]
pub struct OA1MODER {
bits: bool,
}
impl OA1MODER {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
self.bits
}
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
}
#[doc = r" Value of the field"]
pub struct OA1ENR {
bits: bool,
}
impl OA1ENR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
self.bits
}
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
}
#[doc = r" Proxy"]
pub struct _OA1W<'a> {
w: &'a mut W,
}
impl<'a> _OA1W<'a> {
#[doc = r" Writes raw bits to the field"]
#[inline]
pub unsafe fn bits(self, value: u16) -> &'a mut W {
const MASK: u16 = 1023;
const OFFSET: u8 = 0;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _OA1MODEW<'a> {
w: &'a mut W,
}
impl<'a> _OA1MODEW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 10;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _OA1ENW<'a> {
w: &'a mut W,
}
impl<'a> _OA1ENW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 15;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
impl R {
#[doc = r" Value of the register as raw bits"]
#[inline]
pub fn bits(&self) -> u32 {
self.bits
}
#[doc = "Bits 0:9 - Interface address"]
#[inline]
pub fn oa1(&self) -> OA1R {
let bits = {
const MASK: u16 = 1023;
const OFFSET: u8 = 0;
((self.bits >> OFFSET) & MASK as u32) as u16
};
OA1R { bits }
}
#[doc = "Bit 10 - Own Address 1 10-bit mode"]
#[inline]
pub fn oa1mode(&self) -> OA1MODER {
let bits = {
const MASK: bool = true;
const OFFSET: u8 = 10;
((self.bits >> OFFSET) & MASK as u32) != 0
};
OA1MODER { bits }
}
#[doc = "Bit 15 - Own Address 1 enable"]
#[inline]
pub fn oa1en(&self) -> OA1ENR
|
}
impl W {
#[doc = r" Reset value of the register"]
#[inline]
pub fn reset_value() -> W {
W { bits: 0 }
}
#[doc = r" Writes raw bits to the register"]
#[inline]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.bits = bits;
self
}
#[doc = "Bits 0:9 - Interface address"]
#[inline]
pub fn oa1(&mut self) -> _OA1W {
_OA1W { w: self }
}
#[doc = "Bit 10 - Own Address 1 10-bit mode"]
#[inline]
pub fn oa1mode(&mut self) -> _OA1MODEW {
_OA1MODEW { w: self }
}
#[doc = "Bit 15 - Own Address 1 enable"]
#[inline]
pub fn oa1en(&mut self) -> _OA1ENW {
_OA1ENW { w: self }
}
}
|
{
let bits = {
const MASK: bool = true;
const OFFSET: u8 = 15;
((self.bits >> OFFSET) & MASK as u32) != 0
};
OA1ENR { bits }
}
|
shootout-meteor.rs
|
// The Computer Language Benchmarks Game
// http://benchmarksgame.alioth.debian.org/
//
// contributed by the Rust Project Developers
// Copyright (c) 2013-2014 The Rust Project Developers
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// - Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// - Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in
// the documentation and/or other materials provided with the
// distribution.
//
// - Neither the name of "The Computer Language Benchmarks Game" nor
// the name of "The Computer Language Shootout Benchmarks" nor the
// names of its contributors may be used to endorse or promote
// products derived from this software without specific prior
// written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
// no-pretty-expanded FIXME #15189
use std::iter::repeat;
use std::sync::Arc;
use std::sync::mpsc::channel;
use std::thread;
//
// Utilities.
//
// returns an infinite iterator of repeated applications of f to x,
// i.e. [x, f(x), f(f(x)), ...], as haskell iterate function.
fn iterate<T, F>(x: T, f: F) -> Iterate<T, F> where F: FnMut(&T) -> T {
Iterate {f: f, next: x}
}
struct Iterate<T, F> where F: FnMut(&T) -> T {
f: F,
next: T
}
impl<T, F> Iterator for Iterate<T, F> where F: FnMut(&T) -> T {
type Item = T;
fn next(&mut self) -> Option<T> {
let mut res = (self.f)(&self.next);
std::mem::swap(&mut res, &mut self.next);
Some(res)
}
}
// a linked list using borrowed next.
enum List<'a, T:'a> {
Nil,
Cons(T, &'a List<'a, T>)
}
struct ListIterator<'a, T:'a> {
cur: &'a List<'a, T>
}
impl<'a, T> List<'a, T> {
fn iter(&'a self) -> ListIterator<'a, T> {
ListIterator{cur: self}
}
}
impl<'a, T> Iterator for ListIterator<'a, T> {
type Item = &'a T;
fn next(&mut self) -> Option<&'a T> {
match *self.cur {
List::Nil => None,
List::Cons(ref elt, next) => {
self.cur = next;
Some(elt)
}
}
}
}
//
// preprocess
//
// Takes a piece p of the form [(y1, x1), (y2, x2), ...] and returns
// all possible transformations (the 6 rotations with their
// corresponding mirrored pieces), with (0, 0) as the minimum
// coordinates. If all is false, only half of the possibilities are
// generated (used to break the symmetry of the board).
fn transform(piece: Vec<(i32, i32)> , all: bool) -> Vec<Vec<(i32, i32)>> {
let mut res: Vec<Vec<(i32, i32)>> =
// rotations
iterate(piece, |rot| rot.iter().map(|&(y, x)| (x + y, -y)).collect())
.take(if all {6} else {3})
// mirror
.flat_map(|cur_piece| {
iterate(cur_piece, |mir| mir.iter().map(|&(y, x)| (x, y)).collect())
.take(2)
}).collect();
// translating to (0, 0) as minimum coordinates.
for cur_piece in &mut res {
let (dy, dx) = *cur_piece.iter().min_by(|e| *e).unwrap();
for &mut (ref mut y, ref mut x) in cur_piece {
*y -= dy; *x -= dx;
}
}
res
}
// A mask is a piece somewhere on the board. It is represented as a
// u64: for i in the first 50 bits, m[i] = 1 if the cell at (i/5, i%5)
// is occupied. m[50 + id] = 1 if the identifier of the piece is id.
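// For example, a mask for the piece with id 2 covering cells (0, 0) and (0, 1)
// has bits 0, 1 and 52 set.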
// Takes a piece with minimum coordinate (0, 0) (as generated by
// transform). Returns the corresponding mask if p translated by (dy,
// dx) is on the board.
fn mask(dy: i32, dx: i32, id: usize, p: &Vec<(i32, i32)>) -> Option<u64> {
let mut m = 1 << (50 + id);
for &(y, x) in p {
let x = x + dx + (y + (dy % 2)) / 2;
if x < 0 || x > 4 {return None;}
let y = y + dy;
if y < 0 || y > 9 {return None;}
m |= 1 << (y * 5 + x) as usize;
}
Some(m)
}
// Makes every possible mask. masks[i][id] corresponds to every
// possible mask for the piece with identifier id whose minimum
// coordinate is (i/5, i%5).
fn make_masks() -> Vec<Vec<Vec<u64> > > {
let pieces = vec!(
vec!((0,0),(0,1),(0,2),(0,3),(1,3)),
vec!((0,0),(0,2),(0,3),(1,0),(1,1)),
vec!((0,0),(0,1),(0,2),(1,2),(2,1)),
vec!((0,0),(0,1),(0,2),(1,1),(2,1)),
vec!((0,0),(0,2),(1,0),(1,1),(2,1)),
vec!((0,0),(0,1),(0,2),(1,1),(1,2)),
vec!((0,0),(0,1),(1,1),(1,2),(2,1)),
vec!((0,0),(0,1),(0,2),(1,0),(1,2)),
vec!((0,0),(0,1),(0,2),(1,2),(1,3)),
vec!((0,0),(0,1),(0,2),(0,3),(1,2)));
// To break the central symmetry of the problem, every
// transformation must be taken except for one piece (piece 3
// here).
let transforms: Vec<Vec<Vec<(i32, i32)>>> =
pieces.into_iter().enumerate()
.map(|(id, p)| transform(p, id != 3))
.collect();
(0..50).map(|yx| {
transforms.iter().enumerate().map(|(id, t)| {
t.iter().filter_map(|p| mask(yx / 5, yx % 5, id, p)).collect()
}).collect()
}).collect()
}
// Checks that all coordinates can be covered by an unused piece and that
// every unused piece can be placed on the board.
fn is_board_unfeasible(board: u64, masks: &Vec<Vec<Vec<u64>>>) -> bool {
let mut coverable = board;
for (i, masks_at) in masks.iter().enumerate() {
if board & 1 << i != 0 { continue; }
for (cur_id, pos_masks) in masks_at.iter().enumerate() {
if board & 1 << (50 + cur_id) != 0 { continue; }
for &cur_m in pos_masks {
if cur_m & board != 0 { continue; }
coverable |= cur_m;
// if every coordinate can be covered and every
// piece can be used.
if coverable == (1 << 60) - 1 { return false; }
}
}
if coverable & 1 << i == 0 { return true; }
}
true
}
// Filters out the masks that we can prove result in an unfeasible board.
fn filter_masks(masks: &mut Vec<Vec<Vec<u64>>>) {
for i in 0..masks.len() {
for j in 0..(*masks)[i].len() {
masks[i][j] =
(*masks)[i][j].iter().cloned()
.filter(|&m| !is_board_unfeasible(m, masks))
.collect();
}
}
}
// Gets the identifier of a mask.
fn get_id(m: u64) -> u8 {
for id in 0..10 {
if m & (1 << (id + 50) as usize) != 0 {return id;}
}
panic!("{:016x} does not have a valid identifier", m);
}
// Converts a list of masks to a Vec<u8>.
fn to_vec(raw_sol: &List<u64>) -> Vec<u8> {
let mut sol = repeat('.' as u8).take(50).collect::<Vec<_>>();
for &m in raw_sol.iter() {
let id = '0' as u8 + get_id(m);
for i in 0..50 {
if m & 1 << i != 0 {
sol[i] = id;
}
}
}
sol
}
// Prints a solution in Vec<u8> form.
fn print_sol(sol: &Vec<u8>) {
for (i, c) in sol.iter().enumerate() {
if (i) % 5 == 0 { println!(""); }
if (i + 5) % 10 == 0 { print!(" "); }
print!("{} ", *c as char);
}
println!("");
}
// The data managed during the search
struct Data {
// Number of solution found.
nb: isize,
// Lexicographically minimal solution found.
min: Vec<u8>,
// Lexicographically maximal solution found.
max: Vec<u8>
}
impl Data {
fn new() -> Data
|
fn reduce_from(&mut self, other: Data) {
self.nb += other.nb;
let Data { min: min, max: max, ..} = other;
if min < self.min { self.min = min; }
if max > self.max { self.max = max; }
}
}
// Records a newly found solution.
fn handle_sol(raw_sol: &List<u64>, data: &mut Data) {
// because we break the symmetry, 2 solutions correspond to a call
// to this method: the normal solution, and the same solution in
// reverse order, i.e. the board rotated by half a turn.
data.nb += 2;
let sol1 = to_vec(raw_sol);
let sol2: Vec<u8> = sol1.iter().rev().cloned().collect();
if data.nb == 2 {
data.min = sol1.clone();
data.max = sol1.clone();
}
if sol1 < data.min {data.min = sol1;}
else if sol1 > data.max {data.max = sol1;}
if sol2 < data.min {data.min = sol2;}
else if sol2 > data.max {data.max = sol2;}
}
fn search(
masks: &Vec<Vec<Vec<u64>>>,
board: u64,
mut i: usize,
cur: List<u64>,
data: &mut Data)
{
// Search for the lowest empty coordinate.
while board & (1 << i) != 0 && i < 50 {i += 1;}
// the board is full: a solution is found.
if i >= 50 {return handle_sol(&cur, data);}
let masks_at = &masks[i];
// for every unused piece
for id in (0..10).filter(|&id| board & (1 << (id + 50)) == 0) {
// for each mask that fits on the board
for m in masks_at[id].iter().filter(|&m| board & *m == 0) {
// This check is too costly.
//if is_board_unfeasible(board | m, masks) {continue;}
search(masks, board | *m, i + 1, List::Cons(*m, &cur), data);
}
}
}
fn par_search(masks: Vec<Vec<Vec<u64>>>) -> Data {
let masks = Arc::new(masks);
let (tx, rx) = channel();
// launching the search in parallel on every mask at minimum
// coordinate (0,0)
for m in (*masks)[0].iter().flat_map(|masks_pos| masks_pos.iter()) {
let masks = masks.clone();
let tx = tx.clone();
let m = *m;
thread::spawn(move|| {
let mut data = Data::new();
search(&*masks, m, 1, List::Cons(m, &List::Nil), &mut data);
tx.send(data).unwrap();
});
}
// collecting the results
drop(tx);
let mut data = rx.recv().unwrap();
for d in rx.iter() { data.reduce_from(d); }
data
}
fn main () {
let mut masks = make_masks();
filter_masks(&mut masks);
let data = par_search(masks);
println!("{} solutions found", data.nb);
print_sol(&data.min);
print_sol(&data.max);
println!("");
}
|
{
Data {nb: 0, min: vec!(), max: vec!()}
}
|
test_autoscaling.py
|
# Copyright 2022 Cortex Labs, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Callable, Dict
import cortex as cx
import pytest
import e2e.tests
TEST_APIS = [
{
"primary": "realtime/sleep",
"dummy": ["realtime/prime-generator"],
"query_params": {
"sleep": "1.0",
},
}
]
@pytest.mark.usefixtures("client")
@pytest.mark.parametrize("apis", TEST_APIS, ids=[api["primary"] for api in TEST_APIS])
def test_autoscaling(printer: Callable, config: Dict, client: cx.Client, apis: Dict[str, Any]):
|
skip_autoscaling_test = config["global"].get("skip_autoscaling", False)
if skip_autoscaling_test:
pytest.skip("--skip-autoscaling flag detected, skipping autoscaling tests")
e2e.tests.test_autoscaling(
printer,
client,
apis,
autoscaling_config=config["global"]["autoscaling_test_config"],
deploy_timeout=config["global"]["realtime_deploy_timeout"],
node_groups=config["aws"]["x86_nodegroups"],
)
|
|
model_audio.py
|
'''
Determined model def example:
https://github.com/determined-ai/determined/tree/master/examples/computer_vision/cifar10_pytorch
'''
import tempfile
from typing import Any, Dict, Sequence, Tuple, Union, cast
from functools import partial
import os
import boto3
import numpy as np
from sklearn.metrics import average_precision_score
import torch
from torch import nn
from determined.pytorch import DataLoader, PyTorchTrial, PyTorchTrialContext, LRScheduler
from backbone_pt import Backbone_Pt, Backbone_Audio
import utils_pt
from data_utils.load_data import load_data
from data_utils.download_data import download_from_s3
from data_utils.audio_dataset import *
from data_utils.audio_dataset import _collate_fn, _collate_fn_eval
# Constants about the dataset here (need to modify)
TorchData = Union[Dict[str, torch.Tensor], Sequence[torch.Tensor], torch.Tensor]
def accuracy_rate(predictions: torch.Tensor, labels: torch.Tensor) -> float:
"""Return the accuracy rate based on dense predictions and sparse labels."""
assert len(predictions) == len(labels), "Predictions and labels must have the same length."
assert len(labels.shape) == 1, "Labels must be a column vector."
return ( # type: ignore
float((predictions.argmax(1) == labels.to(torch.long)).sum()) / predictions.shape[0]
)
class AttrDict(dict):
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
class BackboneTrial(PyTorchTrial):
def __init__(self, trial_context: PyTorchTrialContext) -> None:
self.context = trial_context
self.hparams = AttrDict(trial_context.get_hparams())
self.last_epoch = 0
self.download_directory = self.download_data_from_s3()
#self.results = {"loss": float("inf"), "top1_accuracy": 0, "top5_accuracy": 0, "test_loss": float("inf"),
# "test_top1_accuracy": 0, "test_top5_accuracy": 0}
dataset_hypers = {'sEMG': (7, 1), 'ninapro': (18, 1), 'cifar10': (10, 3),
'smnist': (10, 1), 'cifar100':(100, 3), 'scifar100': (100, 3),
'audio': (200, 1)}
n_classes, in_channels = dataset_hypers[self.hparams.task]
print('task: ', self.hparams.task, 'in_channels: ', in_channels, 'classes: ', n_classes)
|
#for audio, use multilabel loss
if self.hparams.task == 'audio':
# where is the weights file?
self.criterion = nn.BCEWithLogitsLoss().cuda()
self.backbone = Backbone_Audio(depth, n_classes, width,
dropRate=self.hparams.droprate, in_channels=in_channels)
else:
self.criterion = nn.CrossEntropyLoss().cuda()
self.backbone = Backbone_Pt(
depth,
n_classes,
width,
dropRate=self.hparams.droprate,
in_channels=in_channels,
)
total_params = sum(p.numel() for p in self.backbone.parameters() if p.requires_grad)/ 1e6
print('Parameter size in MB(backbone): ', total_params)
self.model = self.context.wrap_model(self.backbone)
self.last_eval = 0
'''
Definition of optimizer
'''
nesterov = self.hparams.nesterov if self.hparams.momentum else False
self.opt = self.context.wrap_optimizer(torch.optim.SGD(
self.model.parameters(),
lr=self.hparams.learning_rate,
momentum=self.hparams.momentum,
weight_decay=self.hparams.weight_decay,
nesterov=nesterov)
)
self.lr_scheduler = self.context.wrap_lr_scheduler(
lr_scheduler=torch.optim.lr_scheduler.LambdaLR(
self.opt,
lr_lambda=self.weight_sched,
last_epoch=self.hparams.start_epoch - 1
),
step_mode=LRScheduler.StepMode.STEP_EVERY_EPOCH,
)
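# LambdaLR multiplier: decays the learning rate by 5x at roughly 30%, 60% and
# 80% of training (at epochs 60/120/160 for 200-epoch runs).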
def weight_sched(self, epoch) -> Any:
if self.hparams.epochs != 200:
return 0.2 ** (epoch >= int(0.3 * self.hparams.epochs)) * 0.2 ** (epoch > int(0.6 * self.hparams.epochs)) * 0.2 ** (epoch > int(0.8 * self.hparams.epochs))
#print('using original weight schedule')
return 0.2 ** (epoch >= 60) * 0.2 ** (epoch >= 120) * 0.2 ** (epoch >=160)
def download_data_from_s3(self):
'''Download data from s3 to store in temp directory'''
s3_bucket = self.context.get_data_config()["bucket"]
#download_directory = f"/tmp/data-rank{self.context.distributed.get_rank()}"
#download_directory = "/tmp/data"
download_directory = os.getcwd()
s3 = boto3.client("s3")
#os.makedirs(download_directory, exist_ok=True)
download_from_s3(s3_bucket, self.hparams.task, download_directory)
if self.hparams.train:
self.train_data, self.val_data, self.test_data = load_data(self.hparams.task, download_directory, True, self.hparams.permute)
self.build_test_data_loader(download_directory)
else:
self.train_data, _, self.val_data = load_data(self.hparams.task, download_directory, False, self.hparams.permute)
return download_directory
def build_training_data_loader(self) -> DataLoader:
trainset = self.train_data
print(len(trainset))
train_loader = DataLoader(trainset, num_workers=4, batch_size=self.context.get_per_slot_batch_size(),
shuffle=True, sampler=None, collate_fn=_collate_fn,
pin_memory=False, drop_last=True)
print(len(train_loader))
return train_loader
def build_validation_data_loader(self) -> DataLoader:
valset = self.val_data
print(len(valset))
return DataLoader(valset, sampler=None, num_workers=4,
collate_fn=_collate_fn_eval,
shuffle=False, batch_size=1,
pin_memory=False
)
def build_test_data_loader(self, download_directory):
testset = self.test_data
print(len(testset))
#self.test_loader = torch.utils.data.DataLoader(testset, batch_size=self.context.get_per_slot_batch_size(),
# shuffle=False, num_workers=2)
return
'''
Train and Evaluate Methods
'''
def train_batch(self, batch: TorchData, epoch_idx: int, batch_idx: int
) -> Dict[str, torch.Tensor]:
x_train, _, y_train = batch
self.model.train()
output = self.model(x_train)
loss = self.criterion(output, y_train)
self.context.backward(loss)
self.context.step_optimizer(self.opt)
return {
'loss': loss,
}
def evaluate_full_dataset(
self, data_loader: torch.utils.data.DataLoader,
) -> Dict[str, Any]:
if not self.hparams.train and self.hparams.task == 'audio':
return self.evaluate_audio_testset(self.val_data)
loss_avg = utils_pt.AverageMeter()
val_predictions = []
val_gts = []
with torch.no_grad():
for batch in data_loader:
batch = self.context.to_device(batch)
input, target = batch
n = input.size(0)
logits = self.model(input)
logits = logits.mean(0).unsqueeze(0)
loss = self.criterion(logits, target)
#top1, top5 = utils_pt.accuracy(logits, target, topk=(1, 5))
#acc_top1.update(top1.item(), n)
#acc_top5.update(top5.item(), n)
loss_avg.update(loss, n)
logits_sigmoid = torch.sigmoid(logits)
val_predictions.append(logits_sigmoid.detach().cpu().numpy()[0])
val_gts.append(target.detach().cpu().numpy()[0])
val_preds = np.asarray(val_predictions).astype('float32')
val_gts = np.asarray(val_gts).astype('int32')
map_value = average_precision_score(val_gts, val_preds, average="macro")
results = {
"loss": loss_avg.avg,
"val_mAP": map_value,
}
'''
if self.hparams.train:
test_acc_top1 = utils_pt.AverageMeter()
test_acc_top5 = utils_pt.AverageMeter()
test_loss = utils_pt.AverageMeter()
with torch.no_grad():
for batch in self.test_loader:
batch = self.context.to_device(batch)
input, target = batch
n = input.size(0)
logits = self.model(input)
loss = self.criterion(logits, target)
top1, top5 = utils_pt.accuracy(logits, target, topk=(1, 5))
test_acc_top1.update(top1.item(), n)
test_acc_top5.update(top5.item(), n)
test_loss.update(loss, n)
results2 = {
"test_loss": test_loss.avg,
"test_top1_accuracy": test_acc_top1.avg,
"test_top5_accuracy": test_acc_top5.avg,
}
results.update(results2)
'''
if self.hparams.task == 'audio' and self.last_eval % 20 == 0:
results.update(self.evaluate_audio_testset(self.test_data))
self.last_eval += 1
return results
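    # evaluate_audio_testset (below) walks the audio test split one clip at a
    # time, averages the model's logits over the first dimension (e.g. multiple
    # crops per clip), and reports macro mAP/mAUC via calculate_stats, which is
    # presumably provided by the data_utils.audio_dataset wildcard import.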
def evaluate_audio_testset(self, testset) -> Dict[str, torch.Tensor]:
cnt = 0
test_predictions = []
test_gts = []
for ix in range(testset.len):
with torch.no_grad():
batch = testset[ix]
x, y = batch
x = x.cuda()
y_pred = self.model(x)
y_pred = y_pred.mean(0).unsqueeze(0)
sigmoid_preds = torch.sigmoid(y_pred)
test_predictions.append(sigmoid_preds.detach().cpu().numpy()[0])
test_gts.append(y.detach().cpu().numpy()[0]) # drop batch axis
test_predictions = np.asarray(test_predictions).astype('float32')
test_gts = np.asarray(test_gts).astype('int32')
stats = calculate_stats(test_predictions, test_gts)
mAP = np.mean([stat['AP'] for stat in stats])
mAUC = np.mean([stat['auc'] for stat in stats])
results = {
"test_mAUC": mAUC,
"test_mAP": mAP,
}
return results
|
|
25.5bb6c60c.chunk.js
|
(window.webpackJsonp=window.webpackJsonp||[]).push([[25,117],{672:function(t,e,n){!function(t){"use strict";var e={autoSelfClosers:{area:!0,base:!0,br:!0,col:!0,command:!0,embed:!0,frame:!0,hr:!0,img:!0,input:!0,keygen:!0,link:!0,meta:!0,param:!0,source:!0,track:!0,wbr:!0,menuitem:!0},implicitlyClosed:{dd:!0,li:!0,optgroup:!0,option:!0,p:!0,rp:!0,rt:!0,tbody:!0,td:!0,tfoot:!0,th:!0,tr:!0},contextGrabbers:{dd:{dd:!0,dt:!0},dt:{dd:!0,dt:!0},li:{li:!0},option:{option:!0,optgroup:!0},optgroup:{optgroup:!0},p:{address:!0,article:!0,aside:!0,blockquote:!0,dir:!0,div:!0,dl:!0,fieldset:!0,footer:!0,form:!0,h1:!0,h2:!0,h3:!0,h4:!0,h5:!0,h6:!0,header:!0,hgroup:!0,hr:!0,menu:!0,nav:!0,ol:!0,p:!0,pre:!0,section:!0,table:!0,ul:!0},rp:{rp:!0,rt:!0},rt:{rp:!0,rt:!0},tbody:{tbody:!0,tfoot:!0},td:{td:!0,th:!0},tfoot:{tbody:!0},th:{td:!0,th:!0},thead:{tbody:!0,tfoot:!0},tr:{tr:!0}},doNotIndent:{pre:!0},allowUnquoted:!0,allowMissing:!0,caseFold:!0},n={autoSelfClosers:{},implicitlyClosed:{},contextGrabbers:{},doNotIndent:{},allowUnquoted:!1,allowMissing:!1,allowMissingTagName:!1,caseFold:!1};t.defineMode("xml",function(r,a){var o,i,l=r.indentUnit,c={},u=a.htmlMode?e:n;for(var s in u)c[s]=u[s];for(var s in a)c[s]=a[s];function d(t,e){function n(n){return e.tokenize=n,n(t,e)}var r=t.next();return"<"==r?t.eat("!")?t.eat("[")?t.match("CDATA[")?n(f("atom","]]>")):null:t.match("--")?n(f("comment","--\x3e")):t.match("DOCTYPE",!0,!0)?(t.eatWhile(/[\w\._\-]/),n(function t(e){return function(n,r){for(var a;null!=(a=n.next());){if("<"==a)return r.tokenize=t(e+1),r.tokenize(n,r);if(">"==a){if(1==e){r.tokenize=d;break}return r.tokenize=t(e-1),r.tokenize(n,r)}}return"meta"}}(1))):null:t.eat("?")?(t.eatWhile(/[\w\._\-]/),e.tokenize=f("meta","?>"),"meta"):(o=t.eat("/")?"closeTag":"openTag",e.tokenize=m,"tag bracket"):"&"==r?(t.eat("#")?t.eat("x")?t.eatWhile(/[a-fA-F\d]/)&&t.eat(";"):t.eatWhile(/[\d]/)&&t.eat(";"):t.eatWhile(/[\w\.\-:]/)&&t.eat(";"))?"atom":"error":(t.eatWhile(/[^&<]/),null)}function m(t,e){var n=t.next();if(">"==n||"/"==n&&t.eat(">"))return e.tokenize=d,o=">"==n?"endTag":"selfcloseTag","tag bracket";if("="==n)return o="equals",null;if("<"==n){e.tokenize=d,e.state=x,e.tagName=e.tagStart=null;var r=e.tokenize(t,e);return r?r+" tag error":"tag error"}return/[\'\"]/.test(n)?(e.tokenize=function(t){var e=function(e,n){for(;!e.eol();)if(e.next()==t){n.tokenize=m;break}return"string"};return e.isInAttribute=!0,e}(n),e.stringStartCol=t.column(),e.tokenize(t,e)):(t.match(/^[^\s\u00a0=<>\"\']*[^\s\u00a0=<>\"\'\/]/),"word")}function f(t,e){return function(n,r){for(;!n.eol();){if(n.match(e)){r.tokenize=d;break}n.next()}return t}}function g(t,e,n){this.prev=t.context,this.tagName=e,this.indent=t.indented,this.startOfLine=n,(c.doNotIndent.hasOwnProperty(e)||t.context&&t.context.noIndent)&&(this.noIndent=!0)}function p(t){t.context&&(t.context=t.context.prev)}function h(t,e){for(var n;;){if(!t.context)return;if(n=t.context.tagName,!c.contextGrabbers.hasOwnProperty(n)||!c.contextGrabbers[n].hasOwnProperty(e))return;p(t)}}function x(t,e,n){return"openTag"==t?(n.tagStart=e.column(),k):"closeTag"==t?v:x}function k(t,e,n){return"word"==t?(n.tagName=e.current(),i="tag",S):c.allowMissingTagName&&"endTag"==t?(i="tag bracket",S(t,0,n)):(i="error",k)}function v(t,e,n){if("word"==t){var r=e.current();return n.context&&n.context.tagName!=r&&c.implicitlyClosed.hasOwnProperty(n.context.tagName)&&p(n),n.context&&n.context.tagName==r||!1===c.matchClosing?(i="tag",b):(i="tag error",T)}return c.allowMissingTagName&&"endTag"==t?(i="tag 
bracket",b(t,0,n)):(i="error",T)}function b(t,e,n){return"endTag"!=t?(i="error",b):(p(n),x)}function T(t,e,n){return i="error",b(t,0,n)}function S(t,e,n){if("word"==t)return i="attribute",w;if("endTag"==t||"selfcloseTag"==t){var r=n.tagName,a=n.tagStart;return n.tagName=n.tagStart=null,"selfcloseTag"==t||c.autoSelfClosers.hasOwnProperty(r)?h(n,r):(h(n,r),n.context=new g(n,r,a==n.indented)),x}return i="error",S}function w(t,e,n){return"equals"==t?M:(c.allowMissing||(i="error"),S(t,0,n))}function M(t,e,n){return"string"==t?y:"word"==t&&c.allowUnquoted?(i="string",S):(i="error",S(t,0,n))}function y(t,e,n){return"string"==t?y:S(t,0,n)}return d.isInText=!0,{startState:function(t){var e={tokenize:d,state:x,indented:t||0,tagName:null,tagStart:null,context:null};return null!=t&&(e.baseIndent=t),e},token:function(t,e){if(!e.tagName&&t.sol()&&(e.indented=t.indentation()),t.eatSpace())return null;o=null;var n=e.tokenize(t,e);return(n||o)&&"comment"!=n&&(i=null,e.state=e.state(o||n,t,e),i&&(n="error"==i?n+" error":i)),n},indent:function(e,n,r){var a=e.context;if(e.tokenize.isInAttribute)return e.tagStart==e.indented?e.stringStartCol+1:e.indented+l;if(a&&a.noIndent)return t.Pass;if(e.tokenize!=m&&e.tokenize!=d)return r?r.match(/^(\s*)/)[0].length:0;if(e.tagName)return!1!==c.multilineTagIndentPastTag?e.tagStart+e.tagName.length+2:e.tagStart+l*(c.multilineTagIndentFactor||1);if(c.alignCDATA&&/<!\[CDATA\[/.test(n))return 0;var o=n&&/^<(\/)?([\w_:\.-]*)/.exec(n);if(o&&o[1])for(;a;){if(a.tagName==o[2]){a=a.prev;break}if(!c.implicitlyClosed.hasOwnProperty(a.tagName))break;a=a.prev}else if(o)for(;a;){var i=c.contextGrabbers[a.tagName];if(!i||!i.hasOwnProperty(o[2]))break;a=a.prev}for(;a&&a.prev&&!a.startOfLine;)a=a.prev;return a?a.indent+l:e.baseIndent||0},electricInput:/<\/[\s\w:]+>$/,blockCommentStart:"\x3c!--",blockCommentEnd:"--\x3e",configuration:c.htmlMode?"html":"xml",helperType:c.htmlMode?"html":"xml",skipAttribute:function(t){t.state==M&&(t.state=S)},xmlCurrentTag:function(t){return t.tagName?{name:t.tagName,close:"closeTag"==t.type}:null},xmlCurrentContext:function(t){for(var e=[],n=t.context;n;n=n.prev)n.tagName&&e.push(n.tagName);return e.reverse()}}}),t.defineMIME("text/xml","xml"),t.defineMIME("application/xml","xml"),t.mimeModes.hasOwnProperty("text/html")||t.defineMIME("text/html",{name:"xml",htmlMode:!0})}(n(34))},673:function(t,e,n){!function(t){"use strict";var e={script:[["lang",/(javascript|babel)/i,"javascript"],["type",/^(?:text|application)\/(?:x-)?(?:java|ecma)script$|^module$|^$/i,"javascript"],["type",/./,"text/plain"],[null,null,"javascript"]],style:[["lang",/^css$/i,"css"],["type",/^(text\/)?(x-)?(stylesheet|css)$/i,"css"],["type",/./,"text/plain"],[null,null,"css"]]},n={};function r(t,e){var r=t.match(function(t){var e=n[t];return e||(n[t]=new RegExp("\\s+"+t+"\\s*=\\s*('|\")?([^'\"]+)('|\")?\\s*"))}(e));return r?/^\s*(.*?)\s*$/.exec(r[2])[1]:""}function a(t,e){return new RegExp((e?"^":"")+"</s*"+t+"s*>","i")}function o(t,e){for(var n in t)for(var r=e[n]||(e[n]=[]),a=t[n],o=a.length-1;o>=0;o--)r.unshift(a[o])}t.defineMode("htmlmixed",function(n,i){var l=t.getMode(n,{name:"xml",htmlMode:!0,multilineTagIndentFactor:i.multilineTagIndentFactor,multilineTagIndentPastTag:i.multilineTagIndentPastTag}),c={},u=i&&i.tags,s=i&&i.scriptTypes;if(o(e,c),u&&o(u,c),s)for(var d=s.length-1;d>=0;d--)c.script.unshift(["type",s[d].matches,s[d].mode]);function m(e,o){var 
i,u=l.token(e,o.htmlState),s=/\btag\b/.test(u);if(s&&!/[<>\s\/]/.test(e.current())&&(i=o.htmlState.tagName&&o.htmlState.tagName.toLowerCase())&&c.hasOwnProperty(i))o.inTag=i+" ";else if(o.inTag&&s&&/>$/.test(e.current())){var d=/^([\S]+) (.*)/.exec(o.inTag);o.inTag=null;var f=">"==e.current()&&function(t,e){for(var n=0;n<t.length;n++){var a=t[n];if(!a[0]||a[1].test(r(e,a[0])))return a[2]}}(c[d[1]],d[2]),g=t.getMode(n,f),p=a(d[1],!0),h=a(d[1],!1);o.token=function(t,e){return t.match(p,!1)?(e.token=m,e.localState=e.localMode=null,null):function(t,e,n){var r=t.current(),a=r.search(e);return a>-1?t.backUp(r.length-a):r.match(/<\/?$/)&&(t.backUp(r.length),t.match(e,!1)||t.match(r)),n}(t,h,e.localMode.token(t,e.localState))},o.localMode=g,o.localState=t.startState(g,l.indent(o.htmlState,"",""))}else o.inTag&&(o.inTag+=e.current(),e.eol()&&(o.inTag+=" "));return u}return{startState:function(){var e=t.startState(l);return{token:m,inTag:null,localMode:null,localState:null,htmlState:e}},copyState:function(e){var n;return e.localState&&(n=t.copyState(e.localMode,e.localState)),{token:e.token,inTag:e.inTag,localMode:e.localMode,localState:n,htmlState:t.copyState(l,e.htmlState)}},token:function(t,e){return e.token(t,e)},indent:function(e,n,r){return!e.localMode||/^\s*<\//.test(n)?l.indent(e.htmlState,n,r):e.localMode.indent?e.localMode.indent(e.localState,n,r):t.Pass},innerMode:function(t){return{state:t.localState||t.htmlState,mode:t.localMode||l}}}},"xml","javascript","css"),t.defineMIME("text/html","htmlmixed")}(n(34),n(672),n(674),n(152))}}]);
//# sourceMappingURL=25.5bb6c60c.chunk.js.map
|
||
ffi.rs
|
#![allow(non_upper_case_globals)]
#![allow(non_camel_case_types)]
#![allow(non_snake_case)]
/* Turn off "improper_ctypes" warning,
* See https://github.com/rust-lang/rust/issues/34798
*/
#![allow(improper_ctypes)]
/* automatically generated by rust-bindgen */
#[repr(C)]
pub struct __BindgenUnionField<T>(::std::marker::PhantomData<T>);
impl <T> __BindgenUnionField<T> {
#[inline]
pub fn new() -> Self { __BindgenUnionField(::std::marker::PhantomData) }
#[inline]
pub unsafe fn as_ref(&self) -> &T { ::std::mem::transmute(self) }
#[inline]
pub unsafe fn as_mut(&mut self) -> &mut T { ::std::mem::transmute(self) }
}
impl <T> ::std::default::Default for __BindgenUnionField<T> {
#[inline]
fn default() -> Self { Self::new() }
}
impl <T> ::std::clone::Clone for __BindgenUnionField<T> {
#[inline]
fn clone(&self) -> Self { Self::new() }
}
impl <T> ::std::marker::Copy for __BindgenUnionField<T> { }
impl <T> ::std::fmt::Debug for __BindgenUnionField<T> {
fn fmt(&self, fmt: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
fmt.write_str("__BindgenUnionField")
}
}
pub const _STDINT_H: ::std::os::raw::c_uint = 1;
pub const _FEATURES_H: ::std::os::raw::c_uint = 1;
pub const _DEFAULT_SOURCE: ::std::os::raw::c_uint = 1;
pub const __USE_ISOC11: ::std::os::raw::c_uint = 1;
pub const __USE_ISOC99: ::std::os::raw::c_uint = 1;
pub const __USE_ISOC95: ::std::os::raw::c_uint = 1;
pub const __USE_POSIX_IMPLICITLY: ::std::os::raw::c_uint = 1;
pub const _POSIX_SOURCE: ::std::os::raw::c_uint = 1;
pub const _POSIX_C_SOURCE: ::std::os::raw::c_uint = 200809;
pub const __USE_POSIX: ::std::os::raw::c_uint = 1;
pub const __USE_POSIX2: ::std::os::raw::c_uint = 1;
pub const __USE_POSIX199309: ::std::os::raw::c_uint = 1;
pub const __USE_POSIX199506: ::std::os::raw::c_uint = 1;
pub const __USE_XOPEN2K: ::std::os::raw::c_uint = 1;
pub const __USE_XOPEN2K8: ::std::os::raw::c_uint = 1;
pub const _ATFILE_SOURCE: ::std::os::raw::c_uint = 1;
pub const __USE_MISC: ::std::os::raw::c_uint = 1;
pub const __USE_ATFILE: ::std::os::raw::c_uint = 1;
pub const __USE_FORTIFY_LEVEL: ::std::os::raw::c_uint = 0;
pub const _STDC_PREDEF_H: ::std::os::raw::c_uint = 1;
pub const __STDC_IEC_559__: ::std::os::raw::c_uint = 1;
pub const __STDC_IEC_559_COMPLEX__: ::std::os::raw::c_uint = 1;
pub const __STDC_ISO_10646__: ::std::os::raw::c_uint = 201505;
pub const __STDC_NO_THREADS__: ::std::os::raw::c_uint = 1;
pub const __GNU_LIBRARY__: ::std::os::raw::c_uint = 6;
pub const __GLIBC__: ::std::os::raw::c_uint = 2;
pub const __GLIBC_MINOR__: ::std::os::raw::c_uint = 23;
pub const _SYS_CDEFS_H: ::std::os::raw::c_uint = 1;
pub const __WORDSIZE: ::std::os::raw::c_uint = 64;
pub const __WORDSIZE_TIME64_COMPAT32: ::std::os::raw::c_uint = 1;
pub const __SYSCALL_WORDSIZE: ::std::os::raw::c_uint = 64;
pub const _BITS_WCHAR_H: ::std::os::raw::c_uint = 1;
pub const INT8_MIN: ::std::os::raw::c_int = -128;
pub const INT16_MIN: ::std::os::raw::c_int = -32768;
pub const INT32_MIN: ::std::os::raw::c_int = -2147483648;
pub const INT8_MAX: ::std::os::raw::c_uint = 127;
pub const INT16_MAX: ::std::os::raw::c_uint = 32767;
pub const INT32_MAX: ::std::os::raw::c_uint = 2147483647;
pub const UINT8_MAX: ::std::os::raw::c_uint = 255;
pub const UINT16_MAX: ::std::os::raw::c_uint = 65535;
pub const UINT32_MAX: ::std::os::raw::c_uint = 4294967295;
pub const INT_LEAST8_MIN: ::std::os::raw::c_int = -128;
pub const INT_LEAST16_MIN: ::std::os::raw::c_int = -32768;
pub const INT_LEAST32_MIN: ::std::os::raw::c_int = -2147483648;
pub const INT_LEAST8_MAX: ::std::os::raw::c_uint = 127;
pub const INT_LEAST16_MAX: ::std::os::raw::c_uint = 32767;
pub const INT_LEAST32_MAX: ::std::os::raw::c_uint = 2147483647;
pub const UINT_LEAST8_MAX: ::std::os::raw::c_uint = 255;
pub const UINT_LEAST16_MAX: ::std::os::raw::c_uint = 65535;
pub const UINT_LEAST32_MAX: ::std::os::raw::c_uint = 4294967295;
pub const INT_FAST8_MIN: ::std::os::raw::c_int = -128;
pub const INT_FAST16_MIN: ::std::os::raw::c_longlong = -9223372036854775808;
pub const INT_FAST32_MIN: ::std::os::raw::c_longlong = -9223372036854775808;
pub const INT_FAST8_MAX: ::std::os::raw::c_uint = 127;
pub const INT_FAST16_MAX: ::std::os::raw::c_ulonglong = 9223372036854775807;
pub const INT_FAST32_MAX: ::std::os::raw::c_ulonglong = 9223372036854775807;
pub const UINT_FAST8_MAX: ::std::os::raw::c_uint = 255;
pub const UINT_FAST16_MAX: ::std::os::raw::c_int = -1;
pub const UINT_FAST32_MAX: ::std::os::raw::c_int = -1;
pub const INTPTR_MIN: ::std::os::raw::c_longlong = -9223372036854775808;
pub const INTPTR_MAX: ::std::os::raw::c_ulonglong = 9223372036854775807;
pub const UINTPTR_MAX: ::std::os::raw::c_int = -1;
pub const PTRDIFF_MIN: ::std::os::raw::c_longlong = -9223372036854775808;
pub const PTRDIFF_MAX: ::std::os::raw::c_ulonglong = 9223372036854775807;
pub const SIG_ATOMIC_MIN: ::std::os::raw::c_int = -2147483648;
pub const SIG_ATOMIC_MAX: ::std::os::raw::c_uint = 2147483647;
pub const SIZE_MAX: ::std::os::raw::c_int = -1;
pub const WINT_MIN: ::std::os::raw::c_uint = 0;
pub const WINT_MAX: ::std::os::raw::c_uint = 4294967295;
pub const VA_MAJOR_VERSION: ::std::os::raw::c_uint = 0;
pub const VA_MINOR_VERSION: ::std::os::raw::c_uint = 39;
pub const VA_MICRO_VERSION: ::std::os::raw::c_uint = 4;
pub const VA_VERSION_S: &'static [u8; 7usize] = b"0.39.4\x00";
pub const VA_VERSION_HEX: ::std::os::raw::c_uint = 2556928;
pub const VA_STATUS_SUCCESS: ::std::os::raw::c_uint = 0;
pub const VA_STATUS_ERROR_OPERATION_FAILED: ::std::os::raw::c_uint = 1;
pub const VA_STATUS_ERROR_ALLOCATION_FAILED: ::std::os::raw::c_uint = 2;
pub const VA_STATUS_ERROR_INVALID_DISPLAY: ::std::os::raw::c_uint = 3;
pub const VA_STATUS_ERROR_INVALID_CONFIG: ::std::os::raw::c_uint = 4;
pub const VA_STATUS_ERROR_INVALID_CONTEXT: ::std::os::raw::c_uint = 5;
pub const VA_STATUS_ERROR_INVALID_SURFACE: ::std::os::raw::c_uint = 6;
pub const VA_STATUS_ERROR_INVALID_BUFFER: ::std::os::raw::c_uint = 7;
pub const VA_STATUS_ERROR_INVALID_IMAGE: ::std::os::raw::c_uint = 8;
pub const VA_STATUS_ERROR_INVALID_SUBPICTURE: ::std::os::raw::c_uint = 9;
pub const VA_STATUS_ERROR_ATTR_NOT_SUPPORTED: ::std::os::raw::c_uint = 10;
pub const VA_STATUS_ERROR_MAX_NUM_EXCEEDED: ::std::os::raw::c_uint = 11;
pub const VA_STATUS_ERROR_UNSUPPORTED_PROFILE: ::std::os::raw::c_uint = 12;
pub const VA_STATUS_ERROR_UNSUPPORTED_ENTRYPOINT: ::std::os::raw::c_uint = 13;
pub const VA_STATUS_ERROR_UNSUPPORTED_RT_FORMAT: ::std::os::raw::c_uint = 14;
pub const VA_STATUS_ERROR_UNSUPPORTED_BUFFERTYPE: ::std::os::raw::c_uint = 15;
pub const VA_STATUS_ERROR_SURFACE_BUSY: ::std::os::raw::c_uint = 16;
pub const VA_STATUS_ERROR_FLAG_NOT_SUPPORTED: ::std::os::raw::c_uint = 17;
pub const VA_STATUS_ERROR_INVALID_PARAMETER: ::std::os::raw::c_uint = 18;
pub const VA_STATUS_ERROR_RESOLUTION_NOT_SUPPORTED: ::std::os::raw::c_uint =
19;
pub const VA_STATUS_ERROR_UNIMPLEMENTED: ::std::os::raw::c_uint = 20;
pub const VA_STATUS_ERROR_SURFACE_IN_DISPLAYING: ::std::os::raw::c_uint = 21;
pub const VA_STATUS_ERROR_INVALID_IMAGE_FORMAT: ::std::os::raw::c_uint = 22;
pub const VA_STATUS_ERROR_DECODING_ERROR: ::std::os::raw::c_uint = 23;
pub const VA_STATUS_ERROR_ENCODING_ERROR: ::std::os::raw::c_uint = 24;
pub const VA_STATUS_ERROR_INVALID_VALUE: ::std::os::raw::c_uint = 25;
pub const VA_STATUS_ERROR_UNSUPPORTED_FILTER: ::std::os::raw::c_uint = 32;
pub const VA_STATUS_ERROR_INVALID_FILTER_CHAIN: ::std::os::raw::c_uint = 33;
pub const VA_STATUS_ERROR_HW_BUSY: ::std::os::raw::c_uint = 34;
pub const VA_STATUS_ERROR_UNSUPPORTED_MEMORY_TYPE: ::std::os::raw::c_uint =
36;
pub const VA_STATUS_ERROR_UNKNOWN: ::std::os::raw::c_uint = 4294967295;
pub const VA_FRAME_PICTURE: ::std::os::raw::c_uint = 0;
pub const VA_TOP_FIELD: ::std::os::raw::c_uint = 1;
pub const VA_BOTTOM_FIELD: ::std::os::raw::c_uint = 2;
pub const VA_ENABLE_BLEND: ::std::os::raw::c_uint = 4;
pub const VA_CLEAR_DRAWABLE: ::std::os::raw::c_uint = 8;
pub const VA_SRC_COLOR_MASK: ::std::os::raw::c_uint = 240;
pub const VA_SRC_BT601: ::std::os::raw::c_uint = 16;
pub const VA_SRC_BT709: ::std::os::raw::c_uint = 32;
pub const VA_SRC_SMPTE_240: ::std::os::raw::c_uint = 64;
pub const VA_FILTER_SCALING_DEFAULT: ::std::os::raw::c_uint = 0;
pub const VA_FILTER_SCALING_FAST: ::std::os::raw::c_uint = 256;
pub const VA_FILTER_SCALING_HQ: ::std::os::raw::c_uint = 512;
pub const VA_FILTER_SCALING_NL_ANAMORPHIC: ::std::os::raw::c_uint = 768;
pub const VA_FILTER_SCALING_MASK: ::std::os::raw::c_uint = 3840;
pub const VA_RT_FORMAT_YUV420: ::std::os::raw::c_uint = 1;
pub const VA_RT_FORMAT_YUV422: ::std::os::raw::c_uint = 2;
pub const VA_RT_FORMAT_YUV444: ::std::os::raw::c_uint = 4;
pub const VA_RT_FORMAT_YUV411: ::std::os::raw::c_uint = 8;
pub const VA_RT_FORMAT_YUV400: ::std::os::raw::c_uint = 16;
pub const VA_RT_FORMAT_YUV420_10BPP: ::std::os::raw::c_uint = 256;
pub const VA_RT_FORMAT_RGB16: ::std::os::raw::c_uint = 65536;
pub const VA_RT_FORMAT_RGB32: ::std::os::raw::c_uint = 131072;
pub const VA_RT_FORMAT_RGBP: ::std::os::raw::c_uint = 1048576;
pub const VA_RT_FORMAT_PROTECTED: ::std::os::raw::c_uint = 2147483648;
pub const VA_RC_NONE: ::std::os::raw::c_uint = 1;
pub const VA_RC_CBR: ::std::os::raw::c_uint = 2;
pub const VA_RC_VBR: ::std::os::raw::c_uint = 4;
pub const VA_RC_VCM: ::std::os::raw::c_uint = 8;
pub const VA_RC_CQP: ::std::os::raw::c_uint = 16;
pub const VA_RC_VBR_CONSTRAINED: ::std::os::raw::c_uint = 32;
pub const VA_RC_MB: ::std::os::raw::c_uint = 128;
pub const VA_DEC_SLICE_MODE_NORMAL: ::std::os::raw::c_uint = 1;
pub const VA_DEC_SLICE_MODE_BASE: ::std::os::raw::c_uint = 2;
pub const VA_ENC_PACKED_HEADER_NONE: ::std::os::raw::c_uint = 0;
pub const VA_ENC_PACKED_HEADER_SEQUENCE: ::std::os::raw::c_uint = 1;
pub const VA_ENC_PACKED_HEADER_PICTURE: ::std::os::raw::c_uint = 2;
pub const VA_ENC_PACKED_HEADER_SLICE: ::std::os::raw::c_uint = 4;
pub const VA_ENC_PACKED_HEADER_MISC: ::std::os::raw::c_uint = 8;
pub const VA_ENC_PACKED_HEADER_RAW_DATA: ::std::os::raw::c_uint = 16;
pub const VA_ENC_INTERLACED_NONE: ::std::os::raw::c_uint = 0;
pub const VA_ENC_INTERLACED_FRAME: ::std::os::raw::c_uint = 1;
pub const VA_ENC_INTERLACED_FIELD: ::std::os::raw::c_uint = 2;
pub const VA_ENC_INTERLACED_MBAFF: ::std::os::raw::c_uint = 4;
pub const VA_ENC_INTERLACED_PAFF: ::std::os::raw::c_uint = 8;
pub const VA_ENC_SLICE_STRUCTURE_ARBITRARY_ROWS: ::std::os::raw::c_uint = 0;
pub const VA_ENC_SLICE_STRUCTURE_POWER_OF_TWO_ROWS: ::std::os::raw::c_uint =
1;
pub const VA_ENC_SLICE_STRUCTURE_ARBITRARY_MACROBLOCKS: ::std::os::raw::c_uint
=
2;
pub const VA_ATTRIB_NOT_SUPPORTED: ::std::os::raw::c_uint = 2147483648;
pub const VA_INVALID_ID: ::std::os::raw::c_uint = 4294967295;
pub const VA_INVALID_SURFACE: ::std::os::raw::c_uint = 4294967295;
pub const VA_SURFACE_ATTRIB_NOT_SUPPORTED: ::std::os::raw::c_uint = 0;
pub const VA_SURFACE_ATTRIB_GETTABLE: ::std::os::raw::c_uint = 1;
pub const VA_SURFACE_ATTRIB_SETTABLE: ::std::os::raw::c_uint = 2;
pub const VA_SURFACE_ATTRIB_MEM_TYPE_VA: ::std::os::raw::c_uint = 1;
pub const VA_SURFACE_ATTRIB_MEM_TYPE_V4L2: ::std::os::raw::c_uint = 2;
pub const VA_SURFACE_ATTRIB_MEM_TYPE_USER_PTR: ::std::os::raw::c_uint = 4;
pub const VA_SURFACE_EXTBUF_DESC_ENABLE_TILING: ::std::os::raw::c_uint = 1;
pub const VA_SURFACE_EXTBUF_DESC_CACHED: ::std::os::raw::c_uint = 2;
pub const VA_SURFACE_EXTBUF_DESC_UNCACHED: ::std::os::raw::c_uint = 4;
pub const VA_SURFACE_EXTBUF_DESC_WC: ::std::os::raw::c_uint = 8;
pub const VA_SURFACE_EXTBUF_DESC_PROTECTED: ::std::os::raw::c_uint =
2147483648;
pub const VA_SURFACE_ATTRIB_USAGE_HINT_GENERIC: ::std::os::raw::c_uint = 0;
pub const VA_SURFACE_ATTRIB_USAGE_HINT_DECODER: ::std::os::raw::c_uint = 1;
pub const VA_SURFACE_ATTRIB_USAGE_HINT_ENCODER: ::std::os::raw::c_uint = 2;
pub const VA_SURFACE_ATTRIB_USAGE_HINT_VPP_READ: ::std::os::raw::c_uint = 4;
pub const VA_SURFACE_ATTRIB_USAGE_HINT_VPP_WRITE: ::std::os::raw::c_uint = 8;
pub const VA_SURFACE_ATTRIB_USAGE_HINT_DISPLAY: ::std::os::raw::c_uint = 16;
pub const VA_PROGRESSIVE: ::std::os::raw::c_uint = 1;
pub const VA_SLICE_DATA_FLAG_ALL: ::std::os::raw::c_uint = 0;
pub const VA_SLICE_DATA_FLAG_BEGIN: ::std::os::raw::c_uint = 1;
pub const VA_SLICE_DATA_FLAG_MIDDLE: ::std::os::raw::c_uint = 2;
pub const VA_SLICE_DATA_FLAG_END: ::std::os::raw::c_uint = 4;
pub const VA_MB_TYPE_MOTION_FORWARD: ::std::os::raw::c_uint = 2;
pub const VA_MB_TYPE_MOTION_BACKWARD: ::std::os::raw::c_uint = 4;
pub const VA_MB_TYPE_MOTION_PATTERN: ::std::os::raw::c_uint = 8;
pub const VA_MB_TYPE_MOTION_INTRA: ::std::os::raw::c_uint = 16;
pub const VA_PICTURE_H264_INVALID: ::std::os::raw::c_uint = 1;
pub const VA_PICTURE_H264_TOP_FIELD: ::std::os::raw::c_uint = 2;
pub const VA_PICTURE_H264_BOTTOM_FIELD: ::std::os::raw::c_uint = 4;
pub const VA_PICTURE_H264_SHORT_TERM_REFERENCE: ::std::os::raw::c_uint = 8;
pub const VA_PICTURE_H264_LONG_TERM_REFERENCE: ::std::os::raw::c_uint = 16;
pub const VA_CODED_BUF_STATUS_PICTURE_AVE_QP_MASK: ::std::os::raw::c_uint =
255;
pub const VA_CODED_BUF_STATUS_LARGE_SLICE_MASK: ::std::os::raw::c_uint = 256;
pub const VA_CODED_BUF_STATUS_SLICE_OVERFLOW_MASK: ::std::os::raw::c_uint =
512;
pub const VA_CODED_BUF_STATUS_BITRATE_OVERFLOW: ::std::os::raw::c_uint = 1024;
pub const VA_CODED_BUF_STATUS_BITRATE_HIGH: ::std::os::raw::c_uint = 2048;
pub const VA_CODED_BUF_STATUS_FRAME_SIZE_OVERFLOW: ::std::os::raw::c_uint =
4096;
pub const VA_CODED_BUF_STATUS_AIR_MB_OVER_THRESHOLD: ::std::os::raw::c_uint =
16711680;
pub const VA_CODED_BUF_STATUS_SINGLE_NALU: ::std::os::raw::c_uint = 268435456;
pub const VA_FOURCC_NV12: ::std::os::raw::c_uint = 842094158;
pub const VA_FOURCC_AI44: ::std::os::raw::c_uint = 875839817;
pub const VA_FOURCC_RGBA: ::std::os::raw::c_uint = 1094862674;
pub const VA_FOURCC_RGBX: ::std::os::raw::c_uint = 1480738642;
pub const VA_FOURCC_BGRA: ::std::os::raw::c_uint = 1095911234;
pub const VA_FOURCC_BGRX: ::std::os::raw::c_uint = 1481787202;
pub const VA_FOURCC_ARGB: ::std::os::raw::c_uint = 1111970369;
pub const VA_FOURCC_XRGB: ::std::os::raw::c_uint = 1111970392;
pub const VA_FOURCC_ABGR: ::std::os::raw::c_uint = 1380401729;
pub const VA_FOURCC_XBGR: ::std::os::raw::c_uint = 1380401752;
pub const VA_FOURCC_UYVY: ::std::os::raw::c_uint = 1498831189;
pub const VA_FOURCC_YUY2: ::std::os::raw::c_uint = 844715353;
pub const VA_FOURCC_AYUV: ::std::os::raw::c_uint = 1448433985;
pub const VA_FOURCC_NV11: ::std::os::raw::c_uint = 825316942;
pub const VA_FOURCC_YV12: ::std::os::raw::c_uint = 842094169;
pub const VA_FOURCC_P208: ::std::os::raw::c_uint = 942682704;
pub const VA_FOURCC_IYUV: ::std::os::raw::c_uint = 1448433993;
pub const VA_FOURCC_YV24: ::std::os::raw::c_uint = 875714137;
pub const VA_FOURCC_YV32: ::std::os::raw::c_uint = 842225241;
pub const VA_FOURCC_Y800: ::std::os::raw::c_uint = 808466521;
pub const VA_FOURCC_IMC3: ::std::os::raw::c_uint = 860048713;
pub const VA_FOURCC_411P: ::std::os::raw::c_uint = 1345401140;
pub const VA_FOURCC_422H: ::std::os::raw::c_uint = 1211249204;
pub const VA_FOURCC_422V: ::std::os::raw::c_uint = 1446130228;
pub const VA_FOURCC_444P: ::std::os::raw::c_uint = 1345598516;
pub const VA_FOURCC_RGBP: ::std::os::raw::c_uint = 1346520914;
pub const VA_FOURCC_BGRP: ::std::os::raw::c_uint = 1347569474;
pub const VA_FOURCC_411R: ::std::os::raw::c_uint = 1378955572;
pub const VA_FOURCC_YV16: ::std::os::raw::c_uint = 909203033;
pub const VA_FOURCC_P010: ::std::os::raw::c_uint = 808530000;
pub const VA_FOURCC_P016: ::std::os::raw::c_uint = 909193296;
pub const VA_LSB_FIRST: ::std::os::raw::c_uint = 1;
pub const VA_MSB_FIRST: ::std::os::raw::c_uint = 2;
pub const VA_SUBPICTURE_CHROMA_KEYING: ::std::os::raw::c_uint = 1;
pub const VA_SUBPICTURE_GLOBAL_ALPHA: ::std::os::raw::c_uint = 2;
pub const VA_SUBPICTURE_DESTINATION_IS_SCREEN_COORD: ::std::os::raw::c_uint =
4;
pub const VA_ROTATION_NONE: ::std::os::raw::c_uint = 0;
pub const VA_ROTATION_90: ::std::os::raw::c_uint = 1;
pub const VA_ROTATION_180: ::std::os::raw::c_uint = 2;
pub const VA_ROTATION_270: ::std::os::raw::c_uint = 3;
pub const VA_OOL_DEBLOCKING_FALSE: ::std::os::raw::c_uint = 0;
pub const VA_OOL_DEBLOCKING_TRUE: ::std::os::raw::c_uint = 1;
pub const VA_RENDER_MODE_UNDEFINED: ::std::os::raw::c_uint = 0;
pub const VA_RENDER_MODE_LOCAL_OVERLAY: ::std::os::raw::c_uint = 1;
pub const VA_RENDER_MODE_LOCAL_GPU: ::std::os::raw::c_uint = 2;
pub const VA_RENDER_MODE_EXTERNAL_OVERLAY: ::std::os::raw::c_uint = 4;
pub const VA_RENDER_MODE_EXTERNAL_GPU: ::std::os::raw::c_uint = 8;
pub const VA_RENDER_DEVICE_UNDEFINED: ::std::os::raw::c_uint = 0;
pub const VA_RENDER_DEVICE_LOCAL: ::std::os::raw::c_uint = 1;
pub const VA_RENDER_DEVICE_EXTERNAL: ::std::os::raw::c_uint = 2;
pub const VA_DISPLAY_ATTRIB_NOT_SUPPORTED: ::std::os::raw::c_uint = 0;
pub const VA_DISPLAY_ATTRIB_GETTABLE: ::std::os::raw::c_uint = 1;
pub const VA_DISPLAY_ATTRIB_SETTABLE: ::std::os::raw::c_uint = 2;
pub const VA_PICTURE_HEVC_INVALID: ::std::os::raw::c_uint = 1;
pub const VA_PICTURE_HEVC_FIELD_PIC: ::std::os::raw::c_uint = 2;
pub const VA_PICTURE_HEVC_BOTTOM_FIELD: ::std::os::raw::c_uint = 4;
pub const VA_PICTURE_HEVC_LONG_TERM_REFERENCE: ::std::os::raw::c_uint = 8;
pub const VA_PICTURE_HEVC_RPS_ST_CURR_BEFORE: ::std::os::raw::c_uint = 16;
pub const VA_PICTURE_HEVC_RPS_ST_CURR_AFTER: ::std::os::raw::c_uint = 32;
pub const VA_PICTURE_HEVC_RPS_LT_CURR: ::std::os::raw::c_uint = 64;
pub const HEVC_LAST_PICTURE_EOSEQ: ::std::os::raw::c_uint = 1;
pub const HEVC_LAST_PICTURE_EOSTREAM: ::std::os::raw::c_uint = 2;
pub const H264_LAST_PICTURE_EOSEQ: ::std::os::raw::c_uint = 1;
pub const H264_LAST_PICTURE_EOSTREAM: ::std::os::raw::c_uint = 2;
pub const VA_MB_PRED_AVAIL_TOP_LEFT: ::std::os::raw::c_uint = 4;
pub const VA_MB_PRED_AVAIL_TOP: ::std::os::raw::c_uint = 16;
pub const VA_MB_PRED_AVAIL_TOP_RIGHT: ::std::os::raw::c_uint = 8;
pub const VA_MB_PRED_AVAIL_LEFT: ::std::os::raw::c_uint = 64;
pub const VA_PROC_PIPELINE_SUBPICTURES: ::std::os::raw::c_uint = 1;
pub const VA_PROC_PIPELINE_FAST: ::std::os::raw::c_uint = 2;
pub const VA_PROC_FILTER_MANDATORY: ::std::os::raw::c_uint = 1;
pub const VA_PIPELINE_FLAG_END: ::std::os::raw::c_uint = 4;
pub const VA_DEINTERLACING_BOTTOM_FIELD_FIRST: ::std::os::raw::c_uint = 1;
pub const VA_DEINTERLACING_BOTTOM_FIELD: ::std::os::raw::c_uint = 2;
pub const VA_DEINTERLACING_ONE_FIELD: ::std::os::raw::c_uint = 4;
pub type wchar_t = ::std::os::raw::c_int;
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _bindgen_ty_1 {
pub __clang_max_align_nonce1: ::std::os::raw::c_longlong,
pub __clang_max_align_nonce2: f64,
}
impl Clone for _bindgen_ty_1 {
fn clone(&self) -> Self { *self }
}
pub type max_align_t = _bindgen_ty_1;
pub type int_least8_t = ::std::os::raw::c_char;
pub type int_least16_t = ::std::os::raw::c_short;
pub type int_least32_t = ::std::os::raw::c_int;
pub type int_least64_t = ::std::os::raw::c_long;
pub type uint_least8_t = ::std::os::raw::c_uchar;
pub type uint_least16_t = ::std::os::raw::c_ushort;
pub type uint_least32_t = ::std::os::raw::c_uint;
pub type uint_least64_t = ::std::os::raw::c_ulong;
pub type int_fast8_t = ::std::os::raw::c_char;
pub type int_fast16_t = ::std::os::raw::c_long;
pub type int_fast32_t = ::std::os::raw::c_long;
pub type int_fast64_t = ::std::os::raw::c_long;
pub type uint_fast8_t = ::std::os::raw::c_uchar;
pub type uint_fast16_t = ::std::os::raw::c_ulong;
pub type uint_fast32_t = ::std::os::raw::c_ulong;
pub type uint_fast64_t = ::std::os::raw::c_ulong;
pub type intmax_t = ::std::os::raw::c_long;
pub type uintmax_t = ::std::os::raw::c_ulong;
/**
Overview
The VA API is intended to provide an interface between a video decode/encode/display
application (client) and a hardware accelerator (server), to off-load
video decode/encode/display operations from the host to the hardware accelerator at various
entry-points.
The basic operation steps are:
- Negotiate a mutually acceptable configuration with the server to lock
down profile, entrypoints, and other attributes that will not change on
a frame-by-frame basis.
- Create a decode context which represents a "virtualized" hardware decode
device
- Get and fill decode buffers with picture level, slice level and macroblock
level data (depending on entrypoints)
- Pass the decode buffers to the server to decode the current frame
Initialization & Configuration Management
- Find out supported profiles
- Find out entrypoints for a given profile
- Find out configuration attributes for a given profile/entrypoint pair
- Create a configuration for use by the decoder
*/
pub type VADisplay = *mut ::std::os::raw::c_void;
pub type VAStatus = ::std::os::raw::c_int;
#[link(name = "va")]
extern "C" {
/**
     * Returns a short English description of error_status
*/
pub fn vaErrorStr(error_status: VAStatus)
-> *const ::std::os::raw::c_char;
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VARectangle {
pub x: ::std::os::raw::c_short,
pub y: ::std::os::raw::c_short,
pub width: ::std::os::raw::c_ushort,
pub height: ::std::os::raw::c_ushort,
}
#[test]
fn bindgen_test_layout__VARectangle() {
assert_eq!(::std::mem::size_of::<_VARectangle>() , 8usize);
assert_eq!(::std::mem::align_of::<_VARectangle>() , 2usize);
}
impl Clone for _VARectangle {
fn clone(&self) -> Self { *self }
}
pub type VARectangle = _VARectangle;
/**
* Initialization:
* A display must be obtained by calling vaGetDisplay() before calling
* vaInitialize() and other functions. This connects the API to the
* native window system.
* For X Windows, native_dpy would be from XOpenDisplay()
*/
pub type VANativeDisplay = *mut ::std::os::raw::c_void;
extern "C" {
pub fn vaDisplayIsValid(dpy: VADisplay) -> ::std::os::raw::c_int;
}
extern "C" {
/**
     * Set the override driver name instead of the queried driver name.
*/
pub fn vaSetDriverName(dpy: VADisplay,
driver_name: *mut ::std::os::raw::c_char)
-> VAStatus;
}
extern "C" {
/**
* Initialize the library
*/
pub fn vaInitialize(dpy: VADisplay,
major_version: *mut ::std::os::raw::c_int,
minor_version: *mut ::std::os::raw::c_int)
-> VAStatus;
}
extern "C" {
/**
* After this call, all library internal resources will be cleaned up
*/
pub fn vaTerminate(dpy: VADisplay) -> VAStatus;
}
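// Illustrative sketch (not part of the generated bindings): a minimal
// initialize/terminate round trip over the raw FFI declared in this file.
// It assumes the caller already holds a valid `VADisplay` obtained from a
// platform-specific helper (e.g. vaGetDisplay(), which lives in the
// window-system-specific headers rather than here), and it only calls
// functions whose bindings appear in this file.
#[allow(dead_code)]
unsafe fn example_init_and_terminate(dpy: VADisplay) -> Result<(), String> {
    let mut major: ::std::os::raw::c_int = 0;
    let mut minor: ::std::os::raw::c_int = 0;
    let status = vaInitialize(dpy, &mut major, &mut minor);
    if status != VA_STATUS_SUCCESS as VAStatus {
        // vaErrorStr returns a static, NUL-terminated description of the code.
        let msg = ::std::ffi::CStr::from_ptr(vaErrorStr(status));
        return Err(msg.to_string_lossy().into_owned());
    }
    // The vendor string is informational; its format is vendor specific.
    let vendor = ::std::ffi::CStr::from_ptr(vaQueryVendorString(dpy));
    println!("libva {}.{}: {}", major, minor, vendor.to_string_lossy());
    vaTerminate(dpy);
    Ok(())
}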
extern "C" {
/**
* vaQueryVendorString returns a pointer to a zero-terminated string
     * describing some aspects of the VA implementation on a specific
* hardware accelerator. The format of the returned string is vendor
* specific and at the discretion of the implementer.
* e.g. for the Intel GMA500 implementation, an example would be:
* "Intel GMA500 - 2.0.0.32L.0005"
*/
pub fn vaQueryVendorString(dpy: VADisplay)
-> *const ::std::os::raw::c_char;
}
pub type VAPrivFunc =
::std::option::Option<unsafe extern "C" fn() -> ::std::os::raw::c_int>;
extern "C" {
/**
* Return a function pointer given a function name in the library.
* This allows private interfaces into the library
*/
pub fn vaGetLibFunc(dpy: VADisplay, func: *const ::std::os::raw::c_char)
-> VAPrivFunc;
}
pub const VAProfileNone: _bindgen_ty_2 = _bindgen_ty_2::VAProfileNone;
pub const VAProfileMPEG2Simple: _bindgen_ty_2 =
_bindgen_ty_2::VAProfileMPEG2Simple;
pub const VAProfileMPEG2Main: _bindgen_ty_2 =
_bindgen_ty_2::VAProfileMPEG2Main;
pub const VAProfileMPEG4Simple: _bindgen_ty_2 =
_bindgen_ty_2::VAProfileMPEG4Simple;
pub const VAProfileMPEG4AdvancedSimple: _bindgen_ty_2 =
_bindgen_ty_2::VAProfileMPEG4AdvancedSimple;
pub const VAProfileMPEG4Main: _bindgen_ty_2 =
_bindgen_ty_2::VAProfileMPEG4Main;
pub const VAProfileH264Baseline: _bindgen_ty_2 =
_bindgen_ty_2::VAProfileH264Baseline;
pub const VAProfileH264Main: _bindgen_ty_2 = _bindgen_ty_2::VAProfileH264Main;
pub const VAProfileH264High: _bindgen_ty_2 = _bindgen_ty_2::VAProfileH264High;
pub const VAProfileVC1Simple: _bindgen_ty_2 =
_bindgen_ty_2::VAProfileVC1Simple;
pub const VAProfileVC1Main: _bindgen_ty_2 = _bindgen_ty_2::VAProfileVC1Main;
pub const VAProfileVC1Advanced: _bindgen_ty_2 =
_bindgen_ty_2::VAProfileVC1Advanced;
pub const VAProfileH263Baseline: _bindgen_ty_2 =
_bindgen_ty_2::VAProfileH263Baseline;
pub const VAProfileJPEGBaseline: _bindgen_ty_2 =
_bindgen_ty_2::VAProfileJPEGBaseline;
pub const VAProfileH264ConstrainedBaseline: _bindgen_ty_2 =
_bindgen_ty_2::VAProfileH264ConstrainedBaseline;
pub const VAProfileVP8Version0_3: _bindgen_ty_2 =
_bindgen_ty_2::VAProfileVP8Version0_3;
pub const VAProfileH264MultiviewHigh: _bindgen_ty_2 =
_bindgen_ty_2::VAProfileH264MultiviewHigh;
pub const VAProfileH264StereoHigh: _bindgen_ty_2 =
_bindgen_ty_2::VAProfileH264StereoHigh;
pub const VAProfileHEVCMain: _bindgen_ty_2 = _bindgen_ty_2::VAProfileHEVCMain;
pub const VAProfileHEVCMain10: _bindgen_ty_2 =
_bindgen_ty_2::VAProfileHEVCMain10;
pub const VAProfileVP9Profile0: _bindgen_ty_2 =
_bindgen_ty_2::VAProfileVP9Profile0;
pub const VAProfileVP9Profile1: _bindgen_ty_2 =
_bindgen_ty_2::VAProfileVP9Profile1;
pub const VAProfileVP9Profile2: _bindgen_ty_2 =
_bindgen_ty_2::VAProfileVP9Profile2;
pub const VAProfileVP9Profile3: _bindgen_ty_2 =
_bindgen_ty_2::VAProfileVP9Profile3;
#[repr(i32)]
/** Currently defined profiles */
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum _bindgen_ty_2 {
VAProfileNone = -1,
VAProfileMPEG2Simple = 0,
VAProfileMPEG2Main = 1,
VAProfileMPEG4Simple = 2,
VAProfileMPEG4AdvancedSimple = 3,
VAProfileMPEG4Main = 4,
VAProfileH264Baseline = 5,
VAProfileH264Main = 6,
VAProfileH264High = 7,
VAProfileVC1Simple = 8,
VAProfileVC1Main = 9,
VAProfileVC1Advanced = 10,
VAProfileH263Baseline = 11,
VAProfileJPEGBaseline = 12,
VAProfileH264ConstrainedBaseline = 13,
VAProfileVP8Version0_3 = 14,
VAProfileH264MultiviewHigh = 15,
VAProfileH264StereoHigh = 16,
VAProfileHEVCMain = 17,
VAProfileHEVCMain10 = 18,
VAProfileVP9Profile0 = 19,
VAProfileVP9Profile1 = 20,
VAProfileVP9Profile2 = 21,
VAProfileVP9Profile3 = 22,
}
pub use self::_bindgen_ty_2 as VAProfile;
pub const VAEntrypointVLD: _bindgen_ty_3 = _bindgen_ty_3::VAEntrypointVLD;
pub const VAEntrypointIZZ: _bindgen_ty_3 = _bindgen_ty_3::VAEntrypointIZZ;
pub const VAEntrypointIDCT: _bindgen_ty_3 = _bindgen_ty_3::VAEntrypointIDCT;
pub const VAEntrypointMoComp: _bindgen_ty_3 =
_bindgen_ty_3::VAEntrypointMoComp;
pub const VAEntrypointDeblocking: _bindgen_ty_3 =
_bindgen_ty_3::VAEntrypointDeblocking;
pub const VAEntrypointEncSlice: _bindgen_ty_3 =
_bindgen_ty_3::VAEntrypointEncSlice;
pub const VAEntrypointEncPicture: _bindgen_ty_3 =
_bindgen_ty_3::VAEntrypointEncPicture;
pub const VAEntrypointEncSliceLP: _bindgen_ty_3 =
_bindgen_ty_3::VAEntrypointEncSliceLP;
pub const VAEntrypointVideoProc: _bindgen_ty_3 =
_bindgen_ty_3::VAEntrypointVideoProc;
#[repr(u32)]
/**
* Currently defined entrypoints
*/
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum _bindgen_ty_3 {
VAEntrypointVLD = 1,
VAEntrypointIZZ = 2,
VAEntrypointIDCT = 3,
VAEntrypointMoComp = 4,
VAEntrypointDeblocking = 5,
VAEntrypointEncSlice = 6,
VAEntrypointEncPicture = 7,
VAEntrypointEncSliceLP = 8,
VAEntrypointVideoProc = 10,
}
pub use self::_bindgen_ty_3 as VAEntrypoint;
pub const VAConfigAttribRTFormat: _bindgen_ty_4 =
_bindgen_ty_4::VAConfigAttribRTFormat;
pub const VAConfigAttribSpatialResidual: _bindgen_ty_4 =
_bindgen_ty_4::VAConfigAttribSpatialResidual;
pub const VAConfigAttribSpatialClipping: _bindgen_ty_4 =
_bindgen_ty_4::VAConfigAttribSpatialClipping;
pub const VAConfigAttribIntraResidual: _bindgen_ty_4 =
_bindgen_ty_4::VAConfigAttribIntraResidual;
pub const VAConfigAttribEncryption: _bindgen_ty_4 =
_bindgen_ty_4::VAConfigAttribEncryption;
pub const VAConfigAttribRateControl: _bindgen_ty_4 =
_bindgen_ty_4::VAConfigAttribRateControl;
pub const VAConfigAttribDecSliceMode: _bindgen_ty_4 =
_bindgen_ty_4::VAConfigAttribDecSliceMode;
pub const VAConfigAttribEncPackedHeaders: _bindgen_ty_4 =
_bindgen_ty_4::VAConfigAttribEncPackedHeaders;
pub const VAConfigAttribEncInterlaced: _bindgen_ty_4 =
_bindgen_ty_4::VAConfigAttribEncInterlaced;
pub const VAConfigAttribEncMaxRefFrames: _bindgen_ty_4 =
_bindgen_ty_4::VAConfigAttribEncMaxRefFrames;
pub const VAConfigAttribEncMaxSlices: _bindgen_ty_4 =
_bindgen_ty_4::VAConfigAttribEncMaxSlices;
pub const VAConfigAttribEncSliceStructure: _bindgen_ty_4 =
_bindgen_ty_4::VAConfigAttribEncSliceStructure;
pub const VAConfigAttribEncMacroblockInfo: _bindgen_ty_4 =
_bindgen_ty_4::VAConfigAttribEncMacroblockInfo;
pub const VAConfigAttribEncJPEG: _bindgen_ty_4 =
_bindgen_ty_4::VAConfigAttribEncJPEG;
pub const VAConfigAttribEncQualityRange: _bindgen_ty_4 =
_bindgen_ty_4::VAConfigAttribEncQualityRange;
pub const VAConfigAttribEncSkipFrame: _bindgen_ty_4 =
_bindgen_ty_4::VAConfigAttribEncSkipFrame;
pub const VAConfigAttribEncROI: _bindgen_ty_4 =
_bindgen_ty_4::VAConfigAttribEncROI;
pub const VAConfigAttribTypeMax: _bindgen_ty_4 =
_bindgen_ty_4::VAConfigAttribTypeMax;
#[repr(u32)]
/** Currently defined configuration attribute types */
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum _bindgen_ty_4 {
VAConfigAttribRTFormat = 0,
VAConfigAttribSpatialResidual = 1,
VAConfigAttribSpatialClipping = 2,
VAConfigAttribIntraResidual = 3,
VAConfigAttribEncryption = 4,
VAConfigAttribRateControl = 5,
VAConfigAttribDecSliceMode = 6,
VAConfigAttribEncPackedHeaders = 10,
VAConfigAttribEncInterlaced = 11,
VAConfigAttribEncMaxRefFrames = 13,
VAConfigAttribEncMaxSlices = 14,
VAConfigAttribEncSliceStructure = 15,
VAConfigAttribEncMacroblockInfo = 16,
VAConfigAttribEncJPEG = 20,
VAConfigAttribEncQualityRange = 21,
VAConfigAttribEncSkipFrame = 24,
VAConfigAttribEncROI = 25,
VAConfigAttribTypeMax = 26,
}
pub use self::_bindgen_ty_4 as VAConfigAttribType;
/**
* Configuration attributes
* If there is more than one value for an attribute, a default
* value will be assigned to the attribute if the client does not
* specify the attribute when creating a configuration
*/
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAConfigAttrib {
pub type_: VAConfigAttribType,
pub value: ::std::os::raw::c_uint,
}
#[test]
fn bindgen_test_layout__VAConfigAttrib() {
assert_eq!(::std::mem::size_of::<_VAConfigAttrib>() , 8usize);
assert_eq!(::std::mem::align_of::<_VAConfigAttrib>() , 4usize);
}
impl Clone for _VAConfigAttrib {
fn clone(&self) -> Self { *self }
}
pub type VAConfigAttrib = _VAConfigAttrib;
/** \brief Attribute value for VAConfigAttribEncJPEG */
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAConfigAttribValEncJPEG {
pub bits: __BindgenUnionField<_VAConfigAttribValEncJPEG__bindgen_ty_1>,
pub value: __BindgenUnionField<::std::os::raw::c_uint>,
pub bindgen_union_field: u32,
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAConfigAttribValEncJPEG__bindgen_ty_1 {
pub _bitfield_1: u32,
}
#[test]
fn bindgen_test_layout__VAConfigAttribValEncJPEG__bindgen_ty_1() {
assert_eq!(::std::mem::size_of::<_VAConfigAttribValEncJPEG__bindgen_ty_1>()
, 4usize);
assert_eq!(::std::mem::align_of::<_VAConfigAttribValEncJPEG__bindgen_ty_1>()
, 4usize);
}
impl Clone for _VAConfigAttribValEncJPEG__bindgen_ty_1 {
fn clone(&self) -> Self { *self }
}
impl _VAConfigAttribValEncJPEG__bindgen_ty_1 {
#[inline]
pub fn arithmatic_coding_mode(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (1usize as u32)) >>
0u32) as u32)
}
}
#[inline]
pub fn set_arithmatic_coding_mode(&mut self,
val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(1usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 0u32) & (1usize as u32);
}
#[inline]
pub fn progressive_dct_mode(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (2usize as u32)) >>
1u32) as u32)
}
}
#[inline]
pub fn set_progressive_dct_mode(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(2usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 1u32) & (2usize as u32);
}
#[inline]
pub fn non_interleaved_mode(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (4usize as u32)) >>
2u32) as u32)
}
}
#[inline]
pub fn set_non_interleaved_mode(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(4usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 2u32) & (4usize as u32);
}
#[inline]
pub fn differential_mode(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (8usize as u32)) >>
3u32) as u32)
}
}
#[inline]
pub fn set_differential_mode(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(8usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 3u32) & (8usize as u32);
}
#[inline]
pub fn max_num_components(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (112usize as u32)) >>
4u32) as u32)
}
}
#[inline]
pub fn set_max_num_components(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(112usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 4u32) & (112usize as u32);
}
#[inline]
pub fn max_num_scans(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (1920usize as u32)) >>
7u32) as u32)
}
}
#[inline]
pub fn set_max_num_scans(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(1920usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 7u32) & (1920usize as u32);
}
#[inline]
pub fn max_num_huffman_tables(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (14336usize as u32)) >>
11u32) as u32)
}
}
#[inline]
pub fn set_max_num_huffman_tables(&mut self,
val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(14336usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 11u32) & (14336usize as u32);
}
#[inline]
pub fn max_num_quantization_tables(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (114688usize as u32))
>> 14u32) as u32)
}
}
#[inline]
pub fn set_max_num_quantization_tables(&mut self,
val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(114688usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 14u32) & (114688usize as u32);
}
}
#[test]
fn bindgen_test_layout__VAConfigAttribValEncJPEG() {
assert_eq!(::std::mem::size_of::<_VAConfigAttribValEncJPEG>() , 4usize);
assert_eq!(::std::mem::align_of::<_VAConfigAttribValEncJPEG>() , 4usize);
}
impl Clone for _VAConfigAttribValEncJPEG {
fn clone(&self) -> Self { *self }
}
pub type VAConfigAttribValEncJPEG = _VAConfigAttribValEncJPEG;
/** \brief Attribute value for VAConfigAttribEncROI */
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAConfigAttribValEncROI {
pub bits: __BindgenUnionField<_VAConfigAttribValEncROI__bindgen_ty_1>,
pub value: __BindgenUnionField<::std::os::raw::c_uint>,
pub bindgen_union_field: u32,
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAConfigAttribValEncROI__bindgen_ty_1 {
pub _bitfield_1: u32,
}
#[test]
fn bindgen_test_layout__VAConfigAttribValEncROI__bindgen_ty_1() {
assert_eq!(::std::mem::size_of::<_VAConfigAttribValEncROI__bindgen_ty_1>()
, 4usize);
assert_eq!(::std::mem::align_of::<_VAConfigAttribValEncROI__bindgen_ty_1>()
, 4usize);
}
impl Clone for _VAConfigAttribValEncROI__bindgen_ty_1 {
fn clone(&self) -> Self { *self }
}
impl _VAConfigAttribValEncROI__bindgen_ty_1 {
#[inline]
pub fn num_roi_regions(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (255usize as u32)) >>
0u32) as u32)
}
}
#[inline]
pub fn set_num_roi_regions(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(255usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 0u32) & (255usize as u32);
}
#[inline]
pub fn roi_rc_priority_support(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (256usize as u32)) >>
8u32) as u32)
}
}
#[inline]
pub fn set_roi_rc_priority_support(&mut self,
val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(256usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 8u32) & (256usize as u32);
}
#[inline]
pub fn reserved(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 &
(4294966784usize as u32)) >> 9u32) as
u32)
}
}
#[inline]
pub fn set_reserved(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(4294966784usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 9u32) & (4294966784usize as u32);
}
}
#[test]
fn bindgen_test_layout__VAConfigAttribValEncROI() {
assert_eq!(::std::mem::size_of::<_VAConfigAttribValEncROI>() , 4usize);
assert_eq!(::std::mem::align_of::<_VAConfigAttribValEncROI>() , 4usize);
}
impl Clone for _VAConfigAttribValEncROI {
fn clone(&self) -> Self { *self }
}
pub type VAConfigAttribValEncROI = _VAConfigAttribValEncROI;
extern "C" {
/** Get maximum number of profiles supported by the implementation */
pub fn vaMaxNumProfiles(dpy: VADisplay) -> ::std::os::raw::c_int;
}
extern "C" {
/** Get maximum number of entrypoints supported by the implementation */
pub fn vaMaxNumEntrypoints(dpy: VADisplay) -> ::std::os::raw::c_int;
}
extern "C" {
    /** Get maximum number of attributes supported by the implementation */
pub fn vaMaxNumConfigAttributes(dpy: VADisplay) -> ::std::os::raw::c_int;
}
extern "C" {
/**
* Query supported profiles
* The caller must provide a "profile_list" array that can hold at
     * least vaMaxNumProfiles() entries. The actual number of profiles
* returned in "profile_list" is returned in "num_profile".
*/
pub fn vaQueryConfigProfiles(dpy: VADisplay, profile_list: *mut VAProfile,
num_profiles: *mut ::std::os::raw::c_int)
-> VAStatus;
}
extern "C" {
/**
* Query supported entrypoints for a given profile
* The caller must provide an "entrypoint_list" array that can hold at
* least vaMaxNumEntrypoints() entries. The actual number of entrypoints
* returned in "entrypoint_list" is returned in "num_entrypoints".
*/
pub fn vaQueryConfigEntrypoints(dpy: VADisplay, profile: VAProfile,
entrypoint_list: *mut VAEntrypoint,
num_entrypoints:
*mut ::std::os::raw::c_int)
-> VAStatus;
}
extern "C" {
/**
* Get attributes for a given profile/entrypoint pair
* The caller must provide an "attrib_list" with all attributes to be
* retrieved. Upon return, the attributes in "attrib_list" have been
* updated with their value. Unknown attributes or attributes that are
* not supported for the given profile/entrypoint pair will have their
* value set to VA_ATTRIB_NOT_SUPPORTED
*/
pub fn vaGetConfigAttributes(dpy: VADisplay, profile: VAProfile,
entrypoint: VAEntrypoint,
attrib_list: *mut VAConfigAttrib,
num_attribs: ::std::os::raw::c_int)
-> VAStatus;
}
/** Generic ID type, can be re-typed for specific implementation */
pub type VAGenericID = ::std::os::raw::c_uint;
pub type VAConfigID = VAGenericID;
extern "C" {
/**
* Create a configuration for the decode pipeline
* it passes in the attribute list that specifies the attributes it cares
* about, with the rest taking default values.
*/
pub fn vaCreateConfig(dpy: VADisplay, profile: VAProfile,
entrypoint: VAEntrypoint,
attrib_list: *mut VAConfigAttrib,
num_attribs: ::std::os::raw::c_int,
config_id: *mut VAConfigID) -> VAStatus;
}
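// Illustrative sketch (not part of the generated bindings): the configuration
// negotiation flow described in the Overview above, using only functions whose
// bindings appear in this file. Buffer sizes follow the documented contract
// (at least vaMaxNumProfiles()/vaMaxNumEntrypoints() entries); error handling
// is elided, and the sketch assumes the first reported profile exposes the
// VLD (decode) entrypoint.
#[allow(dead_code)]
unsafe fn example_create_decode_config(dpy: VADisplay) -> VAConfigID {
    // 1. Enumerate the profiles supported by the driver.
    let mut profiles = vec![VAProfileNone; vaMaxNumProfiles(dpy) as usize];
    let mut num_profiles: ::std::os::raw::c_int = 0;
    vaQueryConfigProfiles(dpy, profiles.as_mut_ptr(), &mut num_profiles);
    let profile = profiles[0];

    // 2. Enumerate the entrypoints for that profile (a real caller would
    //    search this list for VAEntrypointVLD before proceeding).
    let mut entrypoints = vec![VAEntrypointVLD; vaMaxNumEntrypoints(dpy) as usize];
    let mut num_entrypoints: ::std::os::raw::c_int = 0;
    vaQueryConfigEntrypoints(dpy, profile, entrypoints.as_mut_ptr(), &mut num_entrypoints);

    // 3. Query the render-target formats supported by the VLD entrypoint.
    let mut attrib = VAConfigAttrib { type_: VAConfigAttribRTFormat, value: 0 };
    vaGetConfigAttributes(dpy, profile, VAEntrypointVLD, &mut attrib, 1);

    // 4. Lock the negotiated attributes into a configuration.
    let mut config_id: VAConfigID = VA_INVALID_ID;
    vaCreateConfig(dpy, profile, VAEntrypointVLD, &mut attrib, 1, &mut config_id);
    config_id
}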
extern "C" {
/**
     * Free resources associated with a given config
*/
pub fn vaDestroyConfig(dpy: VADisplay, config_id: VAConfigID) -> VAStatus;
}
extern "C" {
/**
* Query all attributes for a given configuration
* The profile of the configuration is returned in "profile"
* The entrypoint of the configuration is returned in "entrypoint"
* The caller must provide an "attrib_list" array that can hold at least
* vaMaxNumConfigAttributes() entries. The actual number of attributes
* returned in "attrib_list" is returned in "num_attribs"
*/
pub fn vaQueryConfigAttributes(dpy: VADisplay, config_id: VAConfigID,
profile: *mut VAProfile,
entrypoint: *mut VAEntrypoint,
attrib_list: *mut VAConfigAttrib,
num_attribs: *mut ::std::os::raw::c_int)
-> VAStatus;
}
/**
* Contexts and Surfaces
*
 * A context represents a "virtual" video decode pipeline. Surfaces are render
 * targets for a given context. The data in the surfaces is not accessible
 * to the client, and the internal data format of the surface is implementation
 * specific.
 *
 * Surfaces are bound to a context when the context is created. Once
 * a surface is bound to a given context, it cannot be used to create
 * another context. The association is removed when the context is destroyed.
 *
 * Both contexts and surfaces are identified by unique IDs, and their
 * implementation-specific internals are kept opaque to the clients.
*/
pub type VAContextID = VAGenericID;
pub type VASurfaceID = VAGenericID;
pub const VAGenericValueTypeInteger: _bindgen_ty_5 =
_bindgen_ty_5::VAGenericValueTypeInteger;
pub const VAGenericValueTypeFloat: _bindgen_ty_5 =
_bindgen_ty_5::VAGenericValueTypeFloat;
pub const VAGenericValueTypePointer: _bindgen_ty_5 =
_bindgen_ty_5::VAGenericValueTypePointer;
pub const VAGenericValueTypeFunc: _bindgen_ty_5 =
_bindgen_ty_5::VAGenericValueTypeFunc;
#[repr(u32)]
/** \brief Generic value types. */
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum _bindgen_ty_5 {
VAGenericValueTypeInteger = 1,
VAGenericValueTypeFloat = 2,
VAGenericValueTypePointer = 3,
VAGenericValueTypeFunc = 4,
}
pub use self::_bindgen_ty_5 as VAGenericValueType;
/** \brief Generic function type. */
pub type VAGenericFunc = ::std::option::Option<unsafe extern "C" fn()>;
/** \brief Generic value. */
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAGenericValue {
/** \brief Value type. See #VAGenericValueType. */
pub type_: VAGenericValueType,
pub value: _VAGenericValue__bindgen_ty_1,
}
/** \brief Value holder. */
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAGenericValue__bindgen_ty_1 {
/** \brief 32-bit signed integer. */
pub i: __BindgenUnionField<::std::os::raw::c_int>,
/** \brief 32-bit float. */
pub f: __BindgenUnionField<f32>,
/** \brief Generic pointer. */
pub p: __BindgenUnionField<*mut ::std::os::raw::c_void>,
/** \brief Pointer to function. */
pub fn_: __BindgenUnionField<VAGenericFunc>,
pub bindgen_union_field: u64,
}
#[test]
fn bindgen_test_layout__VAGenericValue__bindgen_ty_1() {
assert_eq!(::std::mem::size_of::<_VAGenericValue__bindgen_ty_1>() ,
8usize);
assert_eq!(::std::mem::align_of::<_VAGenericValue__bindgen_ty_1>() ,
8usize);
}
impl Clone for _VAGenericValue__bindgen_ty_1 {
fn clone(&self) -> Self { *self }
}
#[test]
fn bindgen_test_layout__VAGenericValue() {
assert_eq!(::std::mem::size_of::<_VAGenericValue>() , 16usize);
assert_eq!(::std::mem::align_of::<_VAGenericValue>() , 8usize);
}
impl Clone for _VAGenericValue {
fn clone(&self) -> Self { *self }
}
pub type VAGenericValue = _VAGenericValue;
pub const VASurfaceAttribNone: _bindgen_ty_6 =
_bindgen_ty_6::VASurfaceAttribNone;
pub const VASurfaceAttribPixelFormat: _bindgen_ty_6 =
_bindgen_ty_6::VASurfaceAttribPixelFormat;
pub const VASurfaceAttribMinWidth: _bindgen_ty_6 =
_bindgen_ty_6::VASurfaceAttribMinWidth;
pub const VASurfaceAttribMaxWidth: _bindgen_ty_6 =
_bindgen_ty_6::VASurfaceAttribMaxWidth;
pub const VASurfaceAttribMinHeight: _bindgen_ty_6 =
_bindgen_ty_6::VASurfaceAttribMinHeight;
pub const VASurfaceAttribMaxHeight: _bindgen_ty_6 =
_bindgen_ty_6::VASurfaceAttribMaxHeight;
pub const VASurfaceAttribMemoryType: _bindgen_ty_6 =
_bindgen_ty_6::VASurfaceAttribMemoryType;
pub const VASurfaceAttribExternalBufferDescriptor: _bindgen_ty_6 =
_bindgen_ty_6::VASurfaceAttribExternalBufferDescriptor;
pub const VASurfaceAttribUsageHint: _bindgen_ty_6 =
_bindgen_ty_6::VASurfaceAttribUsageHint;
pub const VASurfaceAttribCount: _bindgen_ty_6 =
_bindgen_ty_6::VASurfaceAttribCount;
#[repr(u32)]
/** \brief Surface attribute types. */
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum _bindgen_ty_6 {
VASurfaceAttribNone = 0,
VASurfaceAttribPixelFormat = 1,
VASurfaceAttribMinWidth = 2,
VASurfaceAttribMaxWidth = 3,
VASurfaceAttribMinHeight = 4,
VASurfaceAttribMaxHeight = 5,
VASurfaceAttribMemoryType = 6,
VASurfaceAttribExternalBufferDescriptor = 7,
VASurfaceAttribUsageHint = 8,
VASurfaceAttribCount = 9,
}
pub use self::_bindgen_ty_6 as VASurfaceAttribType;
/** \brief Surface attribute. */
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VASurfaceAttrib {
/** \brief Type. */
pub type_: VASurfaceAttribType,
/** \brief Flags. See "Surface attribute flags". */
pub flags: ::std::os::raw::c_uint,
/** \brief Value. See "Surface attribute types" for the expected types. */
pub value: VAGenericValue,
}
#[test]
fn bindgen_test_layout__VASurfaceAttrib() {
assert_eq!(::std::mem::size_of::<_VASurfaceAttrib>() , 24usize);
assert_eq!(::std::mem::align_of::<_VASurfaceAttrib>() , 8usize);
}
impl Clone for _VASurfaceAttrib {
fn clone(&self) -> Self { *self }
}
pub type VASurfaceAttrib = _VASurfaceAttrib;
/**
* \brief VASurfaceAttribExternalBuffers structure for
* the VASurfaceAttribExternalBufferDescriptor attribute.
*/
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VASurfaceAttribExternalBuffers {
/** \brief pixel format in fourcc. */
pub pixel_format: ::std::os::raw::c_uint,
/** \brief width in pixels. */
pub width: ::std::os::raw::c_uint,
/** \brief height in pixels. */
pub height: ::std::os::raw::c_uint,
/** \brief total size of the buffer in bytes. */
pub data_size: ::std::os::raw::c_uint,
/** \brief number of planes for planar layout */
pub num_planes: ::std::os::raw::c_uint,
/** \brief pitch for each plane in bytes */
pub pitches: [::std::os::raw::c_uint; 4usize],
/** \brief offset for each plane in bytes */
pub offsets: [::std::os::raw::c_uint; 4usize],
/** \brief buffer handles or user pointers */
pub buffers: *mut ::std::os::raw::c_ulong,
/** \brief number of elements in the "buffers" array */
pub num_buffers: ::std::os::raw::c_uint,
/** \brief flags. See "Surface external buffer descriptor flags". */
pub flags: ::std::os::raw::c_uint,
/** \brief reserved for passing private data */
pub private_data: *mut ::std::os::raw::c_void,
}
#[test]
fn bindgen_test_layout__VASurfaceAttribExternalBuffers() {
assert_eq!(::std::mem::size_of::<_VASurfaceAttribExternalBuffers>() ,
80usize);
assert_eq!(::std::mem::align_of::<_VASurfaceAttribExternalBuffers>() ,
8usize);
}
impl Clone for _VASurfaceAttribExternalBuffers {
fn clone(&self) -> Self { *self }
}
pub type VASurfaceAttribExternalBuffers = _VASurfaceAttribExternalBuffers;
extern "C" {
/**
* \brief Queries surface attributes for the supplied config.
*
* Unlike vaGetSurfaceAttributes(), this function queries for all
* supported attributes for the supplied VA @config. In particular, if
* the underlying hardware supports the creation of VA surfaces in
* various formats, then this function will enumerate all pixel
* formats that are supported.
*
* The \c attrib_list array is allocated by the user and \c
* num_attribs shall be initialized to the number of allocated
* elements in that array. Upon successful return, the actual number
* of attributes will be overwritten into \c num_attribs. Otherwise,
* \c VA_STATUS_ERROR_MAX_NUM_EXCEEDED is returned and \c num_attribs
* is adjusted to the number of elements that would be returned if
* enough space was available.
*
* Note: it is perfectly valid to pass NULL to the \c attrib_list
* argument when vaQuerySurfaceAttributes() is used to determine the
* actual number of elements that need to be allocated.
*
* @param[in] dpy the VA display
* @param[in] config the config identifying a codec or a video
* processing pipeline
* @param[out] attrib_list the output array of #VASurfaceAttrib elements
* @param[in,out] num_attribs the number of elements allocated on
* input, the number of elements actually filled in output
*/
pub fn vaQuerySurfaceAttributes(dpy: VADisplay, config: VAConfigID,
attrib_list: *mut VASurfaceAttrib,
num_attribs: *mut ::std::os::raw::c_uint)
-> VAStatus;
}
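// ---------------------------------------------------------------------------
// Usage sketch (hand-written, not produced by bindgen): the two-call pattern
// described in the comment above -- query the required element count with a
// NULL attribute array, allocate, then query again. `dpy` and `config` are
// assumed to come from the display/config functions declared earlier in these
// bindings; status checking is omitted for brevity.
pub unsafe fn query_all_surface_attributes(dpy: VADisplay, config: VAConfigID)
                                           -> Vec<VASurfaceAttrib> {
    let mut num_attribs: ::std::os::raw::c_uint = 0;
    // First call: attrib_list is NULL, so only the required count is set.
    vaQuerySurfaceAttributes(dpy, config, ::std::ptr::null_mut(),
                             &mut num_attribs);
    let mut attribs: Vec<VASurfaceAttrib> =
        Vec::with_capacity(num_attribs as usize);
    // Second call: the driver fills the array and overwrites num_attribs with
    // the number of elements it actually wrote.
    vaQuerySurfaceAttributes(dpy, config, attribs.as_mut_ptr(),
                             &mut num_attribs);
    attribs.set_len(num_attribs as usize);
    attribs
}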
extern "C" {
/**
* \brief Creates an array of surfaces
*
* Creates an array of surfaces. The optional list of attributes shall
* be constructed and validated through vaGetSurfaceAttributes() or
 * constructed based on what the underlying hardware could
* expose through vaQuerySurfaceAttributes().
*
* @param[in] dpy the VA display
* @param[in] format the desired surface format. See \c VA_RT_FORMAT_*
* @param[in] width the surface width
* @param[in] height the surface height
* @param[out] surfaces the array of newly created surfaces
* @param[in] num_surfaces the number of surfaces to create
* @param[in] attrib_list the list of (optional) attributes, or \c NULL
* @param[in] num_attribs the number of attributes supplied in
* \c attrib_list, or zero
*/
pub fn vaCreateSurfaces(dpy: VADisplay, format: ::std::os::raw::c_uint,
width: ::std::os::raw::c_uint,
height: ::std::os::raw::c_uint,
surfaces: *mut VASurfaceID,
num_surfaces: ::std::os::raw::c_uint,
attrib_list: *mut VASurfaceAttrib,
num_attribs: ::std::os::raw::c_uint) -> VAStatus;
}
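// ---------------------------------------------------------------------------
// Usage sketch (hand-written, not produced by bindgen): allocates `count`
// surfaces of the given size with no optional attributes. The literal 0x1 is
// VA_RT_FORMAT_YUV420 from va.h; substitute another VA_RT_FORMAT_* value as
// needed. Status checking is omitted for brevity.
pub unsafe fn create_yuv420_surfaces(dpy: VADisplay,
                                     width: ::std::os::raw::c_uint,
                                     height: ::std::os::raw::c_uint,
                                     count: usize) -> Vec<VASurfaceID> {
    let mut surfaces: Vec<VASurfaceID> = vec![0; count];
    vaCreateSurfaces(dpy,
                     0x1, // VA_RT_FORMAT_YUV420
                     width,
                     height,
                     surfaces.as_mut_ptr(),
                     count as ::std::os::raw::c_uint,
                     ::std::ptr::null_mut(), // no optional attributes
                     0);
    surfaces
}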
extern "C" {
/**
* vaDestroySurfaces - Destroy resources associated with surfaces.
 * Surfaces can only be destroyed after the associated context has been
* destroyed.
* dpy: display
* surfaces: array of surfaces to destroy
* num_surfaces: number of surfaces in the array to be destroyed.
*/
pub fn vaDestroySurfaces(dpy: VADisplay, surfaces: *mut VASurfaceID,
num_surfaces: ::std::os::raw::c_int) -> VAStatus;
}
extern "C" {
/**
* vaCreateContext - Create a context
* dpy: display
* config_id: configuration for the context
* picture_width: coded picture width
* picture_height: coded picture height
* flag: any combination of the following:
* VA_PROGRESSIVE (only progressive frame pictures in the sequence when set)
* render_targets: render targets (surfaces) tied to the context
* num_render_targets: number of render targets in the above array
* context: created context id upon return
*/
pub fn vaCreateContext(dpy: VADisplay, config_id: VAConfigID,
picture_width: ::std::os::raw::c_int,
picture_height: ::std::os::raw::c_int,
flag: ::std::os::raw::c_int,
render_targets: *mut VASurfaceID,
num_render_targets: ::std::os::raw::c_int,
context: *mut VAContextID) -> VAStatus;
}
extern "C" {
/**
* vaDestroyContext - Destroy a context
* dpy: display
* context: context to be destroyed
*/
pub fn vaDestroyContext(dpy: VADisplay, context: VAContextID) -> VAStatus;
}
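// ---------------------------------------------------------------------------
// Usage sketch (hand-written, not produced by bindgen): ties a context to an
// existing surface pool and tears everything down in the order required above
// (surfaces may only be destroyed after the context that references them).
// Decoding/encoding calls against `context` would go where indicated; status
// checking is omitted for brevity.
pub unsafe fn run_with_context(dpy: VADisplay, config_id: VAConfigID,
                               width: ::std::os::raw::c_int,
                               height: ::std::os::raw::c_int,
                               render_targets: &mut [VASurfaceID]) {
    let mut context: VAContextID = 0;
    vaCreateContext(dpy, config_id, width, height,
                    0x1, // VA_PROGRESSIVE
                    render_targets.as_mut_ptr(),
                    render_targets.len() as ::std::os::raw::c_int,
                    &mut context);
    // ... submit picture/slice parameter and data buffers here ...
    vaDestroyContext(dpy, context);
    vaDestroySurfaces(dpy, render_targets.as_mut_ptr(),
                      render_targets.len() as ::std::os::raw::c_int);
}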
/**
* Buffers
* Buffers are used to pass various types of data from the
* client to the server. The server maintains a data store
 * for each buffer created, and the client identifies a buffer
* through a unique buffer id assigned by the server.
*/
pub type VABufferID = VAGenericID;
pub const VAPictureParameterBufferType: _bindgen_ty_7 =
_bindgen_ty_7::VAPictureParameterBufferType;
pub const VAIQMatrixBufferType: _bindgen_ty_7 =
_bindgen_ty_7::VAIQMatrixBufferType;
pub const VABitPlaneBufferType: _bindgen_ty_7 =
_bindgen_ty_7::VABitPlaneBufferType;
pub const VASliceGroupMapBufferType: _bindgen_ty_7 =
_bindgen_ty_7::VASliceGroupMapBufferType;
pub const VASliceParameterBufferType: _bindgen_ty_7 =
_bindgen_ty_7::VASliceParameterBufferType;
pub const VASliceDataBufferType: _bindgen_ty_7 =
_bindgen_ty_7::VASliceDataBufferType;
pub const VAMacroblockParameterBufferType: _bindgen_ty_7 =
_bindgen_ty_7::VAMacroblockParameterBufferType;
pub const VAResidualDataBufferType: _bindgen_ty_7 =
_bindgen_ty_7::VAResidualDataBufferType;
pub const VADeblockingParameterBufferType: _bindgen_ty_7 =
_bindgen_ty_7::VADeblockingParameterBufferType;
pub const VAImageBufferType: _bindgen_ty_7 = _bindgen_ty_7::VAImageBufferType;
pub const VAProtectedSliceDataBufferType: _bindgen_ty_7 =
_bindgen_ty_7::VAProtectedSliceDataBufferType;
pub const VAQMatrixBufferType: _bindgen_ty_7 =
_bindgen_ty_7::VAQMatrixBufferType;
pub const VAHuffmanTableBufferType: _bindgen_ty_7 =
_bindgen_ty_7::VAHuffmanTableBufferType;
pub const VAProbabilityBufferType: _bindgen_ty_7 =
_bindgen_ty_7::VAProbabilityBufferType;
pub const VAEncCodedBufferType: _bindgen_ty_7 =
_bindgen_ty_7::VAEncCodedBufferType;
pub const VAEncSequenceParameterBufferType: _bindgen_ty_7 =
_bindgen_ty_7::VAEncSequenceParameterBufferType;
pub const VAEncPictureParameterBufferType: _bindgen_ty_7 =
_bindgen_ty_7::VAEncPictureParameterBufferType;
pub const VAEncSliceParameterBufferType: _bindgen_ty_7 =
_bindgen_ty_7::VAEncSliceParameterBufferType;
pub const VAEncPackedHeaderParameterBufferType: _bindgen_ty_7 =
_bindgen_ty_7::VAEncPackedHeaderParameterBufferType;
pub const VAEncPackedHeaderDataBufferType: _bindgen_ty_7 =
_bindgen_ty_7::VAEncPackedHeaderDataBufferType;
pub const VAEncMiscParameterBufferType: _bindgen_ty_7 =
_bindgen_ty_7::VAEncMiscParameterBufferType;
pub const VAEncMacroblockParameterBufferType: _bindgen_ty_7 =
_bindgen_ty_7::VAEncMacroblockParameterBufferType;
pub const VAEncMacroblockMapBufferType: _bindgen_ty_7 =
_bindgen_ty_7::VAEncMacroblockMapBufferType;
pub const VAProcPipelineParameterBufferType: _bindgen_ty_7 =
_bindgen_ty_7::VAProcPipelineParameterBufferType;
pub const VAProcFilterParameterBufferType: _bindgen_ty_7 =
_bindgen_ty_7::VAProcFilterParameterBufferType;
pub const VABufferTypeMax: _bindgen_ty_7 = _bindgen_ty_7::VABufferTypeMax;
#[repr(u32)]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum _bindgen_ty_7 {
VAPictureParameterBufferType = 0,
VAIQMatrixBufferType = 1,
VABitPlaneBufferType = 2,
VASliceGroupMapBufferType = 3,
VASliceParameterBufferType = 4,
VASliceDataBufferType = 5,
VAMacroblockParameterBufferType = 6,
VAResidualDataBufferType = 7,
VADeblockingParameterBufferType = 8,
VAImageBufferType = 9,
VAProtectedSliceDataBufferType = 10,
VAQMatrixBufferType = 11,
VAHuffmanTableBufferType = 12,
VAProbabilityBufferType = 13,
VAEncCodedBufferType = 21,
VAEncSequenceParameterBufferType = 22,
VAEncPictureParameterBufferType = 23,
VAEncSliceParameterBufferType = 24,
VAEncPackedHeaderParameterBufferType = 25,
VAEncPackedHeaderDataBufferType = 26,
VAEncMiscParameterBufferType = 27,
VAEncMacroblockParameterBufferType = 28,
VAEncMacroblockMapBufferType = 29,
VAProcPipelineParameterBufferType = 41,
VAProcFilterParameterBufferType = 42,
VABufferTypeMax = 43,
}
pub use self::_bindgen_ty_7 as VABufferType;
pub const VAEncMiscParameterTypeFrameRate: _bindgen_ty_8 =
_bindgen_ty_8::VAEncMiscParameterTypeFrameRate;
pub const VAEncMiscParameterTypeRateControl: _bindgen_ty_8 =
_bindgen_ty_8::VAEncMiscParameterTypeRateControl;
pub const VAEncMiscParameterTypeMaxSliceSize: _bindgen_ty_8 =
_bindgen_ty_8::VAEncMiscParameterTypeMaxSliceSize;
pub const VAEncMiscParameterTypeAIR: _bindgen_ty_8 =
_bindgen_ty_8::VAEncMiscParameterTypeAIR;
pub const VAEncMiscParameterTypeMaxFrameSize: _bindgen_ty_8 =
_bindgen_ty_8::VAEncMiscParameterTypeMaxFrameSize;
pub const VAEncMiscParameterTypeHRD: _bindgen_ty_8 =
_bindgen_ty_8::VAEncMiscParameterTypeHRD;
pub const VAEncMiscParameterTypeQualityLevel: _bindgen_ty_8 =
_bindgen_ty_8::VAEncMiscParameterTypeQualityLevel;
pub const VAEncMiscParameterTypeSkipFrame: _bindgen_ty_8 =
_bindgen_ty_8::VAEncMiscParameterTypeSkipFrame;
pub const VAEncMiscParameterTypeROI: _bindgen_ty_8 =
_bindgen_ty_8::VAEncMiscParameterTypeROI;
#[repr(u32)]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum _bindgen_ty_8 {
VAEncMiscParameterTypeFrameRate = 0,
VAEncMiscParameterTypeRateControl = 1,
VAEncMiscParameterTypeMaxSliceSize = 2,
VAEncMiscParameterTypeAIR = 3,
VAEncMiscParameterTypeMaxFrameSize = 4,
VAEncMiscParameterTypeHRD = 5,
VAEncMiscParameterTypeQualityLevel = 6,
VAEncMiscParameterTypeSkipFrame = 9,
VAEncMiscParameterTypeROI = 10,
}
pub use self::_bindgen_ty_8 as VAEncMiscParameterType;
pub const VAEncPackedHeaderSequence: _bindgen_ty_9 =
_bindgen_ty_9::VAEncPackedHeaderSequence;
pub const VAEncPackedHeaderPicture: _bindgen_ty_9 =
_bindgen_ty_9::VAEncPackedHeaderPicture;
pub const VAEncPackedHeaderSlice: _bindgen_ty_9 =
_bindgen_ty_9::VAEncPackedHeaderSlice;
pub const VAEncPackedHeaderRawData: _bindgen_ty_9 =
_bindgen_ty_9::VAEncPackedHeaderRawData;
pub const VAEncPackedHeaderMiscMask: _bindgen_ty_9 =
_bindgen_ty_9::VAEncPackedHeaderMiscMask;
#[repr(u32)]
/** \brief Packed header type. */
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum _bindgen_ty_9 {
VAEncPackedHeaderSequence = 1,
VAEncPackedHeaderPicture = 2,
VAEncPackedHeaderSlice = 3,
VAEncPackedHeaderRawData = 4,
VAEncPackedHeaderMiscMask = 2147483648,
}
pub use self::_bindgen_ty_9 as VAEncPackedHeaderType;
/** \brief Packed header parameter. */
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAEncPackedHeaderParameterBuffer {
/** Type of the packed header buffer. See #VAEncPackedHeaderType. */
pub type_: ::std::os::raw::c_uint,
/** \brief Size of the #VAEncPackedHeaderDataBuffer in bits. */
pub bit_length: ::std::os::raw::c_uint,
/** \brief Flag: buffer contains start code emulation prevention bytes? */
pub has_emulation_bytes: ::std::os::raw::c_uchar,
}
#[test]
fn bindgen_test_layout__VAEncPackedHeaderParameterBuffer() {
assert_eq!(::std::mem::size_of::<_VAEncPackedHeaderParameterBuffer>() ,
12usize);
assert_eq!(::std::mem::align_of::<_VAEncPackedHeaderParameterBuffer>() ,
4usize);
}
impl Clone for _VAEncPackedHeaderParameterBuffer {
fn clone(&self) -> Self { *self }
}
pub type VAEncPackedHeaderParameterBuffer = _VAEncPackedHeaderParameterBuffer;
/**
* For application, e.g. set a new bitrate
* VABufferID buf_id;
* VAEncMiscParameterBuffer *misc_param;
* VAEncMiscParameterRateControl *misc_rate_ctrl;
*
* vaCreateBuffer(dpy, context, VAEncMiscParameterBufferType,
* sizeof(VAEncMiscParameterBuffer) + sizeof(VAEncMiscParameterRateControl),
* 1, NULL, &buf_id);
*
* vaMapBuffer(dpy,buf_id,(void **)&misc_param);
* misc_param->type = VAEncMiscParameterTypeRateControl;
* misc_rate_ctrl= (VAEncMiscParameterRateControl *)misc_param->data;
* misc_rate_ctrl->bits_per_second = 6400000;
* vaUnmapBuffer(dpy, buf_id);
* vaRenderPicture(dpy, context, &buf_id, 1);
*/
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAEncMiscParameterBuffer {
pub type_: VAEncMiscParameterType,
pub data: [::std::os::raw::c_uint; 0usize],
}
#[test]
fn bindgen_test_layout__VAEncMiscParameterBuffer() {
assert_eq!(::std::mem::size_of::<_VAEncMiscParameterBuffer>() , 4usize);
assert_eq!(::std::mem::align_of::<_VAEncMiscParameterBuffer>() , 4usize);
}
impl Clone for _VAEncMiscParameterBuffer {
fn clone(&self) -> Self { *self }
}
pub type VAEncMiscParameterBuffer = _VAEncMiscParameterBuffer;
/** \brief Rate control parameters */
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAEncMiscParameterRateControl {
pub bits_per_second: ::std::os::raw::c_uint,
pub target_percentage: ::std::os::raw::c_uint,
pub window_size: ::std::os::raw::c_uint,
pub initial_qp: ::std::os::raw::c_uint,
pub min_qp: ::std::os::raw::c_uint,
pub basic_unit_size: ::std::os::raw::c_uint,
pub rc_flags: _VAEncMiscParameterRateControl__bindgen_ty_1,
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAEncMiscParameterRateControl__bindgen_ty_1 {
pub bits: __BindgenUnionField<_VAEncMiscParameterRateControl__bindgen_ty_1__bindgen_ty_1>,
pub value: __BindgenUnionField<::std::os::raw::c_uint>,
pub bindgen_union_field: u32,
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAEncMiscParameterRateControl__bindgen_ty_1__bindgen_ty_1 {
pub _bitfield_1: u8,
}
#[test]
fn bindgen_test_layout__VAEncMiscParameterRateControl__bindgen_ty_1__bindgen_ty_1() {
assert_eq!(::std::mem::size_of::<_VAEncMiscParameterRateControl__bindgen_ty_1__bindgen_ty_1>()
, 4usize);
assert_eq!(::std::mem::align_of::<_VAEncMiscParameterRateControl__bindgen_ty_1__bindgen_ty_1>()
, 4usize);
}
impl Clone for _VAEncMiscParameterRateControl__bindgen_ty_1__bindgen_ty_1 {
fn clone(&self) -> Self { *self }
}
impl _VAEncMiscParameterRateControl__bindgen_ty_1__bindgen_ty_1 {
#[inline]
pub fn reset(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (1usize as u8)) >>
0u32) as u32)
}
}
#[inline]
pub fn set_reset(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(1usize as u8);
self._bitfield_1 |= ((val as u32 as u8) << 0u32) & (1usize as u8);
}
#[inline]
pub fn disable_frame_skip(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (2usize as u8)) >>
1u32) as u32)
}
}
#[inline]
pub fn set_disable_frame_skip(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(2usize as u8);
self._bitfield_1 |= ((val as u32 as u8) << 1u32) & (2usize as u8);
}
#[inline]
pub fn disable_bit_stuffing(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (4usize as u8)) >>
2u32) as u32)
}
}
#[inline]
pub fn set_disable_bit_stuffing(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(4usize as u8);
self._bitfield_1 |= ((val as u32 as u8) << 2u32) & (4usize as u8);
}
#[inline]
pub fn mb_rate_control(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (120usize as u8)) >>
3u32) as u32)
}
}
#[inline]
pub fn set_mb_rate_control(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(120usize as u8);
self._bitfield_1 |= ((val as u32 as u8) << 3u32) & (120usize as u8);
}
}
#[test]
fn bindgen_test_layout__VAEncMiscParameterRateControl__bindgen_ty_1() {
assert_eq!(::std::mem::size_of::<_VAEncMiscParameterRateControl__bindgen_ty_1>()
, 4usize);
assert_eq!(::std::mem::align_of::<_VAEncMiscParameterRateControl__bindgen_ty_1>()
, 4usize);
}
impl Clone for _VAEncMiscParameterRateControl__bindgen_ty_1 {
fn clone(&self) -> Self { *self }
}
#[test]
fn bindgen_test_layout__VAEncMiscParameterRateControl() {
assert_eq!(::std::mem::size_of::<_VAEncMiscParameterRateControl>() ,
28usize);
assert_eq!(::std::mem::align_of::<_VAEncMiscParameterRateControl>() ,
4usize);
}
impl Clone for _VAEncMiscParameterRateControl {
fn clone(&self) -> Self { *self }
}
pub type VAEncMiscParameterRateControl = _VAEncMiscParameterRateControl;
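// ---------------------------------------------------------------------------
// Usage sketch (hand-written, not produced by bindgen): a Rust rendering of
// the C snippet above _VAEncMiscParameterBuffer -- create a misc-parameter
// buffer, map it, write a new bitrate into the rate-control payload and
// submit it. vaCreateBuffer, vaMapBuffer, vaUnmapBuffer and vaRenderPicture
// are assumed to be declared elsewhere in these bindings with their usual
// libva signatures; status checking is omitted for brevity.
pub unsafe fn update_bitrate(dpy: VADisplay, context: VAContextID,
                             bits_per_second: ::std::os::raw::c_uint) {
    let mut buf_id: VABufferID = 0;
    let size = ::std::mem::size_of::<VAEncMiscParameterBuffer>() +
               ::std::mem::size_of::<VAEncMiscParameterRateControl>();
    vaCreateBuffer(dpy, context, VAEncMiscParameterBufferType,
                   size as ::std::os::raw::c_uint, 1,
                   ::std::ptr::null_mut(), &mut buf_id);
    let mut mapped: *mut ::std::os::raw::c_void = ::std::ptr::null_mut();
    vaMapBuffer(dpy, buf_id, &mut mapped);
    let misc = mapped as *mut VAEncMiscParameterBuffer;
    (*misc).type_ = VAEncMiscParameterTypeRateControl;
    // The rate-control payload starts immediately after the fixed header.
    let rc = (*misc).data.as_mut_ptr() as *mut VAEncMiscParameterRateControl;
    (*rc).bits_per_second = bits_per_second;
    vaUnmapBuffer(dpy, buf_id);
    vaRenderPicture(dpy, context, &mut buf_id, 1);
}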
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAEncMiscParameterFrameRate {
pub framerate: ::std::os::raw::c_uint,
}
#[test]
fn bindgen_test_layout__VAEncMiscParameterFrameRate() {
assert_eq!(::std::mem::size_of::<_VAEncMiscParameterFrameRate>() ,
4usize);
assert_eq!(::std::mem::align_of::<_VAEncMiscParameterFrameRate>() ,
4usize);
}
impl Clone for _VAEncMiscParameterFrameRate {
fn clone(&self) -> Self { *self }
}
pub type VAEncMiscParameterFrameRate = _VAEncMiscParameterFrameRate;
/**
* Allow a maximum slice size to be specified (in bits).
 * The encoder will attempt to make sure that individual slices do not exceed this size,
 * or it will signal the application if the slice size exceeds this size; see "status" of VACodedBufferSegment.
*/
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAEncMiscParameterMaxSliceSize {
pub max_slice_size: ::std::os::raw::c_uint,
}
#[test]
fn bindgen_test_layout__VAEncMiscParameterMaxSliceSize() {
assert_eq!(::std::mem::size_of::<_VAEncMiscParameterMaxSliceSize>() ,
4usize);
assert_eq!(::std::mem::align_of::<_VAEncMiscParameterMaxSliceSize>() ,
4usize);
}
impl Clone for _VAEncMiscParameterMaxSliceSize {
fn clone(&self) -> Self { *self }
}
pub type VAEncMiscParameterMaxSliceSize = _VAEncMiscParameterMaxSliceSize;
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAEncMiscParameterAIR {
pub air_num_mbs: ::std::os::raw::c_uint,
pub air_threshold: ::std::os::raw::c_uint,
pub air_auto: ::std::os::raw::c_uint,
}
#[test]
fn bindgen_test_layout__VAEncMiscParameterAIR() {
assert_eq!(::std::mem::size_of::<_VAEncMiscParameterAIR>() , 12usize);
assert_eq!(::std::mem::align_of::<_VAEncMiscParameterAIR>() , 4usize);
}
impl Clone for _VAEncMiscParameterAIR {
fn clone(&self) -> Self { *self }
}
pub type VAEncMiscParameterAIR = _VAEncMiscParameterAIR;
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAEncMiscParameterHRD {
pub initial_buffer_fullness: ::std::os::raw::c_uint,
pub buffer_size: ::std::os::raw::c_uint,
}
#[test]
fn bindgen_test_layout__VAEncMiscParameterHRD() {
assert_eq!(::std::mem::size_of::<_VAEncMiscParameterHRD>() , 8usize);
assert_eq!(::std::mem::align_of::<_VAEncMiscParameterHRD>() , 4usize);
}
impl Clone for _VAEncMiscParameterHRD {
fn clone(&self) -> Self { *self }
}
pub type VAEncMiscParameterHRD = _VAEncMiscParameterHRD;
/**
* \brief Defines a maximum frame size (in bits).
*
* This misc parameter buffer defines the maximum size of a frame (in
* bits). The encoder will try to make sure that each frame does not
* exceed this size. Otherwise, if the frame size exceeds this size,
* the \c status flag of #VACodedBufferSegment will contain
* #VA_CODED_BUF_STATUS_FRAME_SIZE_OVERFLOW.
*/
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAEncMiscParameterBufferMaxFrameSize {
/** \brief Type. Shall be set to #VAEncMiscParameterTypeMaxFrameSize. */
pub type_: VAEncMiscParameterType,
/** \brief Maximum size of a frame (in bits). */
pub max_frame_size: ::std::os::raw::c_uint,
}
#[test]
fn bindgen_test_layout__VAEncMiscParameterBufferMaxFrameSize() {
assert_eq!(::std::mem::size_of::<_VAEncMiscParameterBufferMaxFrameSize>()
, 8usize);
assert_eq!(::std::mem::align_of::<_VAEncMiscParameterBufferMaxFrameSize>()
, 4usize);
}
impl Clone for _VAEncMiscParameterBufferMaxFrameSize {
fn clone(&self) -> Self { *self }
}
pub type VAEncMiscParameterBufferMaxFrameSize =
_VAEncMiscParameterBufferMaxFrameSize;
/**
* \brief Encoding quality level.
*
* The encoding quality could be set through this structure, if the implementation
* supports multiple quality levels. The quality level set through this structure is
 * persistent over the entire coded sequence, or until a new structure is sent.
* The quality level range can be queried through the VAConfigAttribEncQualityRange
* attribute. A lower value means higher quality, and a value of 1 represents the highest
* quality. The quality level setting is used as a trade-off between quality and speed/power
 * consumption, with higher quality corresponding to lower speed and higher power consumption.
*/
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAEncMiscParameterBufferQualityLevel {
/** \brief Encoding quality level setting. When set to 0, default quality
* level is used.
*/
pub quality_level: ::std::os::raw::c_uint,
}
#[test]
fn bindgen_test_layout__VAEncMiscParameterBufferQualityLevel() {
assert_eq!(::std::mem::size_of::<_VAEncMiscParameterBufferQualityLevel>()
, 4usize);
assert_eq!(::std::mem::align_of::<_VAEncMiscParameterBufferQualityLevel>()
, 4usize);
}
impl Clone for _VAEncMiscParameterBufferQualityLevel {
fn clone(&self) -> Self { *self }
}
pub type VAEncMiscParameterBufferQualityLevel =
_VAEncMiscParameterBufferQualityLevel;
/**
* \brief Encoding skip frame.
*
* The application may choose to skip frames externally to the encoder (e.g. drop completely or
 * code as all skips). For rate control purposes the encoder will need to know the size and number
 * of skipped frames. Skip frames indicated through this structure apply only to the
* current frame. It is allowed for the application to still send in packed headers for the driver to
* pack, although no frame will be encoded (e.g. for HW to encrypt the frame).
*/
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAEncMiscParameterSkipFrame {
/** \brief Indicates skip frames as below.
* 0: Encode as normal, no skip.
* 1: One or more frames were skipped prior to the current frame, encode the current frame as normal.
* 2: The current frame is to be skipped, do not encode it but pack/encrypt the packed header contents
* (all except VAEncPackedHeaderSlice) which could contain actual frame contents (e.g. pack the frame
* in VAEncPackedHeaderPicture). */
pub skip_frame_flag: ::std::os::raw::c_uchar,
/** \brief The number of frames skipped prior to the current frame. Valid when skip_frame_flag = 1. */
pub num_skip_frames: ::std::os::raw::c_uchar,
/** \brief When skip_frame_flag = 1, the size of the skipped frames in bits. When skip_frame_flag = 2,
* the size of the current skipped frame that is to be packed/encrypted in bits. */
pub size_skip_frames: ::std::os::raw::c_uint,
}
#[test]
fn bindgen_test_layout__VAEncMiscParameterSkipFrame() {
assert_eq!(::std::mem::size_of::<_VAEncMiscParameterSkipFrame>() ,
8usize);
assert_eq!(::std::mem::align_of::<_VAEncMiscParameterSkipFrame>() ,
4usize);
}
impl Clone for _VAEncMiscParameterSkipFrame {
fn clone(&self) -> Self { *self }
}
pub type VAEncMiscParameterSkipFrame = _VAEncMiscParameterSkipFrame;
/**
* \brief Encoding region-of-interest (ROI).
*
* The encoding ROI can be set through VAEncMiscParameterBufferROI, if the implementation
* supports ROI input. The ROI set through this structure is applicable only to the
* current frame or field, so must be sent every frame or field to be applied. The number of
* supported ROIs can be queried through the VAConfigAttribEncROI. The encoder will use the
* ROI information to adjust the QP values of the MB's that fall within the ROIs.
*/
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAEncROI {
/** \brief Defines the ROI boundary in pixels, the driver will map it to appropriate
* codec coding units. It is relative to frame coordinates for the frame case and
* to field coordinates for the field case. */
pub roi_rectangle: VARectangle,
    /** \brief When VAConfigAttribRateControl == VA_RC_CQP then roi_value specifies the
* delta QP that will be added on top of the frame level QP. For other rate control
* modes, roi_value specifies the priority of the ROI region relative to the non-ROI
* region. It can be positive (more important) or negative (less important) values
* and is compared with non-ROI region (taken as value 0).
* E.g. ROI region with roi_value -3 is less important than the non-ROI region
* (roi_value implied to be 0) which is less important than ROI region with
* roi_value +2. For overlapping regions, the roi_value that is first in the ROI
* array will have priority. */
pub roi_value: ::std::os::raw::c_char,
}
#[test]
fn bindgen_test_layout__VAEncROI() {
assert_eq!(::std::mem::size_of::<_VAEncROI>() , 10usize);
assert_eq!(::std::mem::align_of::<_VAEncROI>() , 2usize);
}
impl Clone for _VAEncROI {
fn clone(&self) -> Self { *self }
}
pub type VAEncROI = _VAEncROI;
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAEncMiscParameterBufferROI {
/** \brief Number of ROIs being sent.*/
pub num_roi: ::std::os::raw::c_uint,
/** \brief Valid when VAConfigAttribRateControl != VA_RC_CQP, then the encoder's
* rate control will determine actual delta QPs. Specifies the max/min allowed delta
* QPs. */
pub max_delta_qp: ::std::os::raw::c_char,
pub min_delta_qp: ::std::os::raw::c_char,
/** \brief Pointer to a VAEncROI array with num_roi elements. It is relative to frame
* coordinates for the frame case and to field coordinates for the field case.*/
pub roi: *mut VAEncROI,
}
#[test]
fn bindgen_test_layout__VAEncMiscParameterBufferROI() {
assert_eq!(::std::mem::size_of::<_VAEncMiscParameterBufferROI>() ,
16usize);
assert_eq!(::std::mem::align_of::<_VAEncMiscParameterBufferROI>() ,
8usize);
}
impl Clone for _VAEncMiscParameterBufferROI {
fn clone(&self) -> Self { *self }
}
pub type VAEncMiscParameterBufferROI = _VAEncMiscParameterBufferROI;
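// ---------------------------------------------------------------------------
// Usage sketch (hand-written, not produced by bindgen): builds a single-ROI
// payload as described above -- one rectangle whose QP is adjusted relative
// to the rest of the frame. The resulting structure would be carried in a
// VAEncMiscParameterBuffer of type VAEncMiscParameterTypeROI, analogous to
// the rate-control example earlier; only the payload construction is shown.
// `roi_storage` must stay valid until the buffer has been rendered.
pub unsafe fn make_single_roi(rect: VARectangle,
                              delta_qp: ::std::os::raw::c_char,
                              roi_storage: *mut VAEncROI)
                              -> VAEncMiscParameterBufferROI {
    *roi_storage = VAEncROI { roi_rectangle: rect, roi_value: delta_qp };
    VAEncMiscParameterBufferROI {
        num_roi: 1,
        max_delta_qp: 0,
        min_delta_qp: 0,
        roi: roi_storage,
    }
}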
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VASliceParameterBufferBase {
pub slice_data_size: ::std::os::raw::c_uint,
pub slice_data_offset: ::std::os::raw::c_uint,
pub slice_data_flag: ::std::os::raw::c_uint,
}
#[test]
fn bindgen_test_layout__VASliceParameterBufferBase() {
assert_eq!(::std::mem::size_of::<_VASliceParameterBufferBase>() ,
12usize);
assert_eq!(::std::mem::align_of::<_VASliceParameterBufferBase>() ,
4usize);
}
impl Clone for _VASliceParameterBufferBase {
fn clone(&self) -> Self { *self }
}
pub type VASliceParameterBufferBase = _VASliceParameterBufferBase;
/**********************************
* JPEG common data structures
**********************************/
/**
* \brief Huffman table for JPEG decoding.
*
* This structure holds the complete Huffman tables. This is an
* aggregation of all Huffman table (DHT) segments maintained by the
* application. i.e. up to 2 Huffman tables are stored in there for
* baseline profile.
*
* The #load_huffman_table array can be used as a hint to notify the
* VA driver implementation about which table(s) actually changed
* since the last submission of this buffer.
*/
#[repr(C)]
pub struct _VAHuffmanTableBufferJPEGBaseline {
/** \brief Specifies which #huffman_table is valid. */
pub load_huffman_table: [::std::os::raw::c_uchar; 2usize],
pub huffman_table: [_VAHuffmanTableBufferJPEGBaseline__bindgen_ty_1; 2usize],
}
/** \brief Huffman tables indexed by table identifier (Th). */
#[repr(C)]
pub struct _VAHuffmanTableBufferJPEGBaseline__bindgen_ty_1 {
/** @name DC table (up to 12 categories) */
/**@{*/
/** \brief Number of Huffman codes of length i + 1 (Li). */
pub num_dc_codes: [::std::os::raw::c_uchar; 16usize],
/** \brief Value associated with each Huffman code (Vij). */
pub dc_values: [::std::os::raw::c_uchar; 12usize],
/**@}*/
/** @name AC table (2 special codes + up to 16 * 10 codes) */
/**@{*/
/** \brief Number of Huffman codes of length i + 1 (Li). */
pub num_ac_codes: [::std::os::raw::c_uchar; 16usize],
/** \brief Value associated with each Huffman code (Vij). */
pub ac_values: [::std::os::raw::c_uchar; 162usize],
/** \brief Padding to 4-byte boundaries. Must be set to zero. */
pub pad: [::std::os::raw::c_uchar; 2usize],
}
#[test]
fn bindgen_test_layout__VAHuffmanTableBufferJPEGBaseline__bindgen_ty_1() {
assert_eq!(::std::mem::size_of::<_VAHuffmanTableBufferJPEGBaseline__bindgen_ty_1>()
, 208usize);
assert_eq!(::std::mem::align_of::<_VAHuffmanTableBufferJPEGBaseline__bindgen_ty_1>()
, 1usize);
}
#[test]
fn bindgen_test_layout__VAHuffmanTableBufferJPEGBaseline() {
assert_eq!(::std::mem::size_of::<_VAHuffmanTableBufferJPEGBaseline>() ,
418usize);
assert_eq!(::std::mem::align_of::<_VAHuffmanTableBufferJPEGBaseline>() ,
1usize);
}
pub type VAHuffmanTableBufferJPEGBaseline = _VAHuffmanTableBufferJPEGBaseline;
/****************************
* MPEG-2 data structures
****************************/
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAPictureParameterBufferMPEG2 {
pub horizontal_size: ::std::os::raw::c_ushort,
pub vertical_size: ::std::os::raw::c_ushort,
pub forward_reference_picture: VASurfaceID,
pub backward_reference_picture: VASurfaceID,
pub picture_coding_type: ::std::os::raw::c_int,
pub f_code: ::std::os::raw::c_int,
pub picture_coding_extension: _VAPictureParameterBufferMPEG2__bindgen_ty_1,
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAPictureParameterBufferMPEG2__bindgen_ty_1 {
pub bits: __BindgenUnionField<_VAPictureParameterBufferMPEG2__bindgen_ty_1__bindgen_ty_1>,
pub value: __BindgenUnionField<::std::os::raw::c_uint>,
pub bindgen_union_field: u32,
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAPictureParameterBufferMPEG2__bindgen_ty_1__bindgen_ty_1 {
pub _bitfield_1: u16,
}
#[test]
fn bindgen_test_layout__VAPictureParameterBufferMPEG2__bindgen_ty_1__bindgen_ty_1() {
assert_eq!(::std::mem::size_of::<_VAPictureParameterBufferMPEG2__bindgen_ty_1__bindgen_ty_1>()
, 4usize);
assert_eq!(::std::mem::align_of::<_VAPictureParameterBufferMPEG2__bindgen_ty_1__bindgen_ty_1>()
, 4usize);
}
impl Clone for _VAPictureParameterBufferMPEG2__bindgen_ty_1__bindgen_ty_1 {
fn clone(&self) -> Self { *self }
}
impl _VAPictureParameterBufferMPEG2__bindgen_ty_1__bindgen_ty_1 {
#[inline]
pub fn intra_dc_precision(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (3usize as u16)) >>
0u32) as u32)
}
}
#[inline]
pub fn set_intra_dc_precision(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(3usize as u16);
self._bitfield_1 |= ((val as u32 as u16) << 0u32) & (3usize as u16);
}
#[inline]
pub fn picture_structure(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (12usize as u16)) >>
2u32) as u32)
}
}
#[inline]
pub fn set_picture_structure(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(12usize as u16);
self._bitfield_1 |= ((val as u32 as u16) << 2u32) & (12usize as u16);
}
#[inline]
pub fn top_field_first(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (16usize as u16)) >>
4u32) as u32)
}
}
#[inline]
pub fn set_top_field_first(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(16usize as u16);
self._bitfield_1 |= ((val as u32 as u16) << 4u32) & (16usize as u16);
}
#[inline]
pub fn frame_pred_frame_dct(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (32usize as u16)) >>
5u32) as u32)
}
}
#[inline]
pub fn set_frame_pred_frame_dct(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(32usize as u16);
self._bitfield_1 |= ((val as u32 as u16) << 5u32) & (32usize as u16);
}
#[inline]
pub fn concealment_motion_vectors(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (64usize as u16)) >>
6u32) as u32)
}
}
#[inline]
pub fn set_concealment_motion_vectors(&mut self,
val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(64usize as u16);
self._bitfield_1 |= ((val as u32 as u16) << 6u32) & (64usize as u16);
}
#[inline]
pub fn q_scale_type(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (128usize as u16)) >>
7u32) as u32)
}
}
#[inline]
pub fn set_q_scale_type(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(128usize as u16);
self._bitfield_1 |= ((val as u32 as u16) << 7u32) & (128usize as u16);
}
#[inline]
pub fn intra_vlc_format(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (256usize as u16)) >>
8u32) as u32)
}
}
#[inline]
pub fn set_intra_vlc_format(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(256usize as u16);
self._bitfield_1 |= ((val as u32 as u16) << 8u32) & (256usize as u16);
}
#[inline]
pub fn alternate_scan(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (512usize as u16)) >>
9u32) as u32)
}
}
#[inline]
pub fn set_alternate_scan(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(512usize as u16);
self._bitfield_1 |= ((val as u32 as u16) << 9u32) & (512usize as u16);
}
#[inline]
pub fn repeat_first_field(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (1024usize as u16)) >>
10u32) as u32)
}
}
#[inline]
pub fn set_repeat_first_field(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(1024usize as u16);
self._bitfield_1 |=
((val as u32 as u16) << 10u32) & (1024usize as u16);
}
#[inline]
pub fn progressive_frame(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (2048usize as u16)) >>
11u32) as u32)
}
}
#[inline]
pub fn set_progressive_frame(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(2048usize as u16);
self._bitfield_1 |=
((val as u32 as u16) << 11u32) & (2048usize as u16);
}
#[inline]
pub fn is_first_field(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (4096usize as u16)) >>
12u32) as u32)
}
}
#[inline]
pub fn set_is_first_field(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(4096usize as u16);
self._bitfield_1 |=
((val as u32 as u16) << 12u32) & (4096usize as u16);
}
}
#[test]
fn bindgen_test_layout__VAPictureParameterBufferMPEG2__bindgen_ty_1() {
assert_eq!(::std::mem::size_of::<_VAPictureParameterBufferMPEG2__bindgen_ty_1>()
, 4usize);
assert_eq!(::std::mem::align_of::<_VAPictureParameterBufferMPEG2__bindgen_ty_1>()
, 4usize);
}
impl Clone for _VAPictureParameterBufferMPEG2__bindgen_ty_1 {
fn clone(&self) -> Self { *self }
}
#[test]
fn bindgen_test_layout__VAPictureParameterBufferMPEG2() {
assert_eq!(::std::mem::size_of::<_VAPictureParameterBufferMPEG2>() ,
24usize);
assert_eq!(::std::mem::align_of::<_VAPictureParameterBufferMPEG2>() ,
4usize);
}
impl Clone for _VAPictureParameterBufferMPEG2 {
fn clone(&self) -> Self { *self }
}
pub type VAPictureParameterBufferMPEG2 = _VAPictureParameterBufferMPEG2;
/** MPEG-2 Inverse Quantization Matrix Buffer */
#[repr(C)]
pub struct _VAIQMatrixBufferMPEG2 {
/** \brief Same as the MPEG-2 bitstream syntax element. */
pub load_intra_quantiser_matrix: ::std::os::raw::c_int,
/** \brief Same as the MPEG-2 bitstream syntax element. */
pub load_non_intra_quantiser_matrix: ::std::os::raw::c_int,
/** \brief Same as the MPEG-2 bitstream syntax element. */
pub load_chroma_intra_quantiser_matrix: ::std::os::raw::c_int,
/** \brief Same as the MPEG-2 bitstream syntax element. */
pub load_chroma_non_intra_quantiser_matrix: ::std::os::raw::c_int,
/** \brief Luminance intra matrix, in zig-zag scan order. */
pub intra_quantiser_matrix: [::std::os::raw::c_uchar; 64usize],
/** \brief Luminance non-intra matrix, in zig-zag scan order. */
pub non_intra_quantiser_matrix: [::std::os::raw::c_uchar; 64usize],
/** \brief Chroma intra matrix, in zig-zag scan order. */
pub chroma_intra_quantiser_matrix: [::std::os::raw::c_uchar; 64usize],
/** \brief Chroma non-intra matrix, in zig-zag scan order. */
pub chroma_non_intra_quantiser_matrix: [::std::os::raw::c_uchar; 64usize],
}
#[test]
fn bindgen_test_layout__VAIQMatrixBufferMPEG2() {
assert_eq!(::std::mem::size_of::<_VAIQMatrixBufferMPEG2>() , 272usize);
assert_eq!(::std::mem::align_of::<_VAIQMatrixBufferMPEG2>() , 4usize);
}
pub type VAIQMatrixBufferMPEG2 = _VAIQMatrixBufferMPEG2;
/** MPEG-2 Slice Parameter Buffer */
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VASliceParameterBufferMPEG2 {
pub slice_data_size: ::std::os::raw::c_uint,
pub slice_data_offset: ::std::os::raw::c_uint,
pub slice_data_flag: ::std::os::raw::c_uint,
pub macroblock_offset: ::std::os::raw::c_uint,
pub slice_horizontal_position: ::std::os::raw::c_uint,
pub slice_vertical_position: ::std::os::raw::c_uint,
pub quantiser_scale_code: ::std::os::raw::c_int,
pub intra_slice_flag: ::std::os::raw::c_int,
}
#[test]
fn bindgen_test_layout__VASliceParameterBufferMPEG2() {
assert_eq!(::std::mem::size_of::<_VASliceParameterBufferMPEG2>() ,
32usize);
assert_eq!(::std::mem::align_of::<_VASliceParameterBufferMPEG2>() ,
4usize);
}
impl Clone for _VASliceParameterBufferMPEG2 {
fn clone(&self) -> Self { *self }
}
pub type VASliceParameterBufferMPEG2 = _VASliceParameterBufferMPEG2;
/** MPEG-2 Macroblock Parameter Buffer */
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAMacroblockParameterBufferMPEG2 {
pub macroblock_address: ::std::os::raw::c_ushort,
pub macroblock_type: ::std::os::raw::c_uchar,
pub macroblock_modes: _VAMacroblockParameterBufferMPEG2__bindgen_ty_1,
pub motion_vertical_field_select: ::std::os::raw::c_uchar,
pub PMV: [[[::std::os::raw::c_short; 2usize]; 2usize]; 2usize],
pub coded_block_pattern: ::std::os::raw::c_ushort,
pub num_skipped_macroblocks: ::std::os::raw::c_ushort,
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAMacroblockParameterBufferMPEG2__bindgen_ty_1 {
pub bits: __BindgenUnionField<_VAMacroblockParameterBufferMPEG2__bindgen_ty_1__bindgen_ty_1>,
pub value: __BindgenUnionField<::std::os::raw::c_uint>,
pub bindgen_union_field: u32,
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAMacroblockParameterBufferMPEG2__bindgen_ty_1__bindgen_ty_1 {
pub _bitfield_1: u8,
}
#[test]
fn bindgen_test_layout__VAMacroblockParameterBufferMPEG2__bindgen_ty_1__bindgen_ty_1() {
assert_eq!(::std::mem::size_of::<_VAMacroblockParameterBufferMPEG2__bindgen_ty_1__bindgen_ty_1>()
, 4usize);
assert_eq!(::std::mem::align_of::<_VAMacroblockParameterBufferMPEG2__bindgen_ty_1__bindgen_ty_1>()
, 4usize);
}
impl Clone for _VAMacroblockParameterBufferMPEG2__bindgen_ty_1__bindgen_ty_1 {
fn clone(&self) -> Self { *self }
}
impl _VAMacroblockParameterBufferMPEG2__bindgen_ty_1__bindgen_ty_1 {
#[inline]
pub fn frame_motion_type(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (3usize as u8)) >>
0u32) as u32)
}
}
#[inline]
pub fn set_frame_motion_type(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(3usize as u8);
self._bitfield_1 |= ((val as u32 as u8) << 0u32) & (3usize as u8);
}
#[inline]
pub fn field_motion_type(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (12usize as u8)) >>
2u32) as u32)
}
}
#[inline]
pub fn set_field_motion_type(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(12usize as u8);
self._bitfield_1 |= ((val as u32 as u8) << 2u32) & (12usize as u8);
}
#[inline]
pub fn dct_type(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (16usize as u8)) >>
4u32) as u32)
}
}
#[inline]
pub fn set_dct_type(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(16usize as u8);
self._bitfield_1 |= ((val as u32 as u8) << 4u32) & (16usize as u8);
}
}
#[test]
fn bindgen_test_layout__VAMacroblockParameterBufferMPEG2__bindgen_ty_1() {
assert_eq!(::std::mem::size_of::<_VAMacroblockParameterBufferMPEG2__bindgen_ty_1>()
, 4usize);
assert_eq!(::std::mem::align_of::<_VAMacroblockParameterBufferMPEG2__bindgen_ty_1>()
, 4usize);
}
impl Clone for _VAMacroblockParameterBufferMPEG2__bindgen_ty_1 {
fn clone(&self) -> Self { *self }
}
#[test]
fn bindgen_test_layout__VAMacroblockParameterBufferMPEG2() {
assert_eq!(::std::mem::size_of::<_VAMacroblockParameterBufferMPEG2>() ,
32usize);
assert_eq!(::std::mem::align_of::<_VAMacroblockParameterBufferMPEG2>() ,
4usize);
}
impl Clone for _VAMacroblockParameterBufferMPEG2 {
fn clone(&self) -> Self { *self }
}
pub type VAMacroblockParameterBufferMPEG2 = _VAMacroblockParameterBufferMPEG2;
/****************************
* MPEG-4 Part 2 data structures
****************************/
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAPictureParameterBufferMPEG4 {
pub vop_width: ::std::os::raw::c_ushort,
pub vop_height: ::std::os::raw::c_ushort,
pub forward_reference_picture: VASurfaceID,
pub backward_reference_picture: VASurfaceID,
pub vol_fields: _VAPictureParameterBufferMPEG4__bindgen_ty_1,
pub no_of_sprite_warping_points: ::std::os::raw::c_uchar,
pub sprite_trajectory_du: [::std::os::raw::c_short; 3usize],
pub sprite_trajectory_dv: [::std::os::raw::c_short; 3usize],
pub quant_precision: ::std::os::raw::c_uchar,
pub vop_fields: _VAPictureParameterBufferMPEG4__bindgen_ty_2,
pub vop_fcode_forward: ::std::os::raw::c_uchar,
pub vop_fcode_backward: ::std::os::raw::c_uchar,
pub vop_time_increment_resolution: ::std::os::raw::c_ushort,
pub num_gobs_in_vop: ::std::os::raw::c_uchar,
pub num_macroblocks_in_gob: ::std::os::raw::c_uchar,
pub TRB: ::std::os::raw::c_short,
pub TRD: ::std::os::raw::c_short,
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAPictureParameterBufferMPEG4__bindgen_ty_1 {
pub bits: __BindgenUnionField<_VAPictureParameterBufferMPEG4__bindgen_ty_1__bindgen_ty_1>,
pub value: __BindgenUnionField<::std::os::raw::c_uint>,
pub bindgen_union_field: u32,
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAPictureParameterBufferMPEG4__bindgen_ty_1__bindgen_ty_1 {
pub _bitfield_1: u16,
}
#[test]
fn bindgen_test_layout__VAPictureParameterBufferMPEG4__bindgen_ty_1__bindgen_ty_1() {
assert_eq!(::std::mem::size_of::<_VAPictureParameterBufferMPEG4__bindgen_ty_1__bindgen_ty_1>()
, 4usize);
assert_eq!(::std::mem::align_of::<_VAPictureParameterBufferMPEG4__bindgen_ty_1__bindgen_ty_1>()
, 4usize);
}
impl Clone for _VAPictureParameterBufferMPEG4__bindgen_ty_1__bindgen_ty_1 {
fn clone(&self) -> Self { *self }
}
impl _VAPictureParameterBufferMPEG4__bindgen_ty_1__bindgen_ty_1 {
#[inline]
pub fn short_video_header(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (1usize as u16)) >>
0u32) as u32)
}
}
#[inline]
pub fn set_short_video_header(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(1usize as u16);
self._bitfield_1 |= ((val as u32 as u16) << 0u32) & (1usize as u16);
}
#[inline]
pub fn chroma_format(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (6usize as u16)) >>
1u32) as u32)
}
}
#[inline]
pub fn set_chroma_format(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(6usize as u16);
self._bitfield_1 |= ((val as u32 as u16) << 1u32) & (6usize as u16);
}
#[inline]
pub fn interlaced(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (8usize as u16)) >>
3u32) as u32)
}
}
#[inline]
pub fn set_interlaced(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(8usize as u16);
self._bitfield_1 |= ((val as u32 as u16) << 3u32) & (8usize as u16);
}
#[inline]
pub fn obmc_disable(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (16usize as u16)) >>
4u32) as u32)
}
}
#[inline]
pub fn set_obmc_disable(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(16usize as u16);
self._bitfield_1 |= ((val as u32 as u16) << 4u32) & (16usize as u16);
}
#[inline]
pub fn sprite_enable(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (96usize as u16)) >>
5u32) as u32)
}
}
#[inline]
pub fn set_sprite_enable(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(96usize as u16);
self._bitfield_1 |= ((val as u32 as u16) << 5u32) & (96usize as u16);
}
#[inline]
pub fn sprite_warping_accuracy(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (384usize as u16)) >>
7u32) as u32)
}
}
#[inline]
pub fn set_sprite_warping_accuracy(&mut self,
val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(384usize as u16);
self._bitfield_1 |= ((val as u32 as u16) << 7u32) & (384usize as u16);
}
#[inline]
pub fn quant_type(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (512usize as u16)) >>
9u32) as u32)
}
}
#[inline]
pub fn set_quant_type(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(512usize as u16);
self._bitfield_1 |= ((val as u32 as u16) << 9u32) & (512usize as u16);
}
#[inline]
pub fn quarter_sample(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (1024usize as u16)) >>
10u32) as u32)
}
}
#[inline]
pub fn set_quarter_sample(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(1024usize as u16);
self._bitfield_1 |=
((val as u32 as u16) << 10u32) & (1024usize as u16);
}
#[inline]
pub fn data_partitioned(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (2048usize as u16)) >>
11u32) as u32)
}
}
#[inline]
pub fn set_data_partitioned(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(2048usize as u16);
self._bitfield_1 |=
((val as u32 as u16) << 11u32) & (2048usize as u16);
}
#[inline]
pub fn reversible_vlc(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (4096usize as u16)) >>
12u32) as u32)
}
}
#[inline]
pub fn set_reversible_vlc(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(4096usize as u16);
self._bitfield_1 |=
((val as u32 as u16) << 12u32) & (4096usize as u16);
}
#[inline]
pub fn resync_marker_disable(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (8192usize as u16)) >>
13u32) as u32)
}
}
#[inline]
pub fn set_resync_marker_disable(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(8192usize as u16);
self._bitfield_1 |=
((val as u32 as u16) << 13u32) & (8192usize as u16);
}
}
#[test]
fn bindgen_test_layout__VAPictureParameterBufferMPEG4__bindgen_ty_1() {
assert_eq!(::std::mem::size_of::<_VAPictureParameterBufferMPEG4__bindgen_ty_1>()
, 4usize);
assert_eq!(::std::mem::align_of::<_VAPictureParameterBufferMPEG4__bindgen_ty_1>()
, 4usize);
}
impl Clone for _VAPictureParameterBufferMPEG4__bindgen_ty_1 {
fn clone(&self) -> Self { *self }
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAPictureParameterBufferMPEG4__bindgen_ty_2 {
pub bits: __BindgenUnionField<_VAPictureParameterBufferMPEG4__bindgen_ty_2__bindgen_ty_1>,
pub value: __BindgenUnionField<::std::os::raw::c_uint>,
pub bindgen_union_field: u32,
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAPictureParameterBufferMPEG4__bindgen_ty_2__bindgen_ty_1 {
pub _bitfield_1: u16,
}
#[test]
fn bindgen_test_layout__VAPictureParameterBufferMPEG4__bindgen_ty_2__bindgen_ty_1() {
assert_eq!(::std::mem::size_of::<_VAPictureParameterBufferMPEG4__bindgen_ty_2__bindgen_ty_1>()
, 4usize);
assert_eq!(::std::mem::align_of::<_VAPictureParameterBufferMPEG4__bindgen_ty_2__bindgen_ty_1>()
, 4usize);
}
impl Clone for _VAPictureParameterBufferMPEG4__bindgen_ty_2__bindgen_ty_1 {
fn clone(&self) -> Self { *self }
}
impl _VAPictureParameterBufferMPEG4__bindgen_ty_2__bindgen_ty_1 {
#[inline]
pub fn vop_coding_type(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (3usize as u16)) >>
0u32) as u32)
}
}
#[inline]
pub fn set_vop_coding_type(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(3usize as u16);
self._bitfield_1 |= ((val as u32 as u16) << 0u32) & (3usize as u16);
}
#[inline]
pub fn backward_reference_vop_coding_type(&self)
-> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (12usize as u16)) >>
2u32) as u32)
}
}
#[inline]
pub fn set_backward_reference_vop_coding_type(&mut self,
val:
::std::os::raw::c_uint) {
self._bitfield_1 &= !(12usize as u16);
self._bitfield_1 |= ((val as u32 as u16) << 2u32) & (12usize as u16);
}
#[inline]
pub fn vop_rounding_type(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (16usize as u16)) >>
4u32) as u32)
}
}
#[inline]
pub fn set_vop_rounding_type(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(16usize as u16);
self._bitfield_1 |= ((val as u32 as u16) << 4u32) & (16usize as u16);
}
#[inline]
pub fn intra_dc_vlc_thr(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (224usize as u16)) >>
5u32) as u32)
}
}
#[inline]
pub fn set_intra_dc_vlc_thr(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(224usize as u16);
self._bitfield_1 |= ((val as u32 as u16) << 5u32) & (224usize as u16);
}
#[inline]
pub fn top_field_first(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (256usize as u16)) >>
8u32) as u32)
}
}
#[inline]
pub fn set_top_field_first(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(256usize as u16);
self._bitfield_1 |= ((val as u32 as u16) << 8u32) & (256usize as u16);
}
#[inline]
pub fn alternate_vertical_scan_flag(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (512usize as u16)) >>
9u32) as u32)
}
}
#[inline]
pub fn set_alternate_vertical_scan_flag(&mut self,
val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(512usize as u16);
self._bitfield_1 |= ((val as u32 as u16) << 9u32) & (512usize as u16);
}
}
#[test]
fn bindgen_test_layout__VAPictureParameterBufferMPEG4__bindgen_ty_2() {
assert_eq!(::std::mem::size_of::<_VAPictureParameterBufferMPEG4__bindgen_ty_2>()
, 4usize);
assert_eq!(::std::mem::align_of::<_VAPictureParameterBufferMPEG4__bindgen_ty_2>()
, 4usize);
}
impl Clone for _VAPictureParameterBufferMPEG4__bindgen_ty_2 {
fn clone(&self) -> Self { *self }
}
#[test]
fn bindgen_test_layout__VAPictureParameterBufferMPEG4() {
assert_eq!(::std::mem::size_of::<_VAPictureParameterBufferMPEG4>() ,
48usize);
assert_eq!(::std::mem::align_of::<_VAPictureParameterBufferMPEG4>() ,
4usize);
}
impl Clone for _VAPictureParameterBufferMPEG4 {
fn clone(&self) -> Self { *self }
}
pub type VAPictureParameterBufferMPEG4 = _VAPictureParameterBufferMPEG4;
/** MPEG-4 Inverse Quantization Matrix Buffer */
#[repr(C)]
pub struct _VAIQMatrixBufferMPEG4 {
/** Same as the MPEG-4:2 bitstream syntax element. */
pub load_intra_quant_mat: ::std::os::raw::c_int,
/** Same as the MPEG-4:2 bitstream syntax element. */
pub load_non_intra_quant_mat: ::std::os::raw::c_int,
/** The matrix for intra blocks, in zig-zag scan order. */
pub intra_quant_mat: [::std::os::raw::c_uchar; 64usize],
/** The matrix for non-intra blocks, in zig-zag scan order. */
pub non_intra_quant_mat: [::std::os::raw::c_uchar; 64usize],
}
#[test]
fn bindgen_test_layout__VAIQMatrixBufferMPEG4() {
assert_eq!(::std::mem::size_of::<_VAIQMatrixBufferMPEG4>() , 136usize);
assert_eq!(::std::mem::align_of::<_VAIQMatrixBufferMPEG4>() , 4usize);
}
pub type VAIQMatrixBufferMPEG4 = _VAIQMatrixBufferMPEG4;
/** MPEG-4 Slice Parameter Buffer */
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VASliceParameterBufferMPEG4 {
pub slice_data_size: ::std::os::raw::c_uint,
pub slice_data_offset: ::std::os::raw::c_uint,
pub slice_data_flag: ::std::os::raw::c_uint,
pub macroblock_offset: ::std::os::raw::c_uint,
pub macroblock_number: ::std::os::raw::c_uint,
pub quant_scale: ::std::os::raw::c_int,
}
#[test]
fn bindgen_test_layout__VASliceParameterBufferMPEG4() {
assert_eq!(::std::mem::size_of::<_VASliceParameterBufferMPEG4>() ,
24usize);
assert_eq!(::std::mem::align_of::<_VASliceParameterBufferMPEG4>() ,
4usize);
}
impl Clone for _VASliceParameterBufferMPEG4 {
fn clone(&self) -> Self { *self }
}
pub type VASliceParameterBufferMPEG4 = _VASliceParameterBufferMPEG4;
pub const VAMvMode1Mv: _bindgen_ty_10 = _bindgen_ty_10::VAMvMode1Mv;
pub const VAMvMode1MvHalfPel: _bindgen_ty_10 =
_bindgen_ty_10::VAMvMode1MvHalfPel;
pub const VAMvMode1MvHalfPelBilinear: _bindgen_ty_10 =
_bindgen_ty_10::VAMvMode1MvHalfPelBilinear;
pub const VAMvModeMixedMv: _bindgen_ty_10 = _bindgen_ty_10::VAMvModeMixedMv;
pub const VAMvModeIntensityCompensation: _bindgen_ty_10 =
_bindgen_ty_10::VAMvModeIntensityCompensation;
#[repr(u32)]
/**
VC-1 data structures
*/
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum _bindgen_ty_10 {
VAMvMode1Mv = 0,
VAMvMode1MvHalfPel = 1,
VAMvMode1MvHalfPelBilinear = 2,
VAMvModeMixedMv = 3,
VAMvModeIntensityCompensation = 4,
}
pub use self::_bindgen_ty_10 as VAMvModeVC1;
/** VC-1 Picture Parameter Buffer */
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAPictureParameterBufferVC1 {
pub forward_reference_picture: VASurfaceID,
pub backward_reference_picture: VASurfaceID,
pub inloop_decoded_picture: VASurfaceID,
pub sequence_fields: _VAPictureParameterBufferVC1__bindgen_ty_1,
pub coded_width: ::std::os::raw::c_ushort,
pub coded_height: ::std::os::raw::c_ushort,
pub entrypoint_fields: _VAPictureParameterBufferVC1__bindgen_ty_2,
pub conditional_overlap_flag: ::std::os::raw::c_uchar,
pub fast_uvmc_flag: ::std::os::raw::c_uchar,
pub range_mapping_fields: _VAPictureParameterBufferVC1__bindgen_ty_3,
pub b_picture_fraction: ::std::os::raw::c_uchar,
pub cbp_table: ::std::os::raw::c_uchar,
pub mb_mode_table: ::std::os::raw::c_uchar,
pub range_reduction_frame: ::std::os::raw::c_uchar,
pub rounding_control: ::std::os::raw::c_uchar,
pub post_processing: ::std::os::raw::c_uchar,
pub picture_resolution_index: ::std::os::raw::c_uchar,
pub luma_scale: ::std::os::raw::c_uchar,
pub luma_shift: ::std::os::raw::c_uchar,
pub picture_fields: _VAPictureParameterBufferVC1__bindgen_ty_4,
pub raw_coding: _VAPictureParameterBufferVC1__bindgen_ty_5,
pub bitplane_present: _VAPictureParameterBufferVC1__bindgen_ty_6,
pub reference_fields: _VAPictureParameterBufferVC1__bindgen_ty_7,
pub mv_fields: _VAPictureParameterBufferVC1__bindgen_ty_8,
pub pic_quantizer_fields: _VAPictureParameterBufferVC1__bindgen_ty_9,
pub transform_fields: _VAPictureParameterBufferVC1__bindgen_ty_10,
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAPictureParameterBufferVC1__bindgen_ty_1 {
pub bits: __BindgenUnionField<_VAPictureParameterBufferVC1__bindgen_ty_1__bindgen_ty_1>,
pub value: __BindgenUnionField<::std::os::raw::c_uint>,
pub bindgen_union_field: u32,
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAPictureParameterBufferVC1__bindgen_ty_1__bindgen_ty_1 {
pub _bitfield_1: u16,
}
#[test]
fn bindgen_test_layout__VAPictureParameterBufferVC1__bindgen_ty_1__bindgen_ty_1() {
assert_eq!(::std::mem::size_of::<_VAPictureParameterBufferVC1__bindgen_ty_1__bindgen_ty_1>()
, 4usize);
assert_eq!(::std::mem::align_of::<_VAPictureParameterBufferVC1__bindgen_ty_1__bindgen_ty_1>()
, 4usize);
}
impl Clone for _VAPictureParameterBufferVC1__bindgen_ty_1__bindgen_ty_1 {
fn clone(&self) -> Self { *self }
}
impl _VAPictureParameterBufferVC1__bindgen_ty_1__bindgen_ty_1 {
#[inline]
pub fn pulldown(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (1usize as u16)) >>
0u32) as u32)
}
}
#[inline]
pub fn set_pulldown(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(1usize as u16);
self._bitfield_1 |= ((val as u32 as u16) << 0u32) & (1usize as u16);
}
#[inline]
pub fn interlace(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (2usize as u16)) >>
1u32) as u32)
}
}
#[inline]
pub fn set_interlace(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(2usize as u16);
self._bitfield_1 |= ((val as u32 as u16) << 1u32) & (2usize as u16);
}
#[inline]
pub fn tfcntrflag(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (4usize as u16)) >>
2u32) as u32)
}
}
#[inline]
pub fn set_tfcntrflag(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(4usize as u16);
self._bitfield_1 |= ((val as u32 as u16) << 2u32) & (4usize as u16);
}
#[inline]
pub fn finterpflag(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (8usize as u16)) >>
3u32) as u32)
}
}
#[inline]
pub fn set_finterpflag(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(8usize as u16);
self._bitfield_1 |= ((val as u32 as u16) << 3u32) & (8usize as u16);
}
#[inline]
pub fn psf(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (16usize as u16)) >>
4u32) as u32)
}
}
#[inline]
pub fn set_psf(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(16usize as u16);
self._bitfield_1 |= ((val as u32 as u16) << 4u32) & (16usize as u16);
}
#[inline]
pub fn multires(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (32usize as u16)) >>
5u32) as u32)
}
}
#[inline]
pub fn set_multires(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(32usize as u16);
self._bitfield_1 |= ((val as u32 as u16) << 5u32) & (32usize as u16);
}
#[inline]
pub fn overlap(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (64usize as u16)) >>
6u32) as u32)
}
}
#[inline]
pub fn set_overlap(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(64usize as u16);
self._bitfield_1 |= ((val as u32 as u16) << 6u32) & (64usize as u16);
}
#[inline]
pub fn syncmarker(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (128usize as u16)) >>
7u32) as u32)
}
}
#[inline]
pub fn set_syncmarker(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(128usize as u16);
self._bitfield_1 |= ((val as u32 as u16) << 7u32) & (128usize as u16);
}
#[inline]
pub fn rangered(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (256usize as u16)) >>
8u32) as u32)
}
}
#[inline]
pub fn set_rangered(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(256usize as u16);
self._bitfield_1 |= ((val as u32 as u16) << 8u32) & (256usize as u16);
}
#[inline]
pub fn max_b_frames(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (3584usize as u16)) >>
9u32) as u32)
}
}
#[inline]
pub fn set_max_b_frames(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(3584usize as u16);
self._bitfield_1 |=
((val as u32 as u16) << 9u32) & (3584usize as u16);
}
#[inline]
pub fn profile(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (12288usize as u16)) >>
12u32) as u32)
}
}
#[inline]
pub fn set_profile(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(12288usize as u16);
self._bitfield_1 |=
((val as u32 as u16) << 12u32) & (12288usize as u16);
}
}
#[test]
fn bindgen_test_layout__VAPictureParameterBufferVC1__bindgen_ty_1() {
assert_eq!(::std::mem::size_of::<_VAPictureParameterBufferVC1__bindgen_ty_1>()
, 4usize);
assert_eq!(::std::mem::align_of::<_VAPictureParameterBufferVC1__bindgen_ty_1>()
, 4usize);
}
impl Clone for _VAPictureParameterBufferVC1__bindgen_ty_1 {
fn clone(&self) -> Self { *self }
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAPictureParameterBufferVC1__bindgen_ty_2 {
pub bits: __BindgenUnionField<_VAPictureParameterBufferVC1__bindgen_ty_2__bindgen_ty_1>,
pub value: __BindgenUnionField<::std::os::raw::c_uint>,
pub bindgen_union_field: u32,
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAPictureParameterBufferVC1__bindgen_ty_2__bindgen_ty_1 {
pub _bitfield_1: u8,
}
#[test]
fn bindgen_test_layout__VAPictureParameterBufferVC1__bindgen_ty_2__bindgen_ty_1() {
assert_eq!(::std::mem::size_of::<_VAPictureParameterBufferVC1__bindgen_ty_2__bindgen_ty_1>()
, 4usize);
assert_eq!(::std::mem::align_of::<_VAPictureParameterBufferVC1__bindgen_ty_2__bindgen_ty_1>()
, 4usize);
}
impl Clone for _VAPictureParameterBufferVC1__bindgen_ty_2__bindgen_ty_1 {
fn clone(&self) -> Self { *self }
}
impl _VAPictureParameterBufferVC1__bindgen_ty_2__bindgen_ty_1 {
#[inline]
pub fn broken_link(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (1usize as u8)) >>
0u32) as u32)
}
}
#[inline]
pub fn set_broken_link(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(1usize as u8);
self._bitfield_1 |= ((val as u32 as u8) << 0u32) & (1usize as u8);
}
#[inline]
pub fn closed_entry(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (2usize as u8)) >>
1u32) as u32)
}
}
#[inline]
pub fn set_closed_entry(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(2usize as u8);
self._bitfield_1 |= ((val as u32 as u8) << 1u32) & (2usize as u8);
}
#[inline]
pub fn panscan_flag(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (4usize as u8)) >>
2u32) as u32)
}
}
#[inline]
pub fn set_panscan_flag(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(4usize as u8);
self._bitfield_1 |= ((val as u32 as u8) << 2u32) & (4usize as u8);
}
#[inline]
pub fn loopfilter(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (8usize as u8)) >>
3u32) as u32)
}
}
#[inline]
pub fn set_loopfilter(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(8usize as u8);
self._bitfield_1 |= ((val as u32 as u8) << 3u32) & (8usize as u8);
}
}
#[test]
fn bindgen_test_layout__VAPictureParameterBufferVC1__bindgen_ty_2() {
assert_eq!(::std::mem::size_of::<_VAPictureParameterBufferVC1__bindgen_ty_2>()
, 4usize);
assert_eq!(::std::mem::align_of::<_VAPictureParameterBufferVC1__bindgen_ty_2>()
, 4usize);
}
impl Clone for _VAPictureParameterBufferVC1__bindgen_ty_2 {
fn clone(&self) -> Self { *self }
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAPictureParameterBufferVC1__bindgen_ty_3 {
pub bits: __BindgenUnionField<_VAPictureParameterBufferVC1__bindgen_ty_3__bindgen_ty_1>,
pub value: __BindgenUnionField<::std::os::raw::c_uint>,
pub bindgen_union_field: u32,
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAPictureParameterBufferVC1__bindgen_ty_3__bindgen_ty_1 {
pub _bitfield_1: u8,
}
#[test]
fn bindgen_test_layout__VAPictureParameterBufferVC1__bindgen_ty_3__bindgen_ty_1() {
assert_eq!(::std::mem::size_of::<_VAPictureParameterBufferVC1__bindgen_ty_3__bindgen_ty_1>()
, 4usize);
assert_eq!(::std::mem::align_of::<_VAPictureParameterBufferVC1__bindgen_ty_3__bindgen_ty_1>()
, 4usize);
}
impl Clone for _VAPictureParameterBufferVC1__bindgen_ty_3__bindgen_ty_1 {
fn clone(&self) -> Self { *self }
}
impl _VAPictureParameterBufferVC1__bindgen_ty_3__bindgen_ty_1 {
#[inline]
pub fn luma_flag(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (1usize as u8)) >>
0u32) as u32)
}
}
#[inline]
pub fn set_luma_flag(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(1usize as u8);
self._bitfield_1 |= ((val as u32 as u8) << 0u32) & (1usize as u8);
}
#[inline]
pub fn luma(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (14usize as u8)) >>
1u32) as u32)
}
}
#[inline]
pub fn set_luma(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(14usize as u8);
self._bitfield_1 |= ((val as u32 as u8) << 1u32) & (14usize as u8);
}
#[inline]
pub fn chroma_flag(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (16usize as u8)) >>
4u32) as u32)
}
}
#[inline]
pub fn set_chroma_flag(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(16usize as u8);
self._bitfield_1 |= ((val as u32 as u8) << 4u32) & (16usize as u8);
}
#[inline]
pub fn chroma(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (224usize as u8)) >>
5u32) as u32)
}
}
#[inline]
pub fn set_chroma(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(224usize as u8);
self._bitfield_1 |= ((val as u32 as u8) << 5u32) & (224usize as u8);
}
}
#[test]
fn bindgen_test_layout__VAPictureParameterBufferVC1__bindgen_ty_3() {
assert_eq!(::std::mem::size_of::<_VAPictureParameterBufferVC1__bindgen_ty_3>()
, 4usize);
assert_eq!(::std::mem::align_of::<_VAPictureParameterBufferVC1__bindgen_ty_3>()
, 4usize);
}
impl Clone for _VAPictureParameterBufferVC1__bindgen_ty_3 {
fn clone(&self) -> Self { *self }
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAPictureParameterBufferVC1__bindgen_ty_4 {
pub bits: __BindgenUnionField<_VAPictureParameterBufferVC1__bindgen_ty_4__bindgen_ty_1>,
pub value: __BindgenUnionField<::std::os::raw::c_uint>,
pub bindgen_union_field: u32,
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAPictureParameterBufferVC1__bindgen_ty_4__bindgen_ty_1 {
pub _bitfield_1: u16,
}
#[test]
fn bindgen_test_layout__VAPictureParameterBufferVC1__bindgen_ty_4__bindgen_ty_1() {
assert_eq!(::std::mem::size_of::<_VAPictureParameterBufferVC1__bindgen_ty_4__bindgen_ty_1>()
, 4usize);
assert_eq!(::std::mem::align_of::<_VAPictureParameterBufferVC1__bindgen_ty_4__bindgen_ty_1>()
, 4usize);
}
impl Clone for _VAPictureParameterBufferVC1__bindgen_ty_4__bindgen_ty_1 {
fn clone(&self) -> Self { *self }
}
impl _VAPictureParameterBufferVC1__bindgen_ty_4__bindgen_ty_1 {
#[inline]
pub fn picture_type(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (7usize as u16)) >>
0u32) as u32)
}
}
#[inline]
pub fn set_picture_type(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(7usize as u16);
self._bitfield_1 |= ((val as u32 as u16) << 0u32) & (7usize as u16);
}
#[inline]
pub fn frame_coding_mode(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (56usize as u16)) >>
3u32) as u32)
}
}
#[inline]
pub fn set_frame_coding_mode(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(56usize as u16);
self._bitfield_1 |= ((val as u32 as u16) << 3u32) & (56usize as u16);
}
#[inline]
pub fn top_field_first(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (64usize as u16)) >>
6u32) as u32)
}
}
#[inline]
pub fn set_top_field_first(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(64usize as u16);
self._bitfield_1 |= ((val as u32 as u16) << 6u32) & (64usize as u16);
}
#[inline]
pub fn is_first_field(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (128usize as u16)) >>
7u32) as u32)
}
}
#[inline]
pub fn set_is_first_field(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(128usize as u16);
self._bitfield_1 |= ((val as u32 as u16) << 7u32) & (128usize as u16);
}
#[inline]
pub fn intensity_compensation(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (256usize as u16)) >>
8u32) as u32)
}
}
#[inline]
pub fn set_intensity_compensation(&mut self,
val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(256usize as u16);
self._bitfield_1 |= ((val as u32 as u16) << 8u32) & (256usize as u16);
}
}
#[test]
fn bindgen_test_layout__VAPictureParameterBufferVC1__bindgen_ty_4() {
assert_eq!(::std::mem::size_of::<_VAPictureParameterBufferVC1__bindgen_ty_4>()
, 4usize);
assert_eq!(::std::mem::align_of::<_VAPictureParameterBufferVC1__bindgen_ty_4>()
, 4usize);
}
impl Clone for _VAPictureParameterBufferVC1__bindgen_ty_4 {
fn clone(&self) -> Self { *self }
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAPictureParameterBufferVC1__bindgen_ty_5 {
pub flags: __BindgenUnionField<_VAPictureParameterBufferVC1__bindgen_ty_5__bindgen_ty_1>,
pub value: __BindgenUnionField<::std::os::raw::c_uint>,
pub bindgen_union_field: u32,
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAPictureParameterBufferVC1__bindgen_ty_5__bindgen_ty_1 {
pub _bitfield_1: u8,
}
#[test]
fn bindgen_test_layout__VAPictureParameterBufferVC1__bindgen_ty_5__bindgen_ty_1() {
assert_eq!(::std::mem::size_of::<_VAPictureParameterBufferVC1__bindgen_ty_5__bindgen_ty_1>()
, 4usize);
assert_eq!(::std::mem::align_of::<_VAPictureParameterBufferVC1__bindgen_ty_5__bindgen_ty_1>()
, 4usize);
}
impl Clone for _VAPictureParameterBufferVC1__bindgen_ty_5__bindgen_ty_1 {
fn clone(&self) -> Self { *self }
}
impl _VAPictureParameterBufferVC1__bindgen_ty_5__bindgen_ty_1 {
#[inline]
pub fn mv_type_mb(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (1usize as u8)) >>
0u32) as u32)
}
}
#[inline]
pub fn set_mv_type_mb(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(1usize as u8);
self._bitfield_1 |= ((val as u32 as u8) << 0u32) & (1usize as u8);
}
#[inline]
pub fn direct_mb(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (2usize as u8)) >>
1u32) as u32)
}
}
#[inline]
pub fn set_direct_mb(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(2usize as u8);
self._bitfield_1 |= ((val as u32 as u8) << 1u32) & (2usize as u8);
}
#[inline]
pub fn skip_mb(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (4usize as u8)) >>
2u32) as u32)
}
}
#[inline]
pub fn set_skip_mb(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(4usize as u8);
self._bitfield_1 |= ((val as u32 as u8) << 2u32) & (4usize as u8);
}
#[inline]
pub fn field_tx(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (8usize as u8)) >>
3u32) as u32)
}
}
#[inline]
pub fn set_field_tx(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(8usize as u8);
self._bitfield_1 |= ((val as u32 as u8) << 3u32) & (8usize as u8);
}
#[inline]
pub fn forward_mb(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (16usize as u8)) >>
4u32) as u32)
}
}
#[inline]
pub fn set_forward_mb(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(16usize as u8);
self._bitfield_1 |= ((val as u32 as u8) << 4u32) & (16usize as u8);
}
#[inline]
pub fn ac_pred(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (32usize as u8)) >>
5u32) as u32)
}
}
#[inline]
pub fn set_ac_pred(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(32usize as u8);
self._bitfield_1 |= ((val as u32 as u8) << 5u32) & (32usize as u8);
}
#[inline]
pub fn overflags(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (64usize as u8)) >>
6u32) as u32)
}
}
#[inline]
pub fn set_overflags(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(64usize as u8);
self._bitfield_1 |= ((val as u32 as u8) << 6u32) & (64usize as u8);
}
}
#[test]
fn bindgen_test_layout__VAPictureParameterBufferVC1__bindgen_ty_5() {
assert_eq!(::std::mem::size_of::<_VAPictureParameterBufferVC1__bindgen_ty_5>()
, 4usize);
assert_eq!(::std::mem::align_of::<_VAPictureParameterBufferVC1__bindgen_ty_5>()
, 4usize);
}
impl Clone for _VAPictureParameterBufferVC1__bindgen_ty_5 {
fn clone(&self) -> Self { *self }
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAPictureParameterBufferVC1__bindgen_ty_6 {
pub flags: __BindgenUnionField<_VAPictureParameterBufferVC1__bindgen_ty_6__bindgen_ty_1>,
pub value: __BindgenUnionField<::std::os::raw::c_uint>,
pub bindgen_union_field: u32,
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAPictureParameterBufferVC1__bindgen_ty_6__bindgen_ty_1 {
pub _bitfield_1: u8,
}
#[test]
fn bindgen_test_layout__VAPictureParameterBufferVC1__bindgen_ty_6__bindgen_ty_1() {
assert_eq!(::std::mem::size_of::<_VAPictureParameterBufferVC1__bindgen_ty_6__bindgen_ty_1>()
, 4usize);
assert_eq!(::std::mem::align_of::<_VAPictureParameterBufferVC1__bindgen_ty_6__bindgen_ty_1>()
, 4usize);
}
impl Clone for _VAPictureParameterBufferVC1__bindgen_ty_6__bindgen_ty_1 {
fn clone(&self) -> Self { *self }
}
impl _VAPictureParameterBufferVC1__bindgen_ty_6__bindgen_ty_1 {
#[inline]
pub fn bp_mv_type_mb(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (1usize as u8)) >>
0u32) as u32)
}
}
#[inline]
pub fn set_bp_mv_type_mb(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(1usize as u8);
self._bitfield_1 |= ((val as u32 as u8) << 0u32) & (1usize as u8);
}
#[inline]
pub fn bp_direct_mb(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (2usize as u8)) >>
1u32) as u32)
}
}
#[inline]
pub fn set_bp_direct_mb(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(2usize as u8);
self._bitfield_1 |= ((val as u32 as u8) << 1u32) & (2usize as u8);
}
#[inline]
pub fn bp_skip_mb(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (4usize as u8)) >>
2u32) as u32)
}
}
#[inline]
pub fn set_bp_skip_mb(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(4usize as u8);
self._bitfield_1 |= ((val as u32 as u8) << 2u32) & (4usize as u8);
}
#[inline]
pub fn bp_field_tx(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (8usize as u8)) >>
3u32) as u32)
}
}
#[inline]
pub fn set_bp_field_tx(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(8usize as u8);
self._bitfield_1 |= ((val as u32 as u8) << 3u32) & (8usize as u8);
}
#[inline]
pub fn bp_forward_mb(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (16usize as u8)) >>
4u32) as u32)
}
}
#[inline]
pub fn set_bp_forward_mb(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(16usize as u8);
self._bitfield_1 |= ((val as u32 as u8) << 4u32) & (16usize as u8);
}
#[inline]
pub fn bp_ac_pred(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (32usize as u8)) >>
5u32) as u32)
}
}
#[inline]
pub fn set_bp_ac_pred(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(32usize as u8);
self._bitfield_1 |= ((val as u32 as u8) << 5u32) & (32usize as u8);
}
#[inline]
pub fn bp_overflags(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (64usize as u8)) >>
6u32) as u32)
}
}
#[inline]
pub fn set_bp_overflags(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(64usize as u8);
self._bitfield_1 |= ((val as u32 as u8) << 6u32) & (64usize as u8);
}
}
#[test]
fn bindgen_test_layout__VAPictureParameterBufferVC1__bindgen_ty_6() {
assert_eq!(::std::mem::size_of::<_VAPictureParameterBufferVC1__bindgen_ty_6>()
, 4usize);
assert_eq!(::std::mem::align_of::<_VAPictureParameterBufferVC1__bindgen_ty_6>()
, 4usize);
}
impl Clone for _VAPictureParameterBufferVC1__bindgen_ty_6 {
fn clone(&self) -> Self { *self }
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAPictureParameterBufferVC1__bindgen_ty_7 {
pub bits: __BindgenUnionField<_VAPictureParameterBufferVC1__bindgen_ty_7__bindgen_ty_1>,
pub value: __BindgenUnionField<::std::os::raw::c_uint>,
pub bindgen_union_field: u32,
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAPictureParameterBufferVC1__bindgen_ty_7__bindgen_ty_1 {
pub _bitfield_1: u8,
}
#[test]
fn bindgen_test_layout__VAPictureParameterBufferVC1__bindgen_ty_7__bindgen_ty_1() {
assert_eq!(::std::mem::size_of::<_VAPictureParameterBufferVC1__bindgen_ty_7__bindgen_ty_1>()
, 4usize);
assert_eq!(::std::mem::align_of::<_VAPictureParameterBufferVC1__bindgen_ty_7__bindgen_ty_1>()
, 4usize);
}
impl Clone for _VAPictureParameterBufferVC1__bindgen_ty_7__bindgen_ty_1 {
fn clone(&self) -> Self { *self }
}
impl _VAPictureParameterBufferVC1__bindgen_ty_7__bindgen_ty_1 {
#[inline]
pub fn reference_distance_flag(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (1usize as u8)) >>
0u32) as u32)
}
}
#[inline]
pub fn set_reference_distance_flag(&mut self,
val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(1usize as u8);
self._bitfield_1 |= ((val as u32 as u8) << 0u32) & (1usize as u8);
}
#[inline]
pub fn reference_distance(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (62usize as u8)) >>
1u32) as u32)
}
}
#[inline]
pub fn set_reference_distance(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(62usize as u8);
self._bitfield_1 |= ((val as u32 as u8) << 1u32) & (62usize as u8);
}
#[inline]
pub fn num_reference_pictures(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (64usize as u8)) >>
6u32) as u32)
}
}
#[inline]
pub fn set_num_reference_pictures(&mut self,
val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(64usize as u8);
self._bitfield_1 |= ((val as u32 as u8) << 6u32) & (64usize as u8);
}
#[inline]
pub fn reference_field_pic_indicator(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (128usize as u8)) >>
7u32) as u32)
}
}
#[inline]
pub fn set_reference_field_pic_indicator(&mut self,
val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(128usize as u8);
self._bitfield_1 |= ((val as u32 as u8) << 7u32) & (128usize as u8);
}
}
#[test]
fn bindgen_test_layout__VAPictureParameterBufferVC1__bindgen_ty_7() {
assert_eq!(::std::mem::size_of::<_VAPictureParameterBufferVC1__bindgen_ty_7>()
, 4usize);
assert_eq!(::std::mem::align_of::<_VAPictureParameterBufferVC1__bindgen_ty_7>()
, 4usize);
}
impl Clone for _VAPictureParameterBufferVC1__bindgen_ty_7 {
fn clone(&self) -> Self { *self }
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAPictureParameterBufferVC1__bindgen_ty_8 {
pub bits: __BindgenUnionField<_VAPictureParameterBufferVC1__bindgen_ty_8__bindgen_ty_1>,
pub value: __BindgenUnionField<::std::os::raw::c_uint>,
pub bindgen_union_field: u32,
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAPictureParameterBufferVC1__bindgen_ty_8__bindgen_ty_1 {
pub _bitfield_1: u32,
}
#[test]
fn bindgen_test_layout__VAPictureParameterBufferVC1__bindgen_ty_8__bindgen_ty_1() {
assert_eq!(::std::mem::size_of::<_VAPictureParameterBufferVC1__bindgen_ty_8__bindgen_ty_1>()
, 4usize);
assert_eq!(::std::mem::align_of::<_VAPictureParameterBufferVC1__bindgen_ty_8__bindgen_ty_1>()
, 4usize);
}
impl Clone for _VAPictureParameterBufferVC1__bindgen_ty_8__bindgen_ty_1 {
fn clone(&self) -> Self { *self }
}
impl _VAPictureParameterBufferVC1__bindgen_ty_8__bindgen_ty_1 {
#[inline]
pub fn mv_mode(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (7usize as u32)) >>
0u32) as u32)
}
}
#[inline]
pub fn set_mv_mode(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(7usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 0u32) & (7usize as u32);
}
#[inline]
pub fn mv_mode2(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (56usize as u32)) >>
3u32) as u32)
}
}
#[inline]
pub fn set_mv_mode2(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(56usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 3u32) & (56usize as u32);
}
#[inline]
pub fn mv_table(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (448usize as u32)) >>
6u32) as u32)
}
}
#[inline]
pub fn set_mv_table(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(448usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 6u32) & (448usize as u32);
}
#[inline]
pub fn two_mv_block_pattern_table(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (1536usize as u32)) >>
9u32) as u32)
}
}
#[inline]
pub fn set_two_mv_block_pattern_table(&mut self,
val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(1536usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 9u32) & (1536usize as u32);
}
#[inline]
pub fn four_mv_switch(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (2048usize as u32)) >>
11u32) as u32)
}
}
#[inline]
pub fn set_four_mv_switch(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(2048usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 11u32) & (2048usize as u32);
}
#[inline]
pub fn four_mv_block_pattern_table(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (12288usize as u32)) >>
12u32) as u32)
}
}
#[inline]
pub fn set_four_mv_block_pattern_table(&mut self,
val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(12288usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 12u32) & (12288usize as u32);
}
#[inline]
pub fn extended_mv_flag(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (16384usize as u32)) >>
14u32) as u32)
}
}
#[inline]
pub fn set_extended_mv_flag(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(16384usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 14u32) & (16384usize as u32);
}
#[inline]
pub fn extended_mv_range(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (98304usize as u32)) >>
15u32) as u32)
}
}
#[inline]
pub fn set_extended_mv_range(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(98304usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 15u32) & (98304usize as u32);
}
#[inline]
pub fn extended_dmv_flag(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (131072usize as u32))
>> 17u32) as u32)
}
}
#[inline]
pub fn set_extended_dmv_flag(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(131072usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 17u32) & (131072usize as u32);
}
#[inline]
pub fn extended_dmv_range(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (786432usize as u32))
>> 18u32) as u32)
}
}
#[inline]
pub fn set_extended_dmv_range(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(786432usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 18u32) & (786432usize as u32);
}
}
#[test]
fn bindgen_test_layout__VAPictureParameterBufferVC1__bindgen_ty_8() {
assert_eq!(::std::mem::size_of::<_VAPictureParameterBufferVC1__bindgen_ty_8>()
, 4usize);
assert_eq!(::std::mem::align_of::<_VAPictureParameterBufferVC1__bindgen_ty_8>()
, 4usize);
}
impl Clone for _VAPictureParameterBufferVC1__bindgen_ty_8 {
fn clone(&self) -> Self { *self }
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAPictureParameterBufferVC1__bindgen_ty_9 {
pub bits: __BindgenUnionField<_VAPictureParameterBufferVC1__bindgen_ty_9__bindgen_ty_1>,
pub value: __BindgenUnionField<::std::os::raw::c_uint>,
pub bindgen_union_field: u32,
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAPictureParameterBufferVC1__bindgen_ty_9__bindgen_ty_1 {
pub _bitfield_1: u32,
}
#[test]
fn bindgen_test_layout__VAPictureParameterBufferVC1__bindgen_ty_9__bindgen_ty_1() {
assert_eq!(::std::mem::size_of::<_VAPictureParameterBufferVC1__bindgen_ty_9__bindgen_ty_1>()
, 4usize);
assert_eq!(::std::mem::align_of::<_VAPictureParameterBufferVC1__bindgen_ty_9__bindgen_ty_1>()
, 4usize);
}
impl Clone for _VAPictureParameterBufferVC1__bindgen_ty_9__bindgen_ty_1 {
fn clone(&self) -> Self { *self }
}
impl _VAPictureParameterBufferVC1__bindgen_ty_9__bindgen_ty_1 {
#[inline]
pub fn dquant(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (3usize as u32)) >>
0u32) as u32)
}
}
#[inline]
pub fn set_dquant(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(3usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 0u32) & (3usize as u32);
}
#[inline]
pub fn quantizer(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (12usize as u32)) >>
2u32) as u32)
}
}
#[inline]
pub fn set_quantizer(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(12usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 2u32) & (12usize as u32);
}
#[inline]
pub fn half_qp(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (16usize as u32)) >>
4u32) as u32)
}
}
#[inline]
pub fn set_half_qp(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(16usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 4u32) & (16usize as u32);
}
#[inline]
pub fn pic_quantizer_scale(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (992usize as u32)) >>
5u32) as u32)
}
}
#[inline]
pub fn set_pic_quantizer_scale(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(992usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 5u32) & (992usize as u32);
}
#[inline]
pub fn pic_quantizer_type(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (1024usize as u32)) >>
10u32) as u32)
}
}
#[inline]
pub fn set_pic_quantizer_type(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(1024usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 10u32) & (1024usize as u32);
}
#[inline]
pub fn dq_frame(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (2048usize as u32)) >>
11u32) as u32)
}
}
#[inline]
pub fn set_dq_frame(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(2048usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 11u32) & (2048usize as u32);
}
#[inline]
pub fn dq_profile(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (12288usize as u32)) >>
12u32) as u32)
}
}
#[inline]
pub fn set_dq_profile(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(12288usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 12u32) & (12288usize as u32);
}
#[inline]
pub fn dq_sb_edge(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (49152usize as u32)) >>
14u32) as u32)
}
}
#[inline]
pub fn set_dq_sb_edge(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(49152usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 14u32) & (49152usize as u32);
}
#[inline]
pub fn dq_db_edge(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (196608usize as u32))
>> 16u32) as u32)
}
}
#[inline]
pub fn set_dq_db_edge(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(196608usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 16u32) & (196608usize as u32);
}
#[inline]
pub fn dq_binary_level(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (262144usize as u32))
>> 18u32) as u32)
}
}
#[inline]
pub fn set_dq_binary_level(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(262144usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 18u32) & (262144usize as u32);
}
#[inline]
pub fn alt_pic_quantizer(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (16252928usize as u32))
>> 19u32) as u32)
}
}
#[inline]
pub fn set_alt_pic_quantizer(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(16252928usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 19u32) & (16252928usize as u32);
}
}
#[test]
fn bindgen_test_layout__VAPictureParameterBufferVC1__bindgen_ty_9() {
assert_eq!(::std::mem::size_of::<_VAPictureParameterBufferVC1__bindgen_ty_9>()
, 4usize);
assert_eq!(::std::mem::align_of::<_VAPictureParameterBufferVC1__bindgen_ty_9>()
, 4usize);
}
impl Clone for _VAPictureParameterBufferVC1__bindgen_ty_9 {
fn clone(&self) -> Self { *self }
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAPictureParameterBufferVC1__bindgen_ty_10 {
pub bits: __BindgenUnionField<_VAPictureParameterBufferVC1__bindgen_ty_10__bindgen_ty_1>,
pub value: __BindgenUnionField<::std::os::raw::c_uint>,
pub bindgen_union_field: u32,
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAPictureParameterBufferVC1__bindgen_ty_10__bindgen_ty_1 {
pub _bitfield_1: u16,
}
#[test]
fn bindgen_test_layout__VAPictureParameterBufferVC1__bindgen_ty_10__bindgen_ty_1() {
assert_eq!(::std::mem::size_of::<_VAPictureParameterBufferVC1__bindgen_ty_10__bindgen_ty_1>()
, 4usize);
assert_eq!(::std::mem::align_of::<_VAPictureParameterBufferVC1__bindgen_ty_10__bindgen_ty_1>()
, 4usize);
}
impl Clone for _VAPictureParameterBufferVC1__bindgen_ty_10__bindgen_ty_1 {
fn clone(&self) -> Self { *self }
}
impl _VAPictureParameterBufferVC1__bindgen_ty_10__bindgen_ty_1 {
#[inline]
pub fn variable_sized_transform_flag(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (1usize as u16)) >>
0u32) as u32)
}
}
#[inline]
pub fn set_variable_sized_transform_flag(&mut self,
val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(1usize as u16);
self._bitfield_1 |= ((val as u32 as u16) << 0u32) & (1usize as u16);
}
#[inline]
pub fn mb_level_transform_type_flag(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (2usize as u16)) >>
1u32) as u32)
}
}
#[inline]
pub fn set_mb_level_transform_type_flag(&mut self,
val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(2usize as u16);
self._bitfield_1 |= ((val as u32 as u16) << 1u32) & (2usize as u16);
}
#[inline]
pub fn frame_level_transform_type(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (12usize as u16)) >>
2u32) as u32)
}
}
#[inline]
pub fn set_frame_level_transform_type(&mut self,
val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(12usize as u16);
self._bitfield_1 |= ((val as u32 as u16) << 2u32) & (12usize as u16);
}
#[inline]
pub fn transform_ac_codingset_idx1(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (48usize as u16)) >>
4u32) as u32)
}
}
#[inline]
pub fn set_transform_ac_codingset_idx1(&mut self,
val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(48usize as u16);
self._bitfield_1 |= ((val as u32 as u16) << 4u32) & (48usize as u16);
}
#[inline]
pub fn transform_ac_codingset_idx2(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (192usize as u16)) >>
6u32) as u32)
}
}
#[inline]
pub fn set_transform_ac_codingset_idx2(&mut self,
val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(192usize as u16);
self._bitfield_1 |= ((val as u32 as u16) << 6u32) & (192usize as u16);
}
#[inline]
pub fn intra_transform_dc_table(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (256usize as u16)) >>
8u32) as u32)
}
}
#[inline]
pub fn set_intra_transform_dc_table(&mut self,
val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(256usize as u16);
self._bitfield_1 |= ((val as u32 as u16) << 8u32) & (256usize as u16);
}
}
#[test]
fn bindgen_test_layout__VAPictureParameterBufferVC1__bindgen_ty_10() {
assert_eq!(::std::mem::size_of::<_VAPictureParameterBufferVC1__bindgen_ty_10>()
, 4usize);
assert_eq!(::std::mem::align_of::<_VAPictureParameterBufferVC1__bindgen_ty_10>()
, 4usize);
}
impl Clone for _VAPictureParameterBufferVC1__bindgen_ty_10 {
fn clone(&self) -> Self { *self }
}
#[test]
fn bindgen_test_layout__VAPictureParameterBufferVC1() {
assert_eq!(::std::mem::size_of::<_VAPictureParameterBufferVC1>() ,
72usize);
assert_eq!(::std::mem::align_of::<_VAPictureParameterBufferVC1>() ,
4usize);
}
impl Clone for _VAPictureParameterBufferVC1 {
fn clone(&self) -> Self { *self }
}
pub type VAPictureParameterBufferVC1 = _VAPictureParameterBufferVC1;
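// Illustrative sketch, not part of the generated bindings: the bitfield
// accessors above are the intended way to fill the packed VC-1 picture
// fields.  The values used here are arbitrary placeholders, not taken from a
// real stream.
#[allow(dead_code)]
fn example_vc1_picture_fields() -> _VAPictureParameterBufferVC1__bindgen_ty_4__bindgen_ty_1 {
    // Start from an all-zero bitfield word and use the generated setters so
    // the mask/shift bookkeeping stays consistent.
    let mut bits = _VAPictureParameterBufferVC1__bindgen_ty_4__bindgen_ty_1 { _bitfield_1: 0 };
    bits.set_picture_type(1);    // hypothetical picture-type code
    bits.set_top_field_first(1);
    bits.set_is_first_field(1);
    bits
}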
/** VC-1 Bitplane Buffer
There will be at most three bitplanes coded in any picture header. To send
the bitplane data more efficiently, each byte is divided in two nibbles, with
each nibble carrying three bitplanes for one macroblock. The following table
shows the bitplane data arrangement within each nibble based on the picture
type.

Picture Type   Bit3   Bit2        Bit1      Bit0
I or BI               OVERFLAGS   ACPRED    FIELDTX
P                     MVTYPEMB    SKIPMB    DIRECTMB
B                     FORWARDMB   SKIPMB    DIRECTMB

Since only three bitplanes are carried, bit 3 of each nibble is unused.
Within each byte, the lower nibble is for the first MB and the upper nibble is
for the second MB. E.g. the lower nibble of the first byte in the bitplane
buffer is for Macroblock #1 and the upper nibble of the first byte is for
Macroblock #2 in the first row. An illustrative nibble-packing sketch follows
the slice parameter definitions below.
*/
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VASliceParameterBufferVC1 {
pub slice_data_size: ::std::os::raw::c_uint,
pub slice_data_offset: ::std::os::raw::c_uint,
pub slice_data_flag: ::std::os::raw::c_uint,
pub macroblock_offset: ::std::os::raw::c_uint,
pub slice_vertical_position: ::std::os::raw::c_uint,
}
#[test]
fn bindgen_test_layout__VASliceParameterBufferVC1() {
assert_eq!(::std::mem::size_of::<_VASliceParameterBufferVC1>() , 20usize);
assert_eq!(::std::mem::align_of::<_VASliceParameterBufferVC1>() , 4usize);
}
impl Clone for _VASliceParameterBufferVC1 {
fn clone(&self) -> Self { *self }
}
pub type VASliceParameterBufferVC1 = _VASliceParameterBufferVC1;
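// Illustrative sketch, not part of the generated bindings: packing the three
// bitplane bits of two consecutive macroblocks into one bitplane-buffer byte
// as described in the comment above (first MB in the lower nibble, second MB
// in the upper nibble, bit 3 of each nibble unused).
#[allow(dead_code)]
fn pack_vc1_bitplane_byte(first_mb_bits: u8, second_mb_bits: u8) -> u8 {
    // Only the low three bits of each nibble carry bitplane data.
    (first_mb_bits & 0x07) | ((second_mb_bits & 0x07) << 4)
}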
/****************************
* H.264/AVC data structures
****************************/
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAPictureH264 {
pub picture_id: VASurfaceID,
pub frame_idx: ::std::os::raw::c_uint,
pub flags: ::std::os::raw::c_uint,
pub TopFieldOrderCnt: ::std::os::raw::c_int,
pub BottomFieldOrderCnt: ::std::os::raw::c_int,
}
#[test]
fn bindgen_test_layout__VAPictureH264() {
assert_eq!(::std::mem::size_of::<_VAPictureH264>() , 20usize);
assert_eq!(::std::mem::align_of::<_VAPictureH264>() , 4usize);
}
impl Clone for _VAPictureH264 {
fn clone(&self) -> Self { *self }
}
pub type VAPictureH264 = _VAPictureH264;
/** H.264 Picture Parameter Buffer */
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAPictureParameterBufferH264 {
pub CurrPic: VAPictureH264,
pub ReferenceFrames: [VAPictureH264; 16usize],
pub picture_width_in_mbs_minus1: ::std::os::raw::c_ushort,
pub picture_height_in_mbs_minus1: ::std::os::raw::c_ushort,
pub bit_depth_luma_minus8: ::std::os::raw::c_uchar,
pub bit_depth_chroma_minus8: ::std::os::raw::c_uchar,
pub num_ref_frames: ::std::os::raw::c_uchar,
pub seq_fields: _VAPictureParameterBufferH264__bindgen_ty_1,
pub num_slice_groups_minus1: ::std::os::raw::c_uchar,
pub slice_group_map_type: ::std::os::raw::c_uchar,
pub slice_group_change_rate_minus1: ::std::os::raw::c_ushort,
pub pic_init_qp_minus26: ::std::os::raw::c_char,
pub pic_init_qs_minus26: ::std::os::raw::c_char,
pub chroma_qp_index_offset: ::std::os::raw::c_char,
pub second_chroma_qp_index_offset: ::std::os::raw::c_char,
pub pic_fields: _VAPictureParameterBufferH264__bindgen_ty_2,
pub frame_num: ::std::os::raw::c_ushort,
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAPictureParameterBufferH264__bindgen_ty_1 {
pub bits: __BindgenUnionField<_VAPictureParameterBufferH264__bindgen_ty_1__bindgen_ty_1>,
pub value: __BindgenUnionField<::std::os::raw::c_uint>,
pub bindgen_union_field: u32,
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAPictureParameterBufferH264__bindgen_ty_1__bindgen_ty_1 {
pub _bitfield_1: u32,
}
#[test]
fn bindgen_test_layout__VAPictureParameterBufferH264__bindgen_ty_1__bindgen_ty_1() {
assert_eq!(::std::mem::size_of::<_VAPictureParameterBufferH264__bindgen_ty_1__bindgen_ty_1>()
, 4usize);
assert_eq!(::std::mem::align_of::<_VAPictureParameterBufferH264__bindgen_ty_1__bindgen_ty_1>()
, 4usize);
}
impl Clone for _VAPictureParameterBufferH264__bindgen_ty_1__bindgen_ty_1 {
fn clone(&self) -> Self { *self }
}
impl _VAPictureParameterBufferH264__bindgen_ty_1__bindgen_ty_1 {
#[inline]
pub fn chroma_format_idc(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (3usize as u32)) >>
0u32) as u32)
}
}
#[inline]
pub fn set_chroma_format_idc(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(3usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 0u32) & (3usize as u32);
}
#[inline]
pub fn residual_colour_transform_flag(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (4usize as u32)) >>
2u32) as u32)
}
}
#[inline]
pub fn set_residual_colour_transform_flag(&mut self,
val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(4usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 2u32) & (4usize as u32);
}
#[inline]
pub fn gaps_in_frame_num_value_allowed_flag(&self)
-> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (8usize as u32)) >>
3u32) as u32)
}
}
#[inline]
pub fn set_gaps_in_frame_num_value_allowed_flag(&mut self,
val:
::std::os::raw::c_uint) {
self._bitfield_1 &= !(8usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 3u32) & (8usize as u32);
}
#[inline]
pub fn frame_mbs_only_flag(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (16usize as u32)) >>
4u32) as u32)
}
}
#[inline]
pub fn set_frame_mbs_only_flag(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(16usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 4u32) & (16usize as u32);
}
#[inline]
pub fn mb_adaptive_frame_field_flag(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (32usize as u32)) >>
5u32) as u32)
}
}
#[inline]
pub fn set_mb_adaptive_frame_field_flag(&mut self,
val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(32usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 5u32) & (32usize as u32);
}
#[inline]
pub fn direct_8x8_inference_flag(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (64usize as u32)) >>
6u32) as u32)
}
}
#[inline]
pub fn set_direct_8x8_inference_flag(&mut self,
val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(64usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 6u32) & (64usize as u32);
}
#[inline]
pub fn MinLumaBiPredSize8x8(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (128usize as u32)) >>
7u32) as u32)
}
}
#[inline]
pub fn set_MinLumaBiPredSize8x8(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(128usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 7u32) & (128usize as u32);
}
#[inline]
pub fn log2_max_frame_num_minus4(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (3840usize as u32)) >>
8u32) as u32)
}
}
#[inline]
pub fn set_log2_max_frame_num_minus4(&mut self,
val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(3840usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 8u32) & (3840usize as u32);
}
#[inline]
pub fn pic_order_cnt_type(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (12288usize as u32)) >>
12u32) as u32)
}
}
#[inline]
pub fn set_pic_order_cnt_type(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(12288usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 12u32) & (12288usize as u32);
}
#[inline]
pub fn log2_max_pic_order_cnt_lsb_minus4(&self)
-> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (245760usize as u32))
>> 14u32) as u32)
}
}
#[inline]
pub fn set_log2_max_pic_order_cnt_lsb_minus4(&mut self,
val:
::std::os::raw::c_uint) {
self._bitfield_1 &= !(245760usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 14u32) & (245760usize as u32);
}
#[inline]
pub fn delta_pic_order_always_zero_flag(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (262144usize as u32))
>> 18u32) as u32)
}
}
#[inline]
pub fn set_delta_pic_order_always_zero_flag(&mut self,
val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(262144usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 18u32) & (262144usize as u32);
}
}
#[test]
fn bindgen_test_layout__VAPictureParameterBufferH264__bindgen_ty_1() {
assert_eq!(::std::mem::size_of::<_VAPictureParameterBufferH264__bindgen_ty_1>()
, 4usize);
assert_eq!(::std::mem::align_of::<_VAPictureParameterBufferH264__bindgen_ty_1>()
, 4usize);
}
impl Clone for _VAPictureParameterBufferH264__bindgen_ty_1 {
fn clone(&self) -> Self { *self }
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAPictureParameterBufferH264__bindgen_ty_2 {
pub bits: __BindgenUnionField<_VAPictureParameterBufferH264__bindgen_ty_2__bindgen_ty_1>,
pub value: __BindgenUnionField<::std::os::raw::c_uint>,
pub bindgen_union_field: u32,
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAPictureParameterBufferH264__bindgen_ty_2__bindgen_ty_1 {
pub _bitfield_1: u16,
}
#[test]
fn bindgen_test_layout__VAPictureParameterBufferH264__bindgen_ty_2__bindgen_ty_1() {
assert_eq!(::std::mem::size_of::<_VAPictureParameterBufferH264__bindgen_ty_2__bindgen_ty_1>()
, 4usize);
assert_eq!(::std::mem::align_of::<_VAPictureParameterBufferH264__bindgen_ty_2__bindgen_ty_1>()
, 4usize);
}
impl Clone for _VAPictureParameterBufferH264__bindgen_ty_2__bindgen_ty_1 {
fn clone(&self) -> Self { *self }
}
impl _VAPictureParameterBufferH264__bindgen_ty_2__bindgen_ty_1 {
#[inline]
pub fn entropy_coding_mode_flag(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (1usize as u16)) >>
0u32) as u32)
}
}
#[inline]
pub fn set_entropy_coding_mode_flag(&mut self,
val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(1usize as u16);
self._bitfield_1 |= ((val as u32 as u16) << 0u32) & (1usize as u16);
}
#[inline]
pub fn weighted_pred_flag(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (2usize as u16)) >>
1u32) as u32)
}
}
#[inline]
pub fn set_weighted_pred_flag(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(2usize as u16);
self._bitfield_1 |= ((val as u32 as u16) << 1u32) & (2usize as u16);
}
#[inline]
pub fn weighted_bipred_idc(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (12usize as u16)) >>
2u32) as u32)
}
}
#[inline]
pub fn set_weighted_bipred_idc(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(12usize as u16);
self._bitfield_1 |= ((val as u32 as u16) << 2u32) & (12usize as u16);
}
#[inline]
pub fn transform_8x8_mode_flag(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (16usize as u16)) >>
4u32) as u32)
}
}
#[inline]
pub fn set_transform_8x8_mode_flag(&mut self,
val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(16usize as u16);
self._bitfield_1 |= ((val as u32 as u16) << 4u32) & (16usize as u16);
}
#[inline]
pub fn field_pic_flag(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (32usize as u16)) >>
5u32) as u32)
}
}
#[inline]
pub fn set_field_pic_flag(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(32usize as u16);
self._bitfield_1 |= ((val as u32 as u16) << 5u32) & (32usize as u16);
}
#[inline]
pub fn constrained_intra_pred_flag(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (64usize as u16)) >>
6u32) as u32)
}
}
#[inline]
pub fn set_constrained_intra_pred_flag(&mut self,
val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(64usize as u16);
self._bitfield_1 |= ((val as u32 as u16) << 6u32) & (64usize as u16);
}
#[inline]
pub fn pic_order_present_flag(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (128usize as u16)) >>
7u32) as u32)
}
}
#[inline]
pub fn set_pic_order_present_flag(&mut self,
val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(128usize as u16);
self._bitfield_1 |= ((val as u32 as u16) << 7u32) & (128usize as u16);
}
#[inline]
pub fn deblocking_filter_control_present_flag(&self)
-> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (256usize as u16)) >>
8u32) as u32)
}
}
#[inline]
pub fn set_deblocking_filter_control_present_flag(&mut self,
val:
::std::os::raw::c_uint) {
self._bitfield_1 &= !(256usize as u16);
self._bitfield_1 |= ((val as u32 as u16) << 8u32) & (256usize as u16);
}
#[inline]
pub fn redundant_pic_cnt_present_flag(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (512usize as u16)) >>
9u32) as u32)
}
}
#[inline]
pub fn set_redundant_pic_cnt_present_flag(&mut self,
val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(512usize as u16);
self._bitfield_1 |= ((val as u32 as u16) << 9u32) & (512usize as u16);
}
#[inline]
pub fn reference_pic_flag(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (1024usize as u16)) >>
10u32) as u32)
}
}
#[inline]
pub fn set_reference_pic_flag(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(1024usize as u16);
self._bitfield_1 |=
((val as u32 as u16) << 10u32) & (1024usize as u16);
}
}
#[test]
fn bindgen_test_layout__VAPictureParameterBufferH264__bindgen_ty_2() {
assert_eq!(::std::mem::size_of::<_VAPictureParameterBufferH264__bindgen_ty_2>()
, 4usize);
assert_eq!(::std::mem::align_of::<_VAPictureParameterBufferH264__bindgen_ty_2>()
, 4usize);
}
impl Clone for _VAPictureParameterBufferH264__bindgen_ty_2 {
fn clone(&self) -> Self { *self }
}
#[test]
fn bindgen_test_layout__VAPictureParameterBufferH264() {
assert_eq!(::std::mem::size_of::<_VAPictureParameterBufferH264>() ,
368usize);
assert_eq!(::std::mem::align_of::<_VAPictureParameterBufferH264>() ,
4usize);
}
impl Clone for _VAPictureParameterBufferH264 {
fn clone(&self) -> Self { *self }
}
pub type VAPictureParameterBufferH264 = _VAPictureParameterBufferH264;
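// Illustrative sketch, not part of the generated bindings: filling the packed
// H.264 sequence fields through the generated setters.  The flag values below
// are arbitrary examples (a progressive 4:2:0 stream), not defaults mandated
// by the API.
#[allow(dead_code)]
fn example_h264_seq_fields() -> _VAPictureParameterBufferH264__bindgen_ty_1__bindgen_ty_1 {
    let mut bits = _VAPictureParameterBufferH264__bindgen_ty_1__bindgen_ty_1 { _bitfield_1: 0 };
    bits.set_chroma_format_idc(1);         // 1 = 4:2:0 chroma sampling
    bits.set_frame_mbs_only_flag(1);       // frame coding only, no fields
    bits.set_direct_8x8_inference_flag(1);
    bits.set_log2_max_frame_num_minus4(4); // example value: MaxFrameNum = 256
    bits
}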
/** H.264 Inverse Quantization Matrix Buffer */
#[repr(C)]
#[derive(Copy)]
pub struct _VAIQMatrixBufferH264 {
/** \brief 4x4 scaling list, in raster scan order. */
pub ScalingList4x4: [[::std::os::raw::c_uchar; 16usize]; 6usize],
/** \brief 8x8 scaling list, in raster scan order. */
pub ScalingList8x8: [[::std::os::raw::c_uchar; 64usize]; 2usize],
}
#[test]
fn bindgen_test_layout__VAIQMatrixBufferH264() {
assert_eq!(::std::mem::size_of::<_VAIQMatrixBufferH264>() , 224usize);
assert_eq!(::std::mem::align_of::<_VAIQMatrixBufferH264>() , 1usize);
}
impl Clone for _VAIQMatrixBufferH264 {
fn clone(&self) -> Self { *self }
}
pub type VAIQMatrixBufferH264 = _VAIQMatrixBufferH264;
/** H.264 Slice Parameter Buffer */
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VASliceParameterBufferH264 {
pub slice_data_size: ::std::os::raw::c_uint,
/** \brief Byte offset to the NAL Header Unit for this slice. */
pub slice_data_offset: ::std::os::raw::c_uint,
pub slice_data_flag: ::std::os::raw::c_uint,
/**
     * \brief Bit offset from NAL Header Unit to the beginning of slice_data().
*
* This bit offset is relative to and includes the NAL unit byte
* and represents the number of bits parsed in the slice_header()
* after the removal of any emulation prevention bytes in
* there. However, the slice data buffer passed to the hardware is
* the original bitstream, thus including any emulation prevention
* bytes.
*/
pub slice_data_bit_offset: ::std::os::raw::c_ushort,
pub first_mb_in_slice: ::std::os::raw::c_ushort,
pub slice_type: ::std::os::raw::c_uchar,
pub direct_spatial_mv_pred_flag: ::std::os::raw::c_uchar,
pub num_ref_idx_l0_active_minus1: ::std::os::raw::c_uchar,
pub num_ref_idx_l1_active_minus1: ::std::os::raw::c_uchar,
pub cabac_init_idc: ::std::os::raw::c_uchar,
pub slice_qp_delta: ::std::os::raw::c_char,
pub disable_deblocking_filter_idc: ::std::os::raw::c_uchar,
pub slice_alpha_c0_offset_div2: ::std::os::raw::c_char,
pub slice_beta_offset_div2: ::std::os::raw::c_char,
pub RefPicList0: [VAPictureH264; 32usize],
pub RefPicList1: [VAPictureH264; 32usize],
pub luma_log2_weight_denom: ::std::os::raw::c_uchar,
pub chroma_log2_weight_denom: ::std::os::raw::c_uchar,
pub luma_weight_l0_flag: ::std::os::raw::c_uchar,
pub luma_weight_l0: [::std::os::raw::c_short; 32usize],
pub luma_offset_l0: [::std::os::raw::c_short; 32usize],
pub chroma_weight_l0_flag: ::std::os::raw::c_uchar,
pub chroma_weight_l0: [[::std::os::raw::c_short; 2usize]; 32usize],
pub chroma_offset_l0: [[::std::os::raw::c_short; 2usize]; 32usize],
pub luma_weight_l1_flag: ::std::os::raw::c_uchar,
pub luma_weight_l1: [::std::os::raw::c_short; 32usize],
pub luma_offset_l1: [::std::os::raw::c_short; 32usize],
pub chroma_weight_l1_flag: ::std::os::raw::c_uchar,
pub chroma_weight_l1: [[::std::os::raw::c_short; 2usize]; 32usize],
pub chroma_offset_l1: [[::std::os::raw::c_short; 2usize]; 32usize],
}
#[test]
fn bindgen_test_layout__VASliceParameterBufferH264() {
assert_eq!(::std::mem::size_of::<_VASliceParameterBufferH264>() ,
2088usize);
assert_eq!(::std::mem::align_of::<_VASliceParameterBufferH264>() ,
4usize);
}
impl Clone for _VASliceParameterBufferH264 {
fn clone(&self) -> Self { *self }
}
pub type VASliceParameterBufferH264 = _VASliceParameterBufferH264;
pub const VAEncPictureTypeIntra: _bindgen_ty_11 =
_bindgen_ty_11::VAEncPictureTypeIntra;
pub const VAEncPictureTypePredictive: _bindgen_ty_11 =
_bindgen_ty_11::VAEncPictureTypePredictive;
pub const VAEncPictureTypeBidirectional: _bindgen_ty_11 =
_bindgen_ty_11::VAEncPictureTypeBidirectional;
#[repr(u32)]
/****************************
* Common encode data structures
****************************/
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum _bindgen_ty_11 {
VAEncPictureTypeIntra = 0,
VAEncPictureTypePredictive = 1,
VAEncPictureTypeBidirectional = 2,
}
pub use self::_bindgen_ty_11 as VAEncPictureType;
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAEncSliceParameterBuffer {
pub start_row_number: ::std::os::raw::c_uint,
pub slice_height: ::std::os::raw::c_uint,
pub slice_flags: _VAEncSliceParameterBuffer__bindgen_ty_1,
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAEncSliceParameterBuffer__bindgen_ty_1 {
pub bits: __BindgenUnionField<_VAEncSliceParameterBuffer__bindgen_ty_1__bindgen_ty_1>,
pub value: __BindgenUnionField<::std::os::raw::c_uint>,
pub bindgen_union_field: u32,
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAEncSliceParameterBuffer__bindgen_ty_1__bindgen_ty_1 {
pub _bitfield_1: u8,
}
#[test]
fn bindgen_test_layout__VAEncSliceParameterBuffer__bindgen_ty_1__bindgen_ty_1() {
assert_eq!(::std::mem::size_of::<_VAEncSliceParameterBuffer__bindgen_ty_1__bindgen_ty_1>()
, 4usize);
assert_eq!(::std::mem::align_of::<_VAEncSliceParameterBuffer__bindgen_ty_1__bindgen_ty_1>()
, 4usize);
}
impl Clone for _VAEncSliceParameterBuffer__bindgen_ty_1__bindgen_ty_1 {
fn clone(&self) -> Self { *self }
}
impl _VAEncSliceParameterBuffer__bindgen_ty_1__bindgen_ty_1 {
#[inline]
pub fn is_intra(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (1usize as u8)) >>
0u32) as u32)
}
}
#[inline]
pub fn set_is_intra(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(1usize as u8);
self._bitfield_1 |= ((val as u32 as u8) << 0u32) & (1usize as u8);
}
#[inline]
pub fn disable_deblocking_filter_idc(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (6usize as u8)) >>
1u32) as u32)
}
}
#[inline]
pub fn set_disable_deblocking_filter_idc(&mut self,
val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(6usize as u8);
self._bitfield_1 |= ((val as u32 as u8) << 1u32) & (6usize as u8);
}
#[inline]
pub fn uses_long_term_ref(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (8usize as u8)) >>
3u32) as u32)
}
}
#[inline]
pub fn set_uses_long_term_ref(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(8usize as u8);
self._bitfield_1 |= ((val as u32 as u8) << 3u32) & (8usize as u8);
}
#[inline]
pub fn is_long_term_ref(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (16usize as u8)) >>
4u32) as u32)
}
}
#[inline]
pub fn set_is_long_term_ref(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(16usize as u8);
self._bitfield_1 |= ((val as u32 as u8) << 4u32) & (16usize as u8);
}
}
#[test]
fn bindgen_test_layout__VAEncSliceParameterBuffer__bindgen_ty_1() {
assert_eq!(::std::mem::size_of::<_VAEncSliceParameterBuffer__bindgen_ty_1>()
, 4usize);
assert_eq!(::std::mem::align_of::<_VAEncSliceParameterBuffer__bindgen_ty_1>()
, 4usize);
}
impl Clone for _VAEncSliceParameterBuffer__bindgen_ty_1 {
fn clone(&self) -> Self { *self }
}
#[test]
fn bindgen_test_layout__VAEncSliceParameterBuffer() {
assert_eq!(::std::mem::size_of::<_VAEncSliceParameterBuffer>() , 12usize);
assert_eq!(::std::mem::align_of::<_VAEncSliceParameterBuffer>() , 4usize);
}
impl Clone for _VAEncSliceParameterBuffer {
fn clone(&self) -> Self { *self }
}
pub type VAEncSliceParameterBuffer = _VAEncSliceParameterBuffer;
/****************************
* H.263 specific encode data structures
****************************/
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAEncSequenceParameterBufferH263 {
pub intra_period: ::std::os::raw::c_uint,
pub bits_per_second: ::std::os::raw::c_uint,
pub frame_rate: ::std::os::raw::c_uint,
pub initial_qp: ::std::os::raw::c_uint,
pub min_qp: ::std::os::raw::c_uint,
}
#[test]
fn bindgen_test_layout__VAEncSequenceParameterBufferH263() {
assert_eq!(::std::mem::size_of::<_VAEncSequenceParameterBufferH263>() ,
20usize);
assert_eq!(::std::mem::align_of::<_VAEncSequenceParameterBufferH263>() ,
4usize);
}
impl Clone for _VAEncSequenceParameterBufferH263 {
fn clone(&self) -> Self { *self }
}
pub type VAEncSequenceParameterBufferH263 = _VAEncSequenceParameterBufferH263;
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAEncPictureParameterBufferH263 {
pub reference_picture: VASurfaceID,
pub reconstructed_picture: VASurfaceID,
pub coded_buf: VABufferID,
pub picture_width: ::std::os::raw::c_ushort,
pub picture_height: ::std::os::raw::c_ushort,
pub picture_type: VAEncPictureType,
}
#[test]
fn bindgen_test_layout__VAEncPictureParameterBufferH263() {
assert_eq!(::std::mem::size_of::<_VAEncPictureParameterBufferH263>() ,
20usize);
assert_eq!(::std::mem::align_of::<_VAEncPictureParameterBufferH263>() ,
4usize);
}
impl Clone for _VAEncPictureParameterBufferH263 {
fn clone(&self) -> Self { *self }
}
pub type VAEncPictureParameterBufferH263 = _VAEncPictureParameterBufferH263;
/****************************
* MPEG-4 specific encode data structures
****************************/
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAEncSequenceParameterBufferMPEG4 {
pub profile_and_level_indication: ::std::os::raw::c_uchar,
pub intra_period: ::std::os::raw::c_uint,
pub video_object_layer_width: ::std::os::raw::c_uint,
pub video_object_layer_height: ::std::os::raw::c_uint,
pub vop_time_increment_resolution: ::std::os::raw::c_uint,
pub fixed_vop_rate: ::std::os::raw::c_uint,
pub fixed_vop_time_increment: ::std::os::raw::c_uint,
pub bits_per_second: ::std::os::raw::c_uint,
pub frame_rate: ::std::os::raw::c_uint,
pub initial_qp: ::std::os::raw::c_uint,
pub min_qp: ::std::os::raw::c_uint,
}
#[test]
fn bindgen_test_layout__VAEncSequenceParameterBufferMPEG4() {
assert_eq!(::std::mem::size_of::<_VAEncSequenceParameterBufferMPEG4>() ,
44usize);
assert_eq!(::std::mem::align_of::<_VAEncSequenceParameterBufferMPEG4>() ,
4usize);
}
impl Clone for _VAEncSequenceParameterBufferMPEG4 {
fn clone(&self) -> Self { *self }
}
pub type VAEncSequenceParameterBufferMPEG4 =
_VAEncSequenceParameterBufferMPEG4;
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAEncPictureParameterBufferMPEG4 {
pub reference_picture: VASurfaceID,
pub reconstructed_picture: VASurfaceID,
pub coded_buf: VABufferID,
pub picture_width: ::std::os::raw::c_ushort,
pub picture_height: ::std::os::raw::c_ushort,
pub modulo_time_base: ::std::os::raw::c_uint,
pub vop_time_increment: ::std::os::raw::c_uint,
pub picture_type: VAEncPictureType,
}
#[test]
fn bindgen_test_layout__VAEncPictureParameterBufferMPEG4() {
assert_eq!(::std::mem::size_of::<_VAEncPictureParameterBufferMPEG4>() ,
28usize);
assert_eq!(::std::mem::align_of::<_VAEncPictureParameterBufferMPEG4>() ,
4usize);
}
impl Clone for _VAEncPictureParameterBufferMPEG4 {
fn clone(&self) -> Self { *self }
}
pub type VAEncPictureParameterBufferMPEG4 = _VAEncPictureParameterBufferMPEG4;
extern "C" {
/**
     * Creates a buffer for "num_elements" elements of "size" bytes and
     * initializes it with "data".
* if "data" is null, then the contents of the buffer data store
* are undefined.
* Basically there are two ways to get buffer data to the server side. One is
     * to call vaCreateBuffer() with a non-null "data", which results in the data
     * being copied to the data store on the server side. A different method that
* eliminates this copy is to pass null as "data" when calling vaCreateBuffer(),
* and then use vaMapBuffer() to map the data store from the server side to the
* client address space for access.
* Note: image buffers are created by the library, not the client. Please see
* vaCreateImage on how image buffers are managed.
*/
pub fn vaCreateBuffer(dpy: VADisplay, context: VAContextID,
type_: VABufferType, size: ::std::os::raw::c_uint,
num_elements: ::std::os::raw::c_uint,
data: *mut ::std::os::raw::c_void,
buf_id: *mut VABufferID) -> VAStatus;
}
extern "C" {
/**
* Convey to the server how many valid elements are in the buffer.
* e.g. if multiple slice parameters are being held in a single buffer,
* this will communicate to the server the number of slice parameters
* that are valid in the buffer.
*/
pub fn vaBufferSetNumElements(dpy: VADisplay, buf_id: VABufferID,
num_elements: ::std::os::raw::c_uint)
-> VAStatus;
}
/**
* \brief Coded buffer segment.
*
* #VACodedBufferSegment is an element of a linked list describing
* some information on the coded buffer. The coded buffer segment
* could contain either a single NAL unit, or more than one NAL unit.
* It is recommended (but not required) to return a single NAL unit
* in a coded buffer segment, and the implementation should set the
* VA_CODED_BUF_STATUS_SINGLE_NALU status flag if that is the case.
*/
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VACodedBufferSegment {
/**
* \brief Size of the data buffer in this segment (in bytes).
*/
pub size: ::std::os::raw::c_uint,
/** \brief Bit offset into the data buffer where the video data starts. */
pub bit_offset: ::std::os::raw::c_uint,
/** \brief Status set by the driver. See \c VA_CODED_BUF_STATUS_*. */
pub status: ::std::os::raw::c_uint,
/** \brief Reserved for future use. */
pub reserved: ::std::os::raw::c_uint,
/** \brief Pointer to the start of the data buffer. */
pub buf: *mut ::std::os::raw::c_void,
/**
* \brief Pointer to the next #VACodedBufferSegment element,
* or \c NULL if there is none.
*/
pub next: *mut ::std::os::raw::c_void,
}
#[test]
fn bindgen_test_layout__VACodedBufferSegment() {
assert_eq!(::std::mem::size_of::<_VACodedBufferSegment>() , 32usize);
assert_eq!(::std::mem::align_of::<_VACodedBufferSegment>() , 8usize);
}
impl Clone for _VACodedBufferSegment {
fn clone(&self) -> Self { *self }
}
pub type VACodedBufferSegment = _VACodedBufferSegment;
extern "C" {
/**
* Map data store of the buffer into the client's address space
* vaCreateBuffer() needs to be called with "data" set to NULL before
* calling vaMapBuffer()
*
     * if buffer type is VAEncCodedBufferType, pbuf points to a linked list of
     * VACodedBufferSegment, and the list is terminated if "next" is NULL
*/
pub fn vaMapBuffer(dpy: VADisplay, buf_id: VABufferID,
pbuf: *mut *mut ::std::os::raw::c_void) -> VAStatus;
}
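// --- Illustrative usage sketch (not part of the generated bindings) ---
// The vaMapBuffer comment above notes that mapping a buffer of type
// VAEncCodedBufferType yields a linked list of VACodedBufferSegment nodes
// terminated by a NULL "next" pointer. The helper below is a hypothetical
// sketch that walks that list and sums the coded sizes; the function name and
// the error convention (treating any non-zero VAStatus as failure, since
// VA_STATUS_SUCCESS is 0) are assumptions of the sketch, not part of libva.
pub unsafe fn total_coded_size(dpy: VADisplay, coded_buf: VABufferID)
 -> Result<usize, VAStatus> {
    let mut pbuf: *mut ::std::os::raw::c_void = ::std::ptr::null_mut();
    let status = vaMapBuffer(dpy, coded_buf, &mut pbuf);
    if status != 0 { return Err(status); }
    let mut total = 0usize;
    let mut seg = pbuf as *const VACodedBufferSegment;
    // Walk the linked list until the terminating NULL "next" pointer.
    while !seg.is_null() {
        total += (*seg).size as usize;
        seg = (*seg).next as *const VACodedBufferSegment;
    }
    // Unmap once the client is done reading the segments.
    vaUnmapBuffer(dpy, coded_buf);
    Ok(total)
}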
extern "C" {
/**
* After client making changes to a mapped data store, it needs to
* "Unmap" it to let the server know that the data is ready to be
* consumed by the server
*/
pub fn vaUnmapBuffer(dpy: VADisplay, buf_id: VABufferID) -> VAStatus;
}
extern "C" {
/**
* After this call, the buffer is deleted and this buffer_id is no longer valid
     * Only call this if the buffer is not going to be passed to vaRenderPicture
*/
pub fn vaDestroyBuffer(dpy: VADisplay, buffer_id: VABufferID) -> VAStatus;
}
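// --- Illustrative usage sketch (not part of the generated bindings) ---
// The vaCreateBuffer comment above describes two upload paths: copy "data" at
// creation time, or pass NULL and fill the store through vaMapBuffer and
// vaUnmapBuffer. The hypothetical helper below sketches the second, copy-free
// path for a single-element parameter blob; the buffer type is taken as a
// parameter so no specific VABufferType variant is assumed.
pub unsafe fn upload_via_map(dpy: VADisplay, context: VAContextID,
                             buf_type: VABufferType, data: &[u8])
 -> Result<VABufferID, VAStatus> {
    let mut buf_id: VABufferID = 0;
    // Create the data store on the server side without copying anything yet.
    let status = vaCreateBuffer(dpy, context, buf_type,
                                data.len() as ::std::os::raw::c_uint, 1,
                                ::std::ptr::null_mut(), &mut buf_id);
    if status != 0 { return Err(status); }
    let mut pbuf: *mut ::std::os::raw::c_void = ::std::ptr::null_mut();
    let status = vaMapBuffer(dpy, buf_id, &mut pbuf);
    if status != 0 {
        vaDestroyBuffer(dpy, buf_id);
        return Err(status);
    }
    // Fill the mapped store, then hand it back to the server.
    ::std::ptr::copy_nonoverlapping(data.as_ptr(), pbuf as *mut u8, data.len());
    vaUnmapBuffer(dpy, buf_id);
    Ok(buf_id)
}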
/** \brief VA buffer information */
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _bindgen_ty_12 {
/** \brief Buffer handle */
pub handle: usize,
/** \brief Buffer type (See \ref VABufferType). */
pub type_: u32,
/**
* \brief Buffer memory type (See \ref VASurfaceAttribMemoryType).
*
* On input to vaAcquireBufferHandle(), this field can serve as a hint
* to specify the set of memory types the caller is interested in.
* On successful return from vaAcquireBufferHandle(), the field is
* updated with the best matching memory type.
*/
pub mem_type: u32,
/** \brief Size of the underlying buffer. */
pub mem_size: usize,
}
#[test]
fn bindgen_test_layout__bindgen_ty_12() {
assert_eq!(::std::mem::size_of::<_bindgen_ty_12>() , 24usize);
assert_eq!(::std::mem::align_of::<_bindgen_ty_12>() , 8usize);
}
impl Clone for _bindgen_ty_12 {
fn clone(&self) -> Self { *self }
}
pub type VABufferInfo = _bindgen_ty_12;
extern "C" {
/**
* \brief Acquires buffer handle for external API usage
*
* Locks the VA buffer object \ref buf_id for external API usage like
* EGL or OpenCL (OCL). This function is a synchronization point. This
* means that any pending operation is guaranteed to be completed
* prior to returning from the function.
*
* If the referenced VA buffer object is the backing store of a VA
* surface, then this function acts as if vaSyncSurface() on the
* parent surface was called first.
*
* The \ref VABufferInfo argument shall be zero'ed on input. On
* successful output, the data structure is filled in with all the
* necessary buffer level implementation details like handle, type,
* memory type and memory size.
*
* Note: the external API implementation, or the application, can
* express the memory types it is interested in by filling in the \ref
* mem_type field accordingly. On successful output, the memory type
* that fits best the request and that was used is updated in the \ref
* VABufferInfo data structure. If none of the supplied memory types
* is supported, then a \ref VA_STATUS_ERROR_UNSUPPORTED_MEMORY_TYPE
* error is returned.
*
* The \ref VABufferInfo data is valid until vaReleaseBufferHandle()
* is called. Besides, no additional operation is allowed on any of
* the buffer parent object until vaReleaseBufferHandle() is called.
* e.g. decoding into a VA surface backed with the supplied VA buffer
* object \ref buf_id would fail with a \ref VA_STATUS_ERROR_SURFACE_BUSY
* error.
*
* Possible errors:
* - \ref VA_STATUS_ERROR_UNIMPLEMENTED: the VA driver implementation
* does not support this interface
* - \ref VA_STATUS_ERROR_INVALID_DISPLAY: an invalid display was supplied
* - \ref VA_STATUS_ERROR_INVALID_BUFFER: an invalid buffer was supplied
* - \ref VA_STATUS_ERROR_UNSUPPORTED_BUFFERTYPE: the implementation
* does not support exporting buffers of the specified type
* - \ref VA_STATUS_ERROR_UNSUPPORTED_MEMORY_TYPE: none of the requested
* memory types in \ref VABufferInfo.mem_type was supported
*
* @param[in] dpy the VA display
* @param[in] buf_id the VA buffer
* @param[in,out] buf_info the associated VA buffer information
* @return VA_STATUS_SUCCESS if successful
*/
pub fn vaAcquireBufferHandle(dpy: VADisplay, buf_id: VABufferID,
buf_info: *mut VABufferInfo) -> VAStatus;
}
extern "C" {
/**
* \brief Releases buffer after usage from external API
*
* Unlocks the VA buffer object \ref buf_id from external API usage like
* EGL or OpenCL (OCL). This function is a synchronization point. This
* means that any pending operation is guaranteed to be completed
* prior to returning from the function.
*
* The \ref VABufferInfo argument shall point to the original data
* structure that was obtained from vaAcquireBufferHandle(), unaltered.
* This is necessary so that the VA driver implementation could
* deallocate any resources that were needed.
*
* In any case, returning from this function invalidates any contents
     * in \ref VABufferInfo. i.e. the underlying buffer handle is no longer
* valid. Therefore, VA driver implementations are free to reset this
* data structure to safe defaults.
*
* Possible errors:
* - \ref VA_STATUS_ERROR_UNIMPLEMENTED: the VA driver implementation
* does not support this interface
* - \ref VA_STATUS_ERROR_INVALID_DISPLAY: an invalid display was supplied
* - \ref VA_STATUS_ERROR_INVALID_BUFFER: an invalid buffer was supplied
* - \ref VA_STATUS_ERROR_UNSUPPORTED_BUFFERTYPE: the implementation
* does not support exporting buffers of the specified type
*
* @param[in] dpy the VA display
* @param[in] buf_id the VA buffer
* @return VA_STATUS_SUCCESS if successful
*/
pub fn vaReleaseBufferHandle(dpy: VADisplay, buf_id: VABufferID)
-> VAStatus;
}
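// --- Illustrative usage sketch (not part of the generated bindings) ---
// The comments above describe the acquire/release protocol for exporting a VA
// buffer to an external API such as EGL or OpenCL. The hypothetical helper
// below zeroes the VABufferInfo on input as required, lets the caller supply a
// memory-type hint, and releases the handle again; the name and the hint
// convention are assumptions of the sketch.
pub unsafe fn with_buffer_handle(dpy: VADisplay, buf_id: VABufferID,
                                 mem_type_hint: u32) -> Result<(), VAStatus> {
    // The structure shall be zero'ed on input; mem_type may carry a hint for
    // the set of memory types the caller is interested in.
    let mut info: VABufferInfo = ::std::mem::zeroed();
    info.mem_type = mem_type_hint;
    let status = vaAcquireBufferHandle(dpy, buf_id, &mut info);
    if status != 0 { return Err(status); }
    // ... hand info.handle / info.mem_type / info.mem_size to the external API ...
    // Release as soon as the external usage is done; after this call the
    // contents of `info` (including the handle) are no longer valid.
    let status = vaReleaseBufferHandle(dpy, buf_id);
    if status != 0 { return Err(status); }
    Ok(())
}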
extern "C" {
/**
* Get ready to decode a picture to a target surface
*/
pub fn vaBeginPicture(dpy: VADisplay, context: VAContextID,
render_target: VASurfaceID) -> VAStatus;
}
extern "C" {
/**
* Send decode buffers to the server.
* Buffers are automatically destroyed afterwards
*/
pub fn vaRenderPicture(dpy: VADisplay, context: VAContextID,
buffers: *mut VABufferID,
num_buffers: ::std::os::raw::c_int) -> VAStatus;
}
extern "C" {
/**
     * Mark the end of rendering for a picture.
* The server should start processing all pending operations for this
* surface. This call is non-blocking. The client can start another
* Begin/Render/End sequence on a different render target.
*/
pub fn vaEndPicture(dpy: VADisplay, context: VAContextID) -> VAStatus;
}
extern "C" {
/**
* This function blocks until all pending operations on the render target
* have been completed. Upon return it is safe to use the render target for a
* different picture.
*/
pub fn vaSyncSurface(dpy: VADisplay, render_target: VASurfaceID)
-> VAStatus;
}
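// --- Illustrative usage sketch (not part of the generated bindings) ---
// The comments on vaBeginPicture, vaRenderPicture and vaEndPicture above
// describe the per-picture submission sequence, and vaSyncSurface blocks until
// the render target is ready. The hypothetical helper below wires the four
// calls together for one picture; the slice of parameter/data buffer IDs is
// assumed to have been created with vaCreateBuffer beforehand.
pub unsafe fn decode_picture(dpy: VADisplay, context: VAContextID,
                             target: VASurfaceID, buffers: &mut [VABufferID])
 -> VAStatus {
    let status = vaBeginPicture(dpy, context, target);
    if status != 0 { return status; }
    // The buffers are consumed (and destroyed) by the server after rendering.
    let status = vaRenderPicture(dpy, context, buffers.as_mut_ptr(),
                                 buffers.len() as ::std::os::raw::c_int);
    if status != 0 { return status; }
    // Non-blocking: the server starts processing the pending operations.
    let status = vaEndPicture(dpy, context);
    if status != 0 { return status; }
    // Block until the surface can safely be reused or displayed.
    vaSyncSurface(dpy, target)
}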
pub const VASurfaceRendering: _bindgen_ty_13 =
_bindgen_ty_13::VASurfaceRendering;
pub const VASurfaceDisplaying: _bindgen_ty_13 =
_bindgen_ty_13::VASurfaceDisplaying;
pub const VASurfaceReady: _bindgen_ty_13 = _bindgen_ty_13::VASurfaceReady;
pub const VASurfaceSkipped: _bindgen_ty_13 = _bindgen_ty_13::VASurfaceSkipped;
#[repr(u32)]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum _bindgen_ty_13 {
VASurfaceRendering = 1,
VASurfaceDisplaying = 2,
VASurfaceReady = 4,
VASurfaceSkipped = 8,
}
pub use self::_bindgen_ty_13 as VASurfaceStatus;
extern "C" {
/**
* Find out any pending ops on the render target
*/
pub fn vaQuerySurfaceStatus(dpy: VADisplay, render_target: VASurfaceID,
status: *mut VASurfaceStatus) -> VAStatus;
}
pub const VADecodeSliceMissing: _bindgen_ty_14 =
_bindgen_ty_14::VADecodeSliceMissing;
pub const VADecodeMBError: _bindgen_ty_14 = _bindgen_ty_14::VADecodeMBError;
#[repr(u32)]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum _bindgen_ty_14 { VADecodeSliceMissing = 0, VADecodeMBError = 1, }
pub use self::_bindgen_ty_14 as VADecodeErrorType;
/**
 * The client calls vaQuerySurfaceError with VA_STATUS_ERROR_DECODING_ERROR; the server side returns
 * an array of VASurfaceDecodeMBErrors structures, and the array is terminated by setting status=-1
*/
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VASurfaceDecodeMBErrors {
pub status: ::std::os::raw::c_int,
pub start_mb: ::std::os::raw::c_uint,
pub end_mb: ::std::os::raw::c_uint,
pub decode_error_type: VADecodeErrorType,
}
#[test]
fn bindgen_test_layout__VASurfaceDecodeMBErrors() {
assert_eq!(::std::mem::size_of::<_VASurfaceDecodeMBErrors>() , 16usize);
assert_eq!(::std::mem::align_of::<_VASurfaceDecodeMBErrors>() , 4usize);
}
impl Clone for _VASurfaceDecodeMBErrors {
fn clone(&self) -> Self { *self }
}
pub type VASurfaceDecodeMBErrors = _VASurfaceDecodeMBErrors;
extern "C" {
/**
* After the application gets VA_STATUS_ERROR_DECODING_ERROR after calling vaSyncSurface(),
* it can call vaQuerySurfaceError to find out further details on the particular error.
* VA_STATUS_ERROR_DECODING_ERROR should be passed in as "error_status",
* upon the return, error_info will point to an array of _VASurfaceDecodeMBErrors structure,
* which is allocated and filled by libVA with detailed information on the missing or error macroblocks.
* The array is terminated if "status==-1" is detected.
*/
pub fn vaQuerySurfaceError(dpy: VADisplay, surface: VASurfaceID,
error_status: VAStatus,
error_info: *mut *mut ::std::os::raw::c_void)
-> VAStatus;
}
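// --- Illustrative usage sketch (not part of the generated bindings) ---
// As described above, when vaSyncSurface reports a decoding error the client
// can ask for per-macroblock details; the returned array is owned by libVA and
// terminated by an entry with status == -1. The hypothetical helper below
// copies the entries into a Vec; "error_status" is the VAStatus value that
// vaSyncSurface previously returned.
pub unsafe fn collect_decode_errors(dpy: VADisplay, surface: VASurfaceID,
                                    error_status: VAStatus)
 -> Result<Vec<VASurfaceDecodeMBErrors>, VAStatus> {
    let mut info: *mut ::std::os::raw::c_void = ::std::ptr::null_mut();
    let status = vaQuerySurfaceError(dpy, surface, error_status, &mut info);
    if status != 0 { return Err(status); }
    let mut out = Vec::new();
    let mut entry = info as *const VASurfaceDecodeMBErrors;
    // Walk the libVA-owned array until the status == -1 terminator.
    while !entry.is_null() && (*entry).status != -1 {
        out.push(*entry);
        entry = entry.offset(1);
    }
    Ok(out)
}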
#[repr(C)]
#[derive(Debug, Copy, Default)]
pub struct _VAImageFormat {
pub fourcc: ::std::os::raw::c_uint,
pub byte_order: ::std::os::raw::c_uint,
pub bits_per_pixel: ::std::os::raw::c_uint,
pub depth: ::std::os::raw::c_uint,
pub red_mask: ::std::os::raw::c_uint,
pub green_mask: ::std::os::raw::c_uint,
pub blue_mask: ::std::os::raw::c_uint,
pub alpha_mask: ::std::os::raw::c_uint,
}
#[test]
fn bindgen_test_layout__VAImageFormat() {
assert_eq!(::std::mem::size_of::<_VAImageFormat>() , 32usize);
assert_eq!(::std::mem::align_of::<_VAImageFormat>() , 4usize);
}
impl Clone for _VAImageFormat {
fn clone(&self) -> Self { *self }
}
pub type VAImageFormat = _VAImageFormat;
pub type VAImageID = VAGenericID;
#[repr(C)]
#[derive(Debug, Copy, Default)]
pub struct _VAImage {
pub image_id: VAImageID,
pub format: VAImageFormat,
pub buf: VABufferID,
pub width: ::std::os::raw::c_ushort,
pub height: ::std::os::raw::c_ushort,
pub data_size: ::std::os::raw::c_uint,
pub num_planes: ::std::os::raw::c_uint,
pub pitches: [::std::os::raw::c_uint; 3usize],
pub offsets: [::std::os::raw::c_uint; 3usize],
pub num_palette_entries: ::std::os::raw::c_int,
pub entry_bytes: ::std::os::raw::c_int,
pub component_order: [::std::os::raw::c_char; 4usize],
}
#[test]
fn bindgen_test_layout__VAImage() {
assert_eq!(::std::mem::size_of::<_VAImage>() , 88usize);
assert_eq!(::std::mem::align_of::<_VAImage>() , 4usize);
}
impl Clone for _VAImage {
fn clone(&self) -> Self { *self }
}
pub type VAImage = _VAImage;
extern "C" {
/** Get maximum number of image formats supported by the implementation */
pub fn vaMaxNumImageFormats(dpy: VADisplay) -> ::std::os::raw::c_int;
}
extern "C" {
/**
* Query supported image formats
* The caller must provide a "format_list" array that can hold at
* least vaMaxNumImageFormats() entries. The actual number of formats
* returned in "format_list" is returned in "num_formats".
*/
pub fn vaQueryImageFormats(dpy: VADisplay,
format_list: *mut VAImageFormat,
num_formats: *mut ::std::os::raw::c_int)
-> VAStatus;
}
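// --- Illustrative usage sketch (not part of the generated bindings) ---
// Per the comment above, the caller sizes the format list with
// vaMaxNumImageFormats and the driver reports how many entries it actually
// wrote. The hypothetical helper below wraps that pattern in a Vec.
pub unsafe fn query_image_formats(dpy: VADisplay)
 -> Result<Vec<VAImageFormat>, VAStatus> {
    let max = vaMaxNumImageFormats(dpy);
    if max <= 0 { return Ok(Vec::new()); }
    let mut formats: Vec<VAImageFormat> = vec![Default::default(); max as usize];
    let mut num: ::std::os::raw::c_int = 0;
    let status = vaQueryImageFormats(dpy, formats.as_mut_ptr(), &mut num);
    if status != 0 { return Err(status); }
    // Keep only the entries the driver actually filled in.
    formats.truncate(num as usize);
    Ok(formats)
}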
extern "C" {
/**
* Create a VAImage structure
* The width and height fields returned in the VAImage structure may get
* enlarged for some YUV formats. Upon return from this function,
* image->buf has been created and proper storage allocated by the library.
* The client can access the image through the Map/Unmap calls.
*/
pub fn vaCreateImage(dpy: VADisplay, format: *mut VAImageFormat,
width: ::std::os::raw::c_int,
height: ::std::os::raw::c_int, image: *mut VAImage)
-> VAStatus;
}
extern "C" {
/**
* Should call DestroyImage before destroying the surface it is bound to
*/
pub fn vaDestroyImage(dpy: VADisplay, image: VAImageID) -> VAStatus;
}
extern "C" {
pub fn vaSetImagePalette(dpy: VADisplay, image: VAImageID,
palette: *mut ::std::os::raw::c_uchar)
-> VAStatus;
}
extern "C" {
/**
     * Retrieve surface data into a VAImage
* Image must be in a format supported by the implementation
*/
pub fn vaGetImage(dpy: VADisplay, surface: VASurfaceID,
x: ::std::os::raw::c_int, y: ::std::os::raw::c_int,
width: ::std::os::raw::c_uint,
height: ::std::os::raw::c_uint, image: VAImageID)
-> VAStatus;
}
extern "C" {
/**
* Copy data from a VAImage to a surface
* Image must be in a format supported by the implementation
* Returns a VA_STATUS_ERROR_SURFACE_BUSY if the surface
* shouldn't be rendered into when this is called
*/
pub fn vaPutImage(dpy: VADisplay, surface: VASurfaceID, image: VAImageID,
src_x: ::std::os::raw::c_int,
src_y: ::std::os::raw::c_int,
src_width: ::std::os::raw::c_uint,
src_height: ::std::os::raw::c_uint,
dest_x: ::std::os::raw::c_int,
dest_y: ::std::os::raw::c_int,
dest_width: ::std::os::raw::c_uint,
dest_height: ::std::os::raw::c_uint) -> VAStatus;
}
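// --- Illustrative usage sketch (not part of the generated bindings) ---
// The vaGetImage/vaPutImage comments above describe the indirect access path
// that clients fall back to when vaDeriveImage is not available. The
// hypothetical helper below creates a VAImage in a caller-supplied format and
// copies a surface into it; mapping image.buf and destroying the image with
// vaDestroyImage are left to the caller.
pub unsafe fn read_back_surface(dpy: VADisplay, surface: VASurfaceID,
                                format: &mut VAImageFormat,
                                width: ::std::os::raw::c_int,
                                height: ::std::os::raw::c_int)
 -> Result<VAImage, VAStatus> {
    let mut image: VAImage = Default::default();
    // The library may enlarge width/height for some YUV formats.
    let status = vaCreateImage(dpy, format, width, height, &mut image);
    if status != 0 { return Err(status); }
    let status = vaGetImage(dpy, surface, 0, 0,
                            width as ::std::os::raw::c_uint,
                            height as ::std::os::raw::c_uint,
                            image.image_id);
    if status != 0 {
        vaDestroyImage(dpy, image.image_id);
        return Err(status);
    }
    Ok(image)
}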
extern "C" {
/**
* Derive an VAImage from an existing surface.
* This interface will derive a VAImage and corresponding image buffer from
* an existing VA Surface. The image buffer can then be mapped/unmapped for
* direct CPU access. This operation is only possible on implementations with
* direct rendering capabilities and internal surface formats that can be
* represented with a VAImage. When the operation is not possible this interface
* will return VA_STATUS_ERROR_OPERATION_FAILED. Clients should then fall back
* to using vaCreateImage + vaPutImage to accomplish the same task in an
* indirect manner.
*
* Implementations should only return success when the resulting image buffer
     * would be usable with vaMap/Unmap.
     *
     * When directly accessing a surface special care must be taken to ensure
     * proper synchronization with the graphics hardware. Clients should call
     * vaQuerySurfaceStatus to ensure that a surface is not the target of concurrent
* rendering or currently being displayed by an overlay.
*
* Additionally nothing about the contents of a surface should be assumed
* following a vaPutSurface. Implementations are free to modify the surface for
* scaling or subpicture blending within a call to vaPutImage.
*
* Calls to vaPutImage or vaGetImage using the same surface from which the image
* has been derived will return VA_STATUS_ERROR_SURFACE_BUSY. vaPutImage or
* vaGetImage with other surfaces is supported.
*
* An image created with vaDeriveImage should be freed with vaDestroyImage. The
* image and image buffer structures will be destroyed; however, the underlying
* surface will remain unchanged until freed with vaDestroySurfaces.
*/
pub fn vaDeriveImage(dpy: VADisplay, surface: VASurfaceID,
image: *mut VAImage) -> VAStatus;
}
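// --- Illustrative usage sketch (not part of the generated bindings) ---
// The vaDeriveImage comment above explains that direct CPU access may not be
// possible, in which case clients should fall back to vaCreateImage plus
// vaGetImage (as in the read_back_surface sketch above). The hypothetical
// helper below tries the derive path and maps the backing buffer; the caller
// is expected to vaUnmapBuffer(image.buf) and vaDestroyImage(image.image_id)
// when done.
pub unsafe fn derive_and_map(dpy: VADisplay, surface: VASurfaceID)
 -> Result<(VAImage, *mut ::std::os::raw::c_void), VAStatus> {
    let mut image: VAImage = Default::default();
    let status = vaDeriveImage(dpy, surface, &mut image);
    if status != 0 {
        // e.g. VA_STATUS_ERROR_OPERATION_FAILED: fall back to the indirect
        // vaCreateImage + vaGetImage path instead of direct access.
        return Err(status);
    }
    let mut pixels: *mut ::std::os::raw::c_void = ::std::ptr::null_mut();
    let status = vaMapBuffer(dpy, image.buf, &mut pixels);
    if status != 0 {
        vaDestroyImage(dpy, image.image_id);
        return Err(status);
    }
    Ok((image, pixels))
}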
/**
* Subpictures
* Subpicture is a special type of image that can be blended
* with a surface during vaPutSurface(). Subpicture can be used to render
* DVD sub-titles or closed captioning text etc.
*/
pub type VASubpictureID = VAGenericID;
extern "C" {
/** Get maximum number of subpicture formats supported by the implementation */
pub fn vaMaxNumSubpictureFormats(dpy: VADisplay) -> ::std::os::raw::c_int;
}
extern "C" {
/**
* Query supported subpicture formats
* The caller must provide a "format_list" array that can hold at
     * least vaMaxNumSubpictureFormats() entries. The flags array holds the flag
* for each format to indicate additional capabilities for that format. The actual
* number of formats returned in "format_list" is returned in "num_formats".
     * flags: returned value to indicate additional capabilities
* VA_SUBPICTURE_CHROMA_KEYING - supports chroma-keying
* VA_SUBPICTURE_GLOBAL_ALPHA - supports global alpha
* VA_SUBPICTURE_DESTINATION_IS_SCREEN_COORD - supports unscaled screen relative subpictures for On Screen Display
*/
pub fn vaQuerySubpictureFormats(dpy: VADisplay,
format_list: *mut VAImageFormat,
flags: *mut ::std::os::raw::c_uint,
num_formats: *mut ::std::os::raw::c_uint)
-> VAStatus;
}
extern "C" {
/**
* Subpictures are created with an image associated.
*/
pub fn vaCreateSubpicture(dpy: VADisplay, image: VAImageID,
subpicture: *mut VASubpictureID) -> VAStatus;
}
extern "C" {
/**
     * Destroy the subpicture before destroying the image it is associated to
*/
pub fn vaDestroySubpicture(dpy: VADisplay, subpicture: VASubpictureID)
-> VAStatus;
}
extern "C" {
/**
* Bind an image to the subpicture. This image will now be associated with
* the subpicture instead of the one at creation.
*/
pub fn vaSetSubpictureImage(dpy: VADisplay, subpicture: VASubpictureID,
image: VAImageID) -> VAStatus;
}
extern "C" {
/**
* If chromakey is enabled, then the area where the source value falls within
* the chromakey [min, max] range is transparent
* The chromakey component format is the following:
* For RGB: [0:7] Red [8:15] Blue [16:23] Green
* For YUV: [0:7] V [8:15] U [16:23] Y
* The chromakey mask can be used to mask out certain components for chromakey
     * comparison
*/
pub fn vaSetSubpictureChromakey(dpy: VADisplay,
subpicture: VASubpictureID,
chromakey_min: ::std::os::raw::c_uint,
chromakey_max: ::std::os::raw::c_uint,
chromakey_mask: ::std::os::raw::c_uint)
-> VAStatus;
}
extern "C" {
/**
* Global alpha value is between 0 and 1. A value of 1 means fully opaque and
* a value of 0 means fully transparent. If per-pixel alpha is also specified then
* the overall alpha is per-pixel alpha multiplied by the global alpha
*/
pub fn vaSetSubpictureGlobalAlpha(dpy: VADisplay,
subpicture: VASubpictureID,
global_alpha: f32) -> VAStatus;
}
extern "C" {
/**
* vaAssociateSubpicture associates the subpicture with target_surfaces.
* It defines the region mapping between the subpicture and the target
* surfaces through source and destination rectangles (with the same width and height).
* Both will be displayed at the next call to vaPutSurface. Additional
     * associations before the call to vaPutSurface simply override the association.
*/
pub fn vaAssociateSubpicture(dpy: VADisplay, subpicture: VASubpictureID,
target_surfaces: *mut VASurfaceID,
num_surfaces: ::std::os::raw::c_int,
src_x: ::std::os::raw::c_short,
src_y: ::std::os::raw::c_short,
src_width: ::std::os::raw::c_ushort,
src_height: ::std::os::raw::c_ushort,
dest_x: ::std::os::raw::c_short,
dest_y: ::std::os::raw::c_short,
dest_width: ::std::os::raw::c_ushort,
dest_height: ::std::os::raw::c_ushort,
flags: ::std::os::raw::c_uint) -> VAStatus;
}
extern "C" {
/**
* vaDeassociateSubpicture removes the association of the subpicture with target_surfaces.
*/
pub fn vaDeassociateSubpicture(dpy: VADisplay, subpicture: VASubpictureID,
target_surfaces: *mut VASurfaceID,
num_surfaces: ::std::os::raw::c_int)
-> VAStatus;
}
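// --- Illustrative usage sketch (not part of the generated bindings) ---
// The subpicture comments above describe the lifecycle: create a subpicture
// from an existing VAImage, associate it with target surfaces so it is blended
// at the next vaPutSurface, and later deassociate/destroy it (the subpicture
// before the image it was created from). The hypothetical helper below covers
// creation plus one association of the full image rectangle; the zero flags
// and the geometry convention are assumptions of the sketch.
pub unsafe fn attach_subpicture(dpy: VADisplay, image: &VAImage,
                                surfaces: &mut [VASurfaceID],
                                dest_width: ::std::os::raw::c_ushort,
                                dest_height: ::std::os::raw::c_ushort)
 -> Result<VASubpictureID, VAStatus> {
    let mut subpic: VASubpictureID = 0;
    let status = vaCreateSubpicture(dpy, image.image_id, &mut subpic);
    if status != 0 { return Err(status); }
    let status = vaAssociateSubpicture(
        dpy, subpic, surfaces.as_mut_ptr(),
        surfaces.len() as ::std::os::raw::c_int,
        0, 0, image.width, image.height,   // source rectangle (whole image)
        0, 0, dest_width, dest_height,     // destination rectangle
        0);                                // flags
    if status != 0 {
        vaDestroySubpicture(dpy, subpic);
        return Err(status);
    }
    Ok(subpic)
}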
pub const VADISPLAYATTRIB_BLE_OFF: _bindgen_ty_15 =
_bindgen_ty_15::VADISPLAYATTRIB_BLE_OFF;
pub const VADISPLAYATTRIB_BLE_LOW: _bindgen_ty_15 =
_bindgen_ty_15::VADISPLAYATTRIB_BLE_LOW;
pub const VADISPLAYATTRIB_BLE_MEDIUM: _bindgen_ty_15 =
_bindgen_ty_15::VADISPLAYATTRIB_BLE_MEDIUM;
pub const VADISPLAYATTRIB_BLE_HIGH: _bindgen_ty_15 =
_bindgen_ty_15::VADISPLAYATTRIB_BLE_HIGH;
pub const VADISPLAYATTRIB_BLE_NONE: _bindgen_ty_15 =
_bindgen_ty_15::VADISPLAYATTRIB_BLE_NONE;
#[repr(u32)]
/**
* Display attributes
* Display attributes are used to control things such as contrast, hue, saturation,
* brightness etc. in the rendering process. The application can query what
* attributes are supported by the driver, and then set the appropriate attributes
* before calling vaPutSurface()
*/
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum _bindgen_ty_15 {
VADISPLAYATTRIB_BLE_OFF = 0,
VADISPLAYATTRIB_BLE_LOW = 1,
VADISPLAYATTRIB_BLE_MEDIUM = 2,
VADISPLAYATTRIB_BLE_HIGH = 3,
VADISPLAYATTRIB_BLE_NONE = 4,
}
pub use self::_bindgen_ty_15 as VADisplayAttribBLEMode;
pub const VADisplayAttribBrightness: _bindgen_ty_16 =
_bindgen_ty_16::VADisplayAttribBrightness;
pub const VADisplayAttribContrast: _bindgen_ty_16 =
_bindgen_ty_16::VADisplayAttribContrast;
pub const VADisplayAttribHue: _bindgen_ty_16 =
_bindgen_ty_16::VADisplayAttribHue;
pub const VADisplayAttribSaturation: _bindgen_ty_16 =
_bindgen_ty_16::VADisplayAttribSaturation;
pub const VADisplayAttribBackgroundColor: _bindgen_ty_16 =
_bindgen_ty_16::VADisplayAttribBackgroundColor;
pub const VADisplayAttribDirectSurface: _bindgen_ty_16 =
_bindgen_ty_16::VADisplayAttribDirectSurface;
pub const VADisplayAttribRotation: _bindgen_ty_16 =
_bindgen_ty_16::VADisplayAttribRotation;
pub const VADisplayAttribOutofLoopDeblock: _bindgen_ty_16 =
_bindgen_ty_16::VADisplayAttribOutofLoopDeblock;
pub const VADisplayAttribBLEBlackMode: _bindgen_ty_16 =
_bindgen_ty_16::VADisplayAttribBLEBlackMode;
pub const VADisplayAttribBLEWhiteMode: _bindgen_ty_16 =
_bindgen_ty_16::VADisplayAttribBLEWhiteMode;
pub const VADisplayAttribBlueStretch: _bindgen_ty_16 =
_bindgen_ty_16::VADisplayAttribBlueStretch;
pub const VADisplayAttribSkinColorCorrection: _bindgen_ty_16 =
_bindgen_ty_16::VADisplayAttribSkinColorCorrection;
pub const VADisplayAttribCSCMatrix: _bindgen_ty_16 =
_bindgen_ty_16::VADisplayAttribCSCMatrix;
pub const VADisplayAttribBlendColor: _bindgen_ty_16 =
_bindgen_ty_16::VADisplayAttribBlendColor;
pub const VADisplayAttribOverlayAutoPaintColorKey: _bindgen_ty_16 =
_bindgen_ty_16::VADisplayAttribOverlayAutoPaintColorKey;
pub const VADisplayAttribOverlayColorKey: _bindgen_ty_16 =
_bindgen_ty_16::VADisplayAttribOverlayColorKey;
pub const VADisplayAttribRenderMode: _bindgen_ty_16 =
_bindgen_ty_16::VADisplayAttribRenderMode;
pub const VADisplayAttribRenderDevice: _bindgen_ty_16 =
_bindgen_ty_16::VADisplayAttribRenderDevice;
pub const VADisplayAttribRenderRect: _bindgen_ty_16 =
_bindgen_ty_16::VADisplayAttribRenderRect;
#[repr(u32)]
/** Currently defined display attribute types */
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum _bindgen_ty_16 {
VADisplayAttribBrightness = 0,
VADisplayAttribContrast = 1,
VADisplayAttribHue = 2,
VADisplayAttribSaturation = 3,
VADisplayAttribBackgroundColor = 4,
VADisplayAttribDirectSurface = 5,
VADisplayAttribRotation = 6,
VADisplayAttribOutofLoopDeblock = 7,
VADisplayAttribBLEBlackMode = 8,
VADisplayAttribBLEWhiteMode = 9,
VADisplayAttribBlueStretch = 10,
VADisplayAttribSkinColorCorrection = 11,
VADisplayAttribCSCMatrix = 12,
VADisplayAttribBlendColor = 13,
VADisplayAttribOverlayAutoPaintColorKey = 14,
VADisplayAttribOverlayColorKey = 15,
VADisplayAttribRenderMode = 16,
VADisplayAttribRenderDevice = 17,
VADisplayAttribRenderRect = 18,
}
pub use self::_bindgen_ty_16 as VADisplayAttribType;
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VADisplayAttribute {
pub type_: VADisplayAttribType,
pub min_value: ::std::os::raw::c_int,
pub max_value: ::std::os::raw::c_int,
pub value: ::std::os::raw::c_int,
pub flags: ::std::os::raw::c_uint,
}
#[test]
fn bindgen_test_layout__VADisplayAttribute() {
assert_eq!(::std::mem::size_of::<_VADisplayAttribute>() , 20usize);
assert_eq!(::std::mem::align_of::<_VADisplayAttribute>() , 4usize);
}
impl Clone for _VADisplayAttribute {
fn clone(&self) -> Self { *self }
}
pub type VADisplayAttribute = _VADisplayAttribute;
extern "C" {
    /** Get maximum number of display attributes supported by the implementation */
pub fn vaMaxNumDisplayAttributes(dpy: VADisplay) -> ::std::os::raw::c_int;
}
extern "C" {
/**
* Query display attributes
* The caller must provide a "attr_list" array that can hold at
* least vaMaxNumDisplayAttributes() entries. The actual number of attributes
* returned in "attr_list" is returned in "num_attributes".
*/
pub fn vaQueryDisplayAttributes(dpy: VADisplay,
attr_list: *mut VADisplayAttribute,
num_attributes:
*mut ::std::os::raw::c_int)
-> VAStatus;
}
extern "C" {
/**
* Get display attributes
* This function returns the current attribute values in "attr_list".
* Only attributes returned with VA_DISPLAY_ATTRIB_GETTABLE set in the "flags" field
* from vaQueryDisplayAttributes() can have their values retrieved.
*/
pub fn vaGetDisplayAttributes(dpy: VADisplay,
attr_list: *mut VADisplayAttribute,
num_attributes: ::std::os::raw::c_int)
-> VAStatus;
}
extern "C" {
/**
* Set display attributes
* Only attributes returned with VA_DISPLAY_ATTRIB_SETTABLE set in the "flags" field
* from vaQueryDisplayAttributes() can be set. If the attribute is not settable or
* the value is out of range, the function returns VA_STATUS_ERROR_ATTR_NOT_SUPPORTED
*/
pub fn vaSetDisplayAttributes(dpy: VADisplay,
attr_list: *mut VADisplayAttribute,
num_attributes: ::std::os::raw::c_int)
-> VAStatus;
}
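// --- Illustrative usage sketch (not part of the generated bindings) ---
// The display-attribute comments above describe the usual flow: size the list
// with vaMaxNumDisplayAttributes, query the supported attributes, then get or
// set the ones whose flags mark them as gettable/settable. The hypothetical
// helper below returns the queried list; checking the flags before calling
// vaGetDisplayAttributes or vaSetDisplayAttributes is left to the caller.
pub unsafe fn query_display_attributes(dpy: VADisplay)
 -> Result<Vec<VADisplayAttribute>, VAStatus> {
    let max = vaMaxNumDisplayAttributes(dpy);
    if max <= 0 { return Ok(Vec::new()); }
    let mut attrs: Vec<VADisplayAttribute> = Vec::with_capacity(max as usize);
    for _ in 0..max {
        // Zero-initialised placeholder entries; the driver overwrites them.
        attrs.push(::std::mem::zeroed());
    }
    let mut num: ::std::os::raw::c_int = 0;
    let status = vaQueryDisplayAttributes(dpy, attrs.as_mut_ptr(), &mut num);
    if status != 0 { return Err(status); }
    attrs.truncate(num as usize);
    Ok(attrs)
}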
/****************************
* HEVC data structures
****************************/
/**
* \brief Description of picture properties of those in DPB surfaces.
*
* If only progressive scan is supported, each surface contains one whole
* frame picture.
 * Otherwise, each surface contains the two fields of a whole picture.
 * In this case, two entries of ReferenceFrames[] may share the same picture_id
 * value.
*/
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAPictureHEVC {
/** \brief reconstructed picture buffer surface index
* invalid when taking value VA_INVALID_SURFACE.
*/
pub picture_id: VASurfaceID,
/** \brief picture order count.
     * in HEVC, POCs for the top and bottom fields of the same picture should
     * take different values.
*/
pub pic_order_cnt: i32,
pub flags: u32,
}
#[test]
fn bindgen_test_layout__VAPictureHEVC() {
assert_eq!(::std::mem::size_of::<_VAPictureHEVC>() , 12usize);
assert_eq!(::std::mem::align_of::<_VAPictureHEVC>() , 4usize);
}
impl Clone for _VAPictureHEVC {
fn clone(&self) -> Self { *self }
}
pub type VAPictureHEVC = _VAPictureHEVC;
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAPictureParameterBufferHEVC {
pub CurrPic: VAPictureHEVC,
pub ReferenceFrames: [VAPictureHEVC; 15usize],
pub pic_width_in_luma_samples: u16,
pub pic_height_in_luma_samples: u16,
pub pic_fields: _VAPictureParameterBufferHEVC__bindgen_ty_1,
pub sps_max_dec_pic_buffering_minus1: u8,
pub bit_depth_luma_minus8: u8,
pub bit_depth_chroma_minus8: u8,
pub pcm_sample_bit_depth_luma_minus1: u8,
pub pcm_sample_bit_depth_chroma_minus1: u8,
pub log2_min_luma_coding_block_size_minus3: u8,
pub log2_diff_max_min_luma_coding_block_size: u8,
pub log2_min_transform_block_size_minus2: u8,
pub log2_diff_max_min_transform_block_size: u8,
pub log2_min_pcm_luma_coding_block_size_minus3: u8,
pub log2_diff_max_min_pcm_luma_coding_block_size: u8,
pub max_transform_hierarchy_depth_intra: u8,
pub max_transform_hierarchy_depth_inter: u8,
pub init_qp_minus26: i8,
pub diff_cu_qp_delta_depth: u8,
pub pps_cb_qp_offset: i8,
pub pps_cr_qp_offset: i8,
pub log2_parallel_merge_level_minus2: u8,
pub num_tile_columns_minus1: u8,
pub num_tile_rows_minus1: u8,
pub column_width_minus1: [u16; 19usize],
pub row_height_minus1: [u16; 21usize],
pub slice_parsing_fields: _VAPictureParameterBufferHEVC__bindgen_ty_2,
pub log2_max_pic_order_cnt_lsb_minus4: u8,
pub num_short_term_ref_pic_sets: u8,
pub num_long_term_ref_pic_sps: u8,
pub num_ref_idx_l0_default_active_minus1: u8,
pub num_ref_idx_l1_default_active_minus1: u8,
pub pps_beta_offset_div2: i8,
pub pps_tc_offset_div2: i8,
pub num_extra_slice_header_bits: u8,
pub st_rps_bits: u32,
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAPictureParameterBufferHEVC__bindgen_ty_1 {
pub bits: __BindgenUnionField<_VAPictureParameterBufferHEVC__bindgen_ty_1__bindgen_ty_1>,
pub value: __BindgenUnionField<u32>,
pub bindgen_union_field: u32,
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAPictureParameterBufferHEVC__bindgen_ty_1__bindgen_ty_1 {
pub _bitfield_1: u32,
}
#[test]
fn bindgen_test_layout__VAPictureParameterBufferHEVC__bindgen_ty_1__bindgen_ty_1() {
assert_eq!(::std::mem::size_of::<_VAPictureParameterBufferHEVC__bindgen_ty_1__bindgen_ty_1>()
, 4usize);
assert_eq!(::std::mem::align_of::<_VAPictureParameterBufferHEVC__bindgen_ty_1__bindgen_ty_1>()
, 4usize);
}
impl Clone for _VAPictureParameterBufferHEVC__bindgen_ty_1__bindgen_ty_1 {
fn clone(&self) -> Self { *self }
}
impl _VAPictureParameterBufferHEVC__bindgen_ty_1__bindgen_ty_1 {
#[inline]
pub fn chroma_format_idc(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (3usize as u32)) >>
0u32) as u32)
}
}
#[inline]
pub fn set_chroma_format_idc(&mut self, val: u32) {
self._bitfield_1 &= !(3usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 0u32) & (3usize as u32);
}
#[inline]
pub fn separate_colour_plane_flag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (4usize as u32)) >>
2u32) as u32)
}
}
#[inline]
pub fn set_separate_colour_plane_flag(&mut self, val: u32) {
self._bitfield_1 &= !(4usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 2u32) & (4usize as u32);
}
#[inline]
pub fn pcm_enabled_flag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (8usize as u32)) >>
3u32) as u32)
}
}
#[inline]
pub fn set_pcm_enabled_flag(&mut self, val: u32) {
self._bitfield_1 &= !(8usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 3u32) & (8usize as u32);
}
#[inline]
pub fn scaling_list_enabled_flag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (16usize as u32)) >>
4u32) as u32)
}
}
#[inline]
pub fn set_scaling_list_enabled_flag(&mut self, val: u32) {
self._bitfield_1 &= !(16usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 4u32) & (16usize as u32);
}
#[inline]
pub fn transform_skip_enabled_flag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (32usize as u32)) >>
5u32) as u32)
}
}
#[inline]
pub fn set_transform_skip_enabled_flag(&mut self, val: u32) {
self._bitfield_1 &= !(32usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 5u32) & (32usize as u32);
}
#[inline]
pub fn amp_enabled_flag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (64usize as u32)) >>
6u32) as u32)
}
}
#[inline]
pub fn set_amp_enabled_flag(&mut self, val: u32) {
self._bitfield_1 &= !(64usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 6u32) & (64usize as u32);
}
#[inline]
pub fn strong_intra_smoothing_enabled_flag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (128usize as u32)) >>
7u32) as u32)
}
}
#[inline]
pub fn set_strong_intra_smoothing_enabled_flag(&mut self, val: u32) {
self._bitfield_1 &= !(128usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 7u32) & (128usize as u32);
}
#[inline]
pub fn sign_data_hiding_enabled_flag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (256usize as u32)) >>
8u32) as u32)
}
}
#[inline]
pub fn set_sign_data_hiding_enabled_flag(&mut self, val: u32) {
self._bitfield_1 &= !(256usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 8u32) & (256usize as u32);
}
#[inline]
pub fn constrained_intra_pred_flag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (512usize as u32)) >>
9u32) as u32)
}
}
#[inline]
pub fn set_constrained_intra_pred_flag(&mut self, val: u32) {
self._bitfield_1 &= !(512usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 9u32) & (512usize as u32);
}
#[inline]
pub fn cu_qp_delta_enabled_flag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (1024usize as u32)) >>
10u32) as u32)
}
}
#[inline]
pub fn set_cu_qp_delta_enabled_flag(&mut self, val: u32) {
self._bitfield_1 &= !(1024usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 10u32) & (1024usize as u32);
}
#[inline]
pub fn weighted_pred_flag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (2048usize as u32)) >>
11u32) as u32)
}
}
#[inline]
pub fn set_weighted_pred_flag(&mut self, val: u32) {
self._bitfield_1 &= !(2048usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 11u32) & (2048usize as u32);
}
#[inline]
pub fn weighted_bipred_flag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (4096usize as u32)) >>
12u32) as u32)
}
}
#[inline]
pub fn set_weighted_bipred_flag(&mut self, val: u32) {
self._bitfield_1 &= !(4096usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 12u32) & (4096usize as u32);
}
#[inline]
pub fn transquant_bypass_enabled_flag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (8192usize as u32)) >>
13u32) as u32)
}
}
#[inline]
pub fn set_transquant_bypass_enabled_flag(&mut self, val: u32) {
self._bitfield_1 &= !(8192usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 13u32) & (8192usize as u32);
}
#[inline]
pub fn tiles_enabled_flag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (16384usize as u32)) >>
14u32) as u32)
}
}
#[inline]
pub fn set_tiles_enabled_flag(&mut self, val: u32) {
self._bitfield_1 &= !(16384usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 14u32) & (16384usize as u32);
}
#[inline]
pub fn entropy_coding_sync_enabled_flag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (32768usize as u32)) >>
15u32) as u32)
}
}
#[inline]
pub fn set_entropy_coding_sync_enabled_flag(&mut self, val: u32) {
self._bitfield_1 &= !(32768usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 15u32) & (32768usize as u32);
}
#[inline]
pub fn pps_loop_filter_across_slices_enabled_flag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (65536usize as u32)) >>
16u32) as u32)
}
}
#[inline]
pub fn set_pps_loop_filter_across_slices_enabled_flag(&mut self,
val: u32) {
self._bitfield_1 &= !(65536usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 16u32) & (65536usize as u32);
}
#[inline]
pub fn loop_filter_across_tiles_enabled_flag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (131072usize as u32))
>> 17u32) as u32)
}
}
#[inline]
pub fn set_loop_filter_across_tiles_enabled_flag(&mut self, val: u32) {
self._bitfield_1 &= !(131072usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 17u32) & (131072usize as u32);
}
#[inline]
pub fn pcm_loop_filter_disabled_flag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (262144usize as u32))
>> 18u32) as u32)
}
}
#[inline]
pub fn set_pcm_loop_filter_disabled_flag(&mut self, val: u32) {
self._bitfield_1 &= !(262144usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 18u32) & (262144usize as u32);
}
#[inline]
pub fn NoPicReorderingFlag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (524288usize as u32))
>> 19u32) as u32)
}
}
#[inline]
pub fn set_NoPicReorderingFlag(&mut self, val: u32) {
self._bitfield_1 &= !(524288usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 19u32) & (524288usize as u32);
}
#[inline]
pub fn NoBiPredFlag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (1048576usize as u32))
>> 20u32) as u32)
}
}
#[inline]
pub fn set_NoBiPredFlag(&mut self, val: u32) {
self._bitfield_1 &= !(1048576usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 20u32) & (1048576usize as u32);
}
#[inline]
pub fn ReservedBits(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 &
(4292870144usize as u32)) >> 21u32) as
u32)
}
}
#[inline]
pub fn set_ReservedBits(&mut self, val: u32) {
self._bitfield_1 &= !(4292870144usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 21u32) & (4292870144usize as u32);
}
}
#[test]
fn bindgen_test_layout__VAPictureParameterBufferHEVC__bindgen_ty_1() {
assert_eq!(::std::mem::size_of::<_VAPictureParameterBufferHEVC__bindgen_ty_1>()
, 4usize);
assert_eq!(::std::mem::align_of::<_VAPictureParameterBufferHEVC__bindgen_ty_1>()
, 4usize);
}
impl Clone for _VAPictureParameterBufferHEVC__bindgen_ty_1 {
fn clone(&self) -> Self { *self }
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAPictureParameterBufferHEVC__bindgen_ty_2 {
pub bits: __BindgenUnionField<_VAPictureParameterBufferHEVC__bindgen_ty_2__bindgen_ty_1>,
pub value: __BindgenUnionField<u32>,
pub bindgen_union_field: u32,
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAPictureParameterBufferHEVC__bindgen_ty_2__bindgen_ty_1 {
pub _bitfield_1: u32,
}
#[test]
fn bindgen_test_layout__VAPictureParameterBufferHEVC__bindgen_ty_2__bindgen_ty_1() {
assert_eq!(::std::mem::size_of::<_VAPictureParameterBufferHEVC__bindgen_ty_2__bindgen_ty_1>()
, 4usize);
assert_eq!(::std::mem::align_of::<_VAPictureParameterBufferHEVC__bindgen_ty_2__bindgen_ty_1>()
, 4usize);
}
impl Clone for _VAPictureParameterBufferHEVC__bindgen_ty_2__bindgen_ty_1 {
fn clone(&self) -> Self { *self }
}
impl _VAPictureParameterBufferHEVC__bindgen_ty_2__bindgen_ty_1 {
#[inline]
pub fn lists_modification_present_flag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (1usize as u32)) >>
0u32) as u32)
}
}
#[inline]
pub fn set_lists_modification_present_flag(&mut self, val: u32) {
self._bitfield_1 &= !(1usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 0u32) & (1usize as u32);
}
#[inline]
pub fn long_term_ref_pics_present_flag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (2usize as u32)) >>
1u32) as u32)
}
}
#[inline]
pub fn set_long_term_ref_pics_present_flag(&mut self, val: u32) {
self._bitfield_1 &= !(2usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 1u32) & (2usize as u32);
}
#[inline]
pub fn sps_temporal_mvp_enabled_flag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (4usize as u32)) >>
2u32) as u32)
}
}
#[inline]
pub fn set_sps_temporal_mvp_enabled_flag(&mut self, val: u32) {
self._bitfield_1 &= !(4usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 2u32) & (4usize as u32);
}
#[inline]
pub fn cabac_init_present_flag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (8usize as u32)) >>
3u32) as u32)
}
}
#[inline]
pub fn set_cabac_init_present_flag(&mut self, val: u32) {
self._bitfield_1 &= !(8usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 3u32) & (8usize as u32);
}
#[inline]
pub fn output_flag_present_flag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (16usize as u32)) >>
4u32) as u32)
}
}
#[inline]
pub fn set_output_flag_present_flag(&mut self, val: u32) {
self._bitfield_1 &= !(16usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 4u32) & (16usize as u32);
}
#[inline]
pub fn dependent_slice_segments_enabled_flag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (32usize as u32)) >>
5u32) as u32)
}
}
#[inline]
pub fn set_dependent_slice_segments_enabled_flag(&mut self, val: u32) {
self._bitfield_1 &= !(32usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 5u32) & (32usize as u32);
}
#[inline]
pub fn pps_slice_chroma_qp_offsets_present_flag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (64usize as u32)) >>
6u32) as u32)
}
}
#[inline]
pub fn set_pps_slice_chroma_qp_offsets_present_flag(&mut self, val: u32) {
self._bitfield_1 &= !(64usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 6u32) & (64usize as u32);
}
#[inline]
pub fn sample_adaptive_offset_enabled_flag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (128usize as u32)) >>
7u32) as u32)
}
}
#[inline]
pub fn set_sample_adaptive_offset_enabled_flag(&mut self, val: u32) {
self._bitfield_1 &= !(128usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 7u32) & (128usize as u32);
}
#[inline]
pub fn deblocking_filter_override_enabled_flag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (256usize as u32)) >>
8u32) as u32)
}
}
#[inline]
pub fn set_deblocking_filter_override_enabled_flag(&mut self, val: u32) {
self._bitfield_1 &= !(256usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 8u32) & (256usize as u32);
}
#[inline]
pub fn pps_disable_deblocking_filter_flag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (512usize as u32)) >>
9u32) as u32)
}
}
#[inline]
pub fn set_pps_disable_deblocking_filter_flag(&mut self, val: u32) {
self._bitfield_1 &= !(512usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 9u32) & (512usize as u32);
}
#[inline]
pub fn slice_segment_header_extension_present_flag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (1024usize as u32)) >>
10u32) as u32)
}
}
#[inline]
pub fn set_slice_segment_header_extension_present_flag(&mut self,
val: u32) {
self._bitfield_1 &= !(1024usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 10u32) & (1024usize as u32);
}
#[inline]
pub fn RapPicFlag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (2048usize as u32)) >>
11u32) as u32)
}
}
#[inline]
pub fn set_RapPicFlag(&mut self, val: u32) {
self._bitfield_1 &= !(2048usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 11u32) & (2048usize as u32);
}
#[inline]
pub fn IdrPicFlag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (4096usize as u32)) >>
12u32) as u32)
}
}
#[inline]
pub fn set_IdrPicFlag(&mut self, val: u32) {
self._bitfield_1 &= !(4096usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 12u32) & (4096usize as u32);
}
#[inline]
pub fn IntraPicFlag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (8192usize as u32)) >>
13u32) as u32)
}
}
#[inline]
pub fn set_IntraPicFlag(&mut self, val: u32) {
self._bitfield_1 &= !(8192usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 13u32) & (8192usize as u32);
}
#[inline]
pub fn ReservedBits(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 &
(4294950912usize as u32)) >> 14u32) as
u32)
}
}
#[inline]
pub fn set_ReservedBits(&mut self, val: u32) {
self._bitfield_1 &= !(4294950912usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 14u32) & (4294950912usize as u32);
}
}
#[test]
fn bindgen_test_layout__VAPictureParameterBufferHEVC__bindgen_ty_2() {
assert_eq!(::std::mem::size_of::<_VAPictureParameterBufferHEVC__bindgen_ty_2>()
, 4usize);
assert_eq!(::std::mem::align_of::<_VAPictureParameterBufferHEVC__bindgen_ty_2>()
, 4usize);
}
impl Clone for _VAPictureParameterBufferHEVC__bindgen_ty_2 {
fn clone(&self) -> Self { *self }
}
#[test]
fn bindgen_test_layout__VAPictureParameterBufferHEVC() {
assert_eq!(::std::mem::size_of::<_VAPictureParameterBufferHEVC>() ,
316usize);
assert_eq!(::std::mem::align_of::<_VAPictureParameterBufferHEVC>() ,
4usize);
}
impl Clone for _VAPictureParameterBufferHEVC {
fn clone(&self) -> Self { *self }
}
pub type VAPictureParameterBufferHEVC = _VAPictureParameterBufferHEVC;
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VASliceParameterBufferBaseHEVC {
pub slice_data_size: u32,
pub slice_data_offset: u32,
pub slice_data_flag: u16,
}
#[test]
fn bindgen_test_layout__VASliceParameterBufferBaseHEVC() {
assert_eq!(::std::mem::size_of::<_VASliceParameterBufferBaseHEVC>() ,
12usize);
assert_eq!(::std::mem::align_of::<_VASliceParameterBufferBaseHEVC>() ,
4usize);
}
impl Clone for _VASliceParameterBufferBaseHEVC {
fn clone(&self) -> Self { *self }
}
pub type VASliceParameterBufferBaseHEVC = _VASliceParameterBufferBaseHEVC;
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VASliceParameterBufferHEVC {
pub slice_data_size: u32,
pub slice_data_offset: u32,
pub slice_data_flag: u16,
pub slice_data_byte_offset: u32,
pub slice_segment_address: u32,
pub RefPicList: [[u8; 15usize]; 2usize],
pub LongSliceFlags: _VASliceParameterBufferHEVC__bindgen_ty_1,
pub collocated_ref_idx: u8,
pub num_ref_idx_l0_active_minus1: u8,
pub num_ref_idx_l1_active_minus1: u8,
pub slice_qp_delta: i8,
pub slice_cb_qp_offset: i8,
pub slice_cr_qp_offset: i8,
pub slice_beta_offset_div2: i8,
pub slice_tc_offset_div2: i8,
pub luma_log2_weight_denom: u8,
pub delta_chroma_log2_weight_denom: i8,
pub delta_luma_weight_l0: [i8; 15usize],
pub luma_offset_l0: [i8; 15usize],
pub delta_chroma_weight_l0: [[i8; 2usize]; 15usize],
pub ChromaOffsetL0: [[i8; 2usize]; 15usize],
pub delta_luma_weight_l1: [i8; 15usize],
pub luma_offset_l1: [i8; 15usize],
pub delta_chroma_weight_l1: [[i8; 2usize]; 15usize],
pub ChromaOffsetL1: [[i8; 2usize]; 15usize],
pub five_minus_max_num_merge_cand: u8,
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VASliceParameterBufferHEVC__bindgen_ty_1 {
pub value: __BindgenUnionField<u32>,
pub fields: __BindgenUnionField<_VASliceParameterBufferHEVC__bindgen_ty_1__bindgen_ty_1>,
pub bindgen_union_field: u32,
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VASliceParameterBufferHEVC__bindgen_ty_1__bindgen_ty_1 {
pub _bitfield_1: u32,
}
#[test]
fn bindgen_test_layout__VASliceParameterBufferHEVC__bindgen_ty_1__bindgen_ty_1() {
assert_eq!(::std::mem::size_of::<_VASliceParameterBufferHEVC__bindgen_ty_1__bindgen_ty_1>()
, 4usize);
assert_eq!(::std::mem::align_of::<_VASliceParameterBufferHEVC__bindgen_ty_1__bindgen_ty_1>()
, 4usize);
}
impl Clone for _VASliceParameterBufferHEVC__bindgen_ty_1__bindgen_ty_1 {
fn clone(&self) -> Self { *self }
}
impl _VASliceParameterBufferHEVC__bindgen_ty_1__bindgen_ty_1 {
#[inline]
pub fn LastSliceOfPic(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (1usize as u32)) >>
0u32) as u32)
}
}
#[inline]
pub fn set_LastSliceOfPic(&mut self, val: u32) {
self._bitfield_1 &= !(1usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 0u32) & (1usize as u32);
}
#[inline]
pub fn dependent_slice_segment_flag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (2usize as u32)) >>
1u32) as u32)
}
}
#[inline]
pub fn set_dependent_slice_segment_flag(&mut self, val: u32) {
self._bitfield_1 &= !(2usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 1u32) & (2usize as u32);
}
#[inline]
pub fn slice_type(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (12usize as u32)) >>
2u32) as u32)
}
}
#[inline]
pub fn set_slice_type(&mut self, val: u32) {
self._bitfield_1 &= !(12usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 2u32) & (12usize as u32);
}
#[inline]
pub fn color_plane_id(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (48usize as u32)) >>
4u32) as u32)
}
}
#[inline]
pub fn set_color_plane_id(&mut self, val: u32) {
self._bitfield_1 &= !(48usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 4u32) & (48usize as u32);
}
#[inline]
pub fn slice_sao_luma_flag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (64usize as u32)) >>
6u32) as u32)
}
}
#[inline]
pub fn set_slice_sao_luma_flag(&mut self, val: u32) {
self._bitfield_1 &= !(64usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 6u32) & (64usize as u32);
}
#[inline]
pub fn slice_sao_chroma_flag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (128usize as u32)) >>
7u32) as u32)
}
}
#[inline]
pub fn set_slice_sao_chroma_flag(&mut self, val: u32) {
self._bitfield_1 &= !(128usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 7u32) & (128usize as u32);
}
#[inline]
pub fn mvd_l1_zero_flag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (256usize as u32)) >>
8u32) as u32)
}
}
#[inline]
pub fn set_mvd_l1_zero_flag(&mut self, val: u32) {
self._bitfield_1 &= !(256usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 8u32) & (256usize as u32);
}
#[inline]
pub fn cabac_init_flag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (512usize as u32)) >>
9u32) as u32)
}
}
#[inline]
pub fn set_cabac_init_flag(&mut self, val: u32) {
self._bitfield_1 &= !(512usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 9u32) & (512usize as u32);
}
#[inline]
pub fn slice_temporal_mvp_enabled_flag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (1024usize as u32)) >>
10u32) as u32)
}
}
#[inline]
pub fn set_slice_temporal_mvp_enabled_flag(&mut self, val: u32) {
self._bitfield_1 &= !(1024usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 10u32) & (1024usize as u32);
}
#[inline]
pub fn slice_deblocking_filter_disabled_flag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (2048usize as u32)) >>
11u32) as u32)
}
}
#[inline]
pub fn set_slice_deblocking_filter_disabled_flag(&mut self, val: u32) {
self._bitfield_1 &= !(2048usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 11u32) & (2048usize as u32);
}
#[inline]
pub fn collocated_from_l0_flag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (4096usize as u32)) >>
12u32) as u32)
}
}
#[inline]
pub fn set_collocated_from_l0_flag(&mut self, val: u32) {
self._bitfield_1 &= !(4096usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 12u32) & (4096usize as u32);
}
#[inline]
pub fn slice_loop_filter_across_slices_enabled_flag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (8192usize as u32)) >>
13u32) as u32)
}
}
#[inline]
pub fn set_slice_loop_filter_across_slices_enabled_flag(&mut self,
val: u32) {
self._bitfield_1 &= !(8192usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 13u32) & (8192usize as u32);
}
#[inline]
pub fn reserved(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 &
(4294950912usize as u32)) >> 14u32) as
u32)
}
}
#[inline]
pub fn set_reserved(&mut self, val: u32) {
self._bitfield_1 &= !(4294950912usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 14u32) & (4294950912usize as u32);
}
}
#[test]
fn bindgen_test_layout__VASliceParameterBufferHEVC__bindgen_ty_1() {
assert_eq!(::std::mem::size_of::<_VASliceParameterBufferHEVC__bindgen_ty_1>()
, 4usize);
assert_eq!(::std::mem::align_of::<_VASliceParameterBufferHEVC__bindgen_ty_1>()
, 4usize);
}
impl Clone for _VASliceParameterBufferHEVC__bindgen_ty_1 {
fn clone(&self) -> Self { *self }
}
#[test]
fn bindgen_test_layout__VASliceParameterBufferHEVC() {
assert_eq!(::std::mem::size_of::<_VASliceParameterBufferHEVC>() ,
248usize);
assert_eq!(::std::mem::align_of::<_VASliceParameterBufferHEVC>() ,
4usize);
}
impl Clone for _VASliceParameterBufferHEVC {
fn clone(&self) -> Self { *self }
}
pub type VASliceParameterBufferHEVC = _VASliceParameterBufferHEVC;
#[repr(C)]
#[derive(Copy)]
pub struct _VAIQMatrixBufferHEVC {
pub ScalingList4x4: [[u8; 16usize]; 6usize],
pub ScalingList8x8: [[u8; 64usize]; 6usize],
pub ScalingList16x16: [[u8; 64usize]; 6usize],
pub ScalingList32x32: [[u8; 64usize]; 2usize],
pub ScalingListDC16x16: [u8; 6usize],
pub ScalingListDC32x32: [u8; 2usize],
}
#[test]
fn bindgen_test_layout__VAIQMatrixBufferHEVC() {
assert_eq!(::std::mem::size_of::<_VAIQMatrixBufferHEVC>() , 1000usize);
assert_eq!(::std::mem::align_of::<_VAIQMatrixBufferHEVC>() , 1usize);
}
impl Clone for _VAIQMatrixBufferHEVC {
fn clone(&self) -> Self { *self }
}
pub type VAIQMatrixBufferHEVC = _VAIQMatrixBufferHEVC;
#[repr(C)]
pub struct _VAPictureParameterBufferJPEGBaseline {
pub picture_width: ::std::os::raw::c_ushort,
pub picture_height: ::std::os::raw::c_ushort,
pub components: [_VAPictureParameterBufferJPEGBaseline__bindgen_ty_1; 255usize],
pub num_components: ::std::os::raw::c_uchar,
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAPictureParameterBufferJPEGBaseline__bindgen_ty_1 {
pub component_id: ::std::os::raw::c_uchar,
pub h_sampling_factor: ::std::os::raw::c_uchar,
pub v_sampling_factor: ::std::os::raw::c_uchar,
pub quantiser_table_selector: ::std::os::raw::c_uchar,
}
#[test]
fn bindgen_test_layout__VAPictureParameterBufferJPEGBaseline__bindgen_ty_1() {
assert_eq!(::std::mem::size_of::<_VAPictureParameterBufferJPEGBaseline__bindgen_ty_1>()
, 4usize);
assert_eq!(::std::mem::align_of::<_VAPictureParameterBufferJPEGBaseline__bindgen_ty_1>()
, 1usize);
}
impl Clone for _VAPictureParameterBufferJPEGBaseline__bindgen_ty_1 {
fn clone(&self) -> Self { *self }
}
#[test]
fn bindgen_test_layout__VAPictureParameterBufferJPEGBaseline() {
assert_eq!(::std::mem::size_of::<_VAPictureParameterBufferJPEGBaseline>()
, 1026usize);
assert_eq!(::std::mem::align_of::<_VAPictureParameterBufferJPEGBaseline>()
, 2usize);
}
pub type VAPictureParameterBufferJPEGBaseline =
_VAPictureParameterBufferJPEGBaseline;
#[repr(C)]
#[derive(Copy)]
pub struct _VAIQMatrixBufferJPEGBaseline {
pub load_quantiser_table: [::std::os::raw::c_uchar; 4usize],
pub quantiser_table: [[::std::os::raw::c_uchar; 64usize]; 4usize],
}
#[test]
fn bindgen_test_layout__VAIQMatrixBufferJPEGBaseline() {
assert_eq!(::std::mem::size_of::<_VAIQMatrixBufferJPEGBaseline>() ,
260usize);
assert_eq!(::std::mem::align_of::<_VAIQMatrixBufferJPEGBaseline>() ,
1usize);
}
impl Clone for _VAIQMatrixBufferJPEGBaseline {
fn clone(&self) -> Self { *self }
}
pub type VAIQMatrixBufferJPEGBaseline = _VAIQMatrixBufferJPEGBaseline;
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VASliceParameterBufferJPEGBaseline {
pub slice_data_size: ::std::os::raw::c_uint,
pub slice_data_offset: ::std::os::raw::c_uint,
pub slice_data_flag: ::std::os::raw::c_uint,
pub slice_horizontal_position: ::std::os::raw::c_uint,
pub slice_vertical_position: ::std::os::raw::c_uint,
pub components: [_VASliceParameterBufferJPEGBaseline__bindgen_ty_1; 4usize],
pub num_components: ::std::os::raw::c_uchar,
pub restart_interval: ::std::os::raw::c_ushort,
pub num_mcus: ::std::os::raw::c_uint,
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VASliceParameterBufferJPEGBaseline__bindgen_ty_1 {
pub component_selector: ::std::os::raw::c_uchar,
pub dc_table_selector: ::std::os::raw::c_uchar,
pub ac_table_selector: ::std::os::raw::c_uchar,
}
#[test]
fn bindgen_test_layout__VASliceParameterBufferJPEGBaseline__bindgen_ty_1() {
assert_eq!(::std::mem::size_of::<_VASliceParameterBufferJPEGBaseline__bindgen_ty_1>()
, 3usize);
assert_eq!(::std::mem::align_of::<_VASliceParameterBufferJPEGBaseline__bindgen_ty_1>()
, 1usize);
}
impl Clone for _VASliceParameterBufferJPEGBaseline__bindgen_ty_1 {
fn clone(&self) -> Self { *self }
}
#[test]
fn bindgen_test_layout__VASliceParameterBufferJPEGBaseline() {
assert_eq!(::std::mem::size_of::<_VASliceParameterBufferJPEGBaseline>() ,
40usize);
assert_eq!(::std::mem::align_of::<_VASliceParameterBufferJPEGBaseline>() ,
4usize);
}
impl Clone for _VASliceParameterBufferJPEGBaseline {
fn clone(&self) -> Self { *self }
}
pub type VASliceParameterBufferJPEGBaseline =
_VASliceParameterBufferJPEGBaseline;
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VABoolCoderContextVPX {
pub range: ::std::os::raw::c_uchar,
pub value: ::std::os::raw::c_uchar,
pub count: ::std::os::raw::c_uchar,
}
#[test]
fn bindgen_test_layout__VABoolCoderContextVPX() {
assert_eq!(::std::mem::size_of::<_VABoolCoderContextVPX>() , 3usize);
assert_eq!(::std::mem::align_of::<_VABoolCoderContextVPX>() , 1usize);
}
impl Clone for _VABoolCoderContextVPX {
fn clone(&self) -> Self { *self }
}
pub type VABoolCoderContextVPX = _VABoolCoderContextVPX;
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAPictureParameterBufferVP8 {
pub frame_width: ::std::os::raw::c_uint,
pub frame_height: ::std::os::raw::c_uint,
pub last_ref_frame: VASurfaceID,
pub golden_ref_frame: VASurfaceID,
pub alt_ref_frame: VASurfaceID,
pub out_of_loop_frame: VASurfaceID,
pub pic_fields: _VAPictureParameterBufferVP8__bindgen_ty_1,
pub mb_segment_tree_probs: [::std::os::raw::c_uchar; 3usize],
pub loop_filter_level: [::std::os::raw::c_uchar; 4usize],
pub loop_filter_deltas_ref_frame: [::std::os::raw::c_char; 4usize],
pub loop_filter_deltas_mode: [::std::os::raw::c_char; 4usize],
pub prob_skip_false: ::std::os::raw::c_uchar,
pub prob_intra: ::std::os::raw::c_uchar,
pub prob_last: ::std::os::raw::c_uchar,
pub prob_gf: ::std::os::raw::c_uchar,
pub y_mode_probs: [::std::os::raw::c_uchar; 4usize],
pub uv_mode_probs: [::std::os::raw::c_uchar; 3usize],
pub mv_probs: [[::std::os::raw::c_uchar; 19usize]; 2usize],
pub bool_coder_ctx: VABoolCoderContextVPX,
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAPictureParameterBufferVP8__bindgen_ty_1 {
pub bits: __BindgenUnionField<_VAPictureParameterBufferVP8__bindgen_ty_1__bindgen_ty_1>,
pub value: __BindgenUnionField<::std::os::raw::c_uint>,
pub bindgen_union_field: u32,
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAPictureParameterBufferVP8__bindgen_ty_1__bindgen_ty_1 {
pub _bitfield_1: u32,
}
#[test]
fn bindgen_test_layout__VAPictureParameterBufferVP8__bindgen_ty_1__bindgen_ty_1() {
assert_eq!(::std::mem::size_of::<_VAPictureParameterBufferVP8__bindgen_ty_1__bindgen_ty_1>()
, 4usize);
assert_eq!(::std::mem::align_of::<_VAPictureParameterBufferVP8__bindgen_ty_1__bindgen_ty_1>()
, 4usize);
}
impl Clone for _VAPictureParameterBufferVP8__bindgen_ty_1__bindgen_ty_1 {
fn clone(&self) -> Self { *self }
}
impl _VAPictureParameterBufferVP8__bindgen_ty_1__bindgen_ty_1 {
#[inline]
pub fn key_frame(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (1usize as u32)) >>
0u32) as u32)
}
}
#[inline]
pub fn set_key_frame(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(1usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 0u32) & (1usize as u32);
}
#[inline]
pub fn version(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (14usize as u32)) >>
1u32) as u32)
}
}
#[inline]
pub fn set_version(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(14usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 1u32) & (14usize as u32);
}
#[inline]
pub fn segmentation_enabled(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (16usize as u32)) >>
4u32) as u32)
}
}
#[inline]
pub fn set_segmentation_enabled(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(16usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 4u32) & (16usize as u32);
}
#[inline]
pub fn update_mb_segmentation_map(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (32usize as u32)) >>
5u32) as u32)
}
}
#[inline]
pub fn set_update_mb_segmentation_map(&mut self,
val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(32usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 5u32) & (32usize as u32);
}
#[inline]
pub fn update_segment_feature_data(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (64usize as u32)) >>
6u32) as u32)
}
}
#[inline]
pub fn set_update_segment_feature_data(&mut self,
val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(64usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 6u32) & (64usize as u32);
}
#[inline]
pub fn filter_type(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (128usize as u32)) >>
7u32) as u32)
}
}
#[inline]
pub fn set_filter_type(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(128usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 7u32) & (128usize as u32);
}
#[inline]
pub fn sharpness_level(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (1792usize as u32)) >>
8u32) as u32)
}
}
#[inline]
pub fn set_sharpness_level(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(1792usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 8u32) & (1792usize as u32);
}
#[inline]
pub fn loop_filter_adj_enable(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (2048usize as u32)) >>
11u32) as u32)
}
}
#[inline]
pub fn set_loop_filter_adj_enable(&mut self,
val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(2048usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 11u32) & (2048usize as u32);
}
#[inline]
pub fn mode_ref_lf_delta_update(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (4096usize as u32)) >>
12u32) as u32)
}
}
#[inline]
pub fn set_mode_ref_lf_delta_update(&mut self,
val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(4096usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 12u32) & (4096usize as u32);
}
#[inline]
pub fn sign_bias_golden(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (8192usize as u32)) >>
13u32) as u32)
}
}
#[inline]
pub fn set_sign_bias_golden(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(8192usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 13u32) & (8192usize as u32);
}
#[inline]
pub fn sign_bias_alternate(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (16384usize as u32)) >>
14u32) as u32)
}
}
#[inline]
pub fn set_sign_bias_alternate(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(16384usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 14u32) & (16384usize as u32);
}
#[inline]
pub fn mb_no_coeff_skip(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (32768usize as u32)) >>
15u32) as u32)
}
}
#[inline]
pub fn set_mb_no_coeff_skip(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(32768usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 15u32) & (32768usize as u32);
}
#[inline]
pub fn loop_filter_disable(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (65536usize as u32)) >>
16u32) as u32)
}
}
#[inline]
pub fn set_loop_filter_disable(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(65536usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 16u32) & (65536usize as u32);
}
}
#[test]
fn bindgen_test_layout__VAPictureParameterBufferVP8__bindgen_ty_1() {
assert_eq!(::std::mem::size_of::<_VAPictureParameterBufferVP8__bindgen_ty_1>()
, 4usize);
assert_eq!(::std::mem::align_of::<_VAPictureParameterBufferVP8__bindgen_ty_1>()
, 4usize);
}
impl Clone for _VAPictureParameterBufferVP8__bindgen_ty_1 {
fn clone(&self) -> Self { *self }
}
#[test]
fn bindgen_test_layout__VAPictureParameterBufferVP8() {
assert_eq!(::std::mem::size_of::<_VAPictureParameterBufferVP8>() ,
96usize);
assert_eq!(::std::mem::align_of::<_VAPictureParameterBufferVP8>() ,
4usize);
}
impl Clone for _VAPictureParameterBufferVP8 {
fn clone(&self) -> Self { *self }
}
pub type VAPictureParameterBufferVP8 = _VAPictureParameterBufferVP8;
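// Illustrative usage sketch (not emitted by bindgen): the packed `pic_fields`
// bits are accessed through the generated getter/setter pairs rather than by
// touching `_bitfield_1` directly. The values below are arbitrary examples.
#[test]
fn bitfield_roundtrip__VAPictureParameterBufferVP8_pic_fields() {
    let mut bits =
        _VAPictureParameterBufferVP8__bindgen_ty_1__bindgen_ty_1 { _bitfield_1: 0 };
    bits.set_key_frame(1);
    bits.set_version(3);
    bits.set_sharpness_level(5);
    assert_eq!(bits.key_frame(), 1);
    assert_eq!(bits.version(), 3);
    assert_eq!(bits.sharpness_level(), 5);
}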
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VASliceParameterBufferVP8 {
pub slice_data_size: ::std::os::raw::c_uint,
pub slice_data_offset: ::std::os::raw::c_uint,
pub slice_data_flag: ::std::os::raw::c_uint,
pub macroblock_offset: ::std::os::raw::c_uint,
pub num_of_partitions: ::std::os::raw::c_uchar,
pub partition_size: [::std::os::raw::c_uint; 9usize],
}
#[test]
fn bindgen_test_layout__VASliceParameterBufferVP8() {
assert_eq!(::std::mem::size_of::<_VASliceParameterBufferVP8>() , 56usize);
assert_eq!(::std::mem::align_of::<_VASliceParameterBufferVP8>() , 4usize);
}
impl Clone for _VASliceParameterBufferVP8 {
fn clone(&self) -> Self { *self }
}
pub type VASliceParameterBufferVP8 = _VASliceParameterBufferVP8;
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAProbabilityDataBufferVP8 {
pub dct_coeff_probs: [[[[::std::os::raw::c_uchar; 11usize]; 3usize]; 8usize]; 4usize],
}
#[test]
fn bindgen_test_layout__VAProbabilityDataBufferVP8() {
assert_eq!(::std::mem::size_of::<_VAProbabilityDataBufferVP8>() ,
1056usize);
assert_eq!(::std::mem::align_of::<_VAProbabilityDataBufferVP8>() ,
1usize);
}
impl Clone for _VAProbabilityDataBufferVP8 {
fn clone(&self) -> Self { *self }
}
pub type VAProbabilityDataBufferVP8 = _VAProbabilityDataBufferVP8;
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAIQMatrixBufferVP8 {
pub quantization_index: [[::std::os::raw::c_ushort; 6usize]; 4usize],
}
#[test]
fn bindgen_test_layout__VAIQMatrixBufferVP8() {
assert_eq!(::std::mem::size_of::<_VAIQMatrixBufferVP8>() , 48usize);
assert_eq!(::std::mem::align_of::<_VAIQMatrixBufferVP8>() , 2usize);
}
impl Clone for _VAIQMatrixBufferVP8 {
fn clone(&self) -> Self { *self }
}
pub type VAIQMatrixBufferVP8 = _VAIQMatrixBufferVP8;
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VADecPictureParameterBufferVP9 {
pub frame_width: u16,
pub frame_height: u16,
pub reference_frames: [VASurfaceID; 8usize],
pub pic_fields: _VADecPictureParameterBufferVP9__bindgen_ty_1,
pub filter_level: u8,
pub sharpness_level: u8,
pub log2_tile_rows: u8,
pub log2_tile_columns: u8,
pub frame_header_length_in_bytes: u8,
pub first_partition_size: u16,
pub mb_segment_tree_probs: [u8; 7usize],
pub segment_pred_probs: [u8; 3usize],
pub profile: u8,
pub bit_depth: u8,
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VADecPictureParameterBufferVP9__bindgen_ty_1 {
pub bits: __BindgenUnionField<_VADecPictureParameterBufferVP9__bindgen_ty_1__bindgen_ty_1>,
pub value: __BindgenUnionField<u32>,
pub bindgen_union_field: u32,
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VADecPictureParameterBufferVP9__bindgen_ty_1__bindgen_ty_1 {
pub _bitfield_1: u32,
}
#[test]
fn bindgen_test_layout__VADecPictureParameterBufferVP9__bindgen_ty_1__bindgen_ty_1() {
assert_eq!(::std::mem::size_of::<_VADecPictureParameterBufferVP9__bindgen_ty_1__bindgen_ty_1>()
, 4usize);
assert_eq!(::std::mem::align_of::<_VADecPictureParameterBufferVP9__bindgen_ty_1__bindgen_ty_1>()
, 4usize);
}
impl Clone for _VADecPictureParameterBufferVP9__bindgen_ty_1__bindgen_ty_1 {
fn clone(&self) -> Self { *self }
}
impl _VADecPictureParameterBufferVP9__bindgen_ty_1__bindgen_ty_1 {
#[inline]
pub fn subsampling_x(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (1usize as u32)) >>
0u32) as u32)
}
}
#[inline]
pub fn set_subsampling_x(&mut self, val: u32) {
self._bitfield_1 &= !(1usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 0u32) & (1usize as u32);
}
#[inline]
pub fn subsampling_y(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (2usize as u32)) >>
1u32) as u32)
}
}
#[inline]
pub fn set_subsampling_y(&mut self, val: u32) {
self._bitfield_1 &= !(2usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 1u32) & (2usize as u32);
}
#[inline]
pub fn frame_type(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (4usize as u32)) >>
2u32) as u32)
}
}
#[inline]
pub fn set_frame_type(&mut self, val: u32) {
self._bitfield_1 &= !(4usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 2u32) & (4usize as u32);
}
#[inline]
pub fn show_frame(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (8usize as u32)) >>
3u32) as u32)
}
}
#[inline]
pub fn set_show_frame(&mut self, val: u32) {
self._bitfield_1 &= !(8usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 3u32) & (8usize as u32);
}
#[inline]
pub fn error_resilient_mode(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (16usize as u32)) >>
4u32) as u32)
}
}
#[inline]
pub fn set_error_resilient_mode(&mut self, val: u32) {
self._bitfield_1 &= !(16usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 4u32) & (16usize as u32);
}
#[inline]
pub fn intra_only(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (32usize as u32)) >>
5u32) as u32)
}
}
#[inline]
pub fn set_intra_only(&mut self, val: u32) {
self._bitfield_1 &= !(32usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 5u32) & (32usize as u32);
}
#[inline]
pub fn allow_high_precision_mv(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (64usize as u32)) >>
6u32) as u32)
}
}
#[inline]
pub fn set_allow_high_precision_mv(&mut self, val: u32) {
self._bitfield_1 &= !(64usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 6u32) & (64usize as u32);
}
#[inline]
pub fn mcomp_filter_type(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (896usize as u32)) >>
7u32) as u32)
}
}
#[inline]
pub fn set_mcomp_filter_type(&mut self, val: u32) {
self._bitfield_1 &= !(896usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 7u32) & (896usize as u32);
}
#[inline]
pub fn frame_parallel_decoding_mode(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (1024usize as u32)) >>
10u32) as u32)
}
}
#[inline]
pub fn set_frame_parallel_decoding_mode(&mut self, val: u32) {
self._bitfield_1 &= !(1024usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 10u32) & (1024usize as u32);
}
#[inline]
pub fn reset_frame_context(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (6144usize as u32)) >>
11u32) as u32)
}
}
#[inline]
pub fn set_reset_frame_context(&mut self, val: u32) {
self._bitfield_1 &= !(6144usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 11u32) & (6144usize as u32);
}
#[inline]
pub fn refresh_frame_context(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (8192usize as u32)) >>
13u32) as u32)
}
}
#[inline]
pub fn set_refresh_frame_context(&mut self, val: u32) {
self._bitfield_1 &= !(8192usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 13u32) & (8192usize as u32);
}
#[inline]
pub fn frame_context_idx(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (49152usize as u32)) >>
14u32) as u32)
}
}
#[inline]
pub fn set_frame_context_idx(&mut self, val: u32) {
self._bitfield_1 &= !(49152usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 14u32) & (49152usize as u32);
}
#[inline]
pub fn segmentation_enabled(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (65536usize as u32)) >>
16u32) as u32)
}
}
#[inline]
pub fn set_segmentation_enabled(&mut self, val: u32) {
self._bitfield_1 &= !(65536usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 16u32) & (65536usize as u32);
}
#[inline]
pub fn segmentation_temporal_update(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (131072usize as u32))
>> 17u32) as u32)
}
}
#[inline]
pub fn set_segmentation_temporal_update(&mut self, val: u32) {
self._bitfield_1 &= !(131072usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 17u32) & (131072usize as u32);
}
#[inline]
pub fn segmentation_update_map(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (262144usize as u32))
>> 18u32) as u32)
}
}
#[inline]
pub fn set_segmentation_update_map(&mut self, val: u32) {
self._bitfield_1 &= !(262144usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 18u32) & (262144usize as u32);
}
#[inline]
pub fn last_ref_frame(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (3670016usize as u32))
>> 19u32) as u32)
}
}
#[inline]
pub fn set_last_ref_frame(&mut self, val: u32) {
self._bitfield_1 &= !(3670016usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 19u32) & (3670016usize as u32);
}
#[inline]
pub fn last_ref_frame_sign_bias(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (4194304usize as u32))
>> 22u32) as u32)
}
}
#[inline]
pub fn set_last_ref_frame_sign_bias(&mut self, val: u32) {
self._bitfield_1 &= !(4194304usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 22u32) & (4194304usize as u32);
}
#[inline]
pub fn golden_ref_frame(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (58720256usize as u32))
>> 23u32) as u32)
}
}
#[inline]
pub fn set_golden_ref_frame(&mut self, val: u32) {
self._bitfield_1 &= !(58720256usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 23u32) & (58720256usize as u32);
}
#[inline]
pub fn golden_ref_frame_sign_bias(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (67108864usize as u32))
>> 26u32) as u32)
}
}
#[inline]
pub fn set_golden_ref_frame_sign_bias(&mut self, val: u32) {
self._bitfield_1 &= !(67108864usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 26u32) & (67108864usize as u32);
}
#[inline]
pub fn alt_ref_frame(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 &
(939524096usize as u32)) >> 27u32) as
u32)
}
}
#[inline]
pub fn set_alt_ref_frame(&mut self, val: u32) {
self._bitfield_1 &= !(939524096usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 27u32) & (939524096usize as u32);
}
#[inline]
pub fn alt_ref_frame_sign_bias(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 &
(1073741824usize as u32)) >> 30u32) as
u32)
}
}
#[inline]
pub fn set_alt_ref_frame_sign_bias(&mut self, val: u32) {
self._bitfield_1 &= !(1073741824usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 30u32) & (1073741824usize as u32);
}
#[inline]
pub fn lossless_flag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 &
(2147483648usize as u32)) >> 31u32) as
u32)
}
}
#[inline]
pub fn set_lossless_flag(&mut self, val: u32) {
self._bitfield_1 &= !(2147483648usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 31u32) & (2147483648usize as u32);
}
}
#[test]
fn bindgen_test_layout__VADecPictureParameterBufferVP9__bindgen_ty_1() {
assert_eq!(::std::mem::size_of::<_VADecPictureParameterBufferVP9__bindgen_ty_1>()
, 4usize);
assert_eq!(::std::mem::align_of::<_VADecPictureParameterBufferVP9__bindgen_ty_1>()
, 4usize);
}
impl Clone for _VADecPictureParameterBufferVP9__bindgen_ty_1 {
fn clone(&self) -> Self { *self }
}
#[test]
fn bindgen_test_layout__VADecPictureParameterBufferVP9() {
assert_eq!(::std::mem::size_of::<_VADecPictureParameterBufferVP9>() ,
60usize);
assert_eq!(::std::mem::align_of::<_VADecPictureParameterBufferVP9>() ,
4usize);
}
impl Clone for _VADecPictureParameterBufferVP9 {
fn clone(&self) -> Self { *self }
}
pub type VADecPictureParameterBufferVP9 = _VADecPictureParameterBufferVP9;
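// Illustrative usage sketch (not emitted by bindgen): exercising a few of the
// generated bitfield accessors on the VP9 `pic_fields` storage. Example values only.
#[test]
fn bitfield_roundtrip__VADecPictureParameterBufferVP9_pic_fields() {
    let mut bits =
        _VADecPictureParameterBufferVP9__bindgen_ty_1__bindgen_ty_1 { _bitfield_1: 0 };
    bits.set_show_frame(1);
    bits.set_mcomp_filter_type(5);
    bits.set_frame_context_idx(2);
    assert_eq!(bits.show_frame(), 1);
    assert_eq!(bits.mcomp_filter_type(), 5);
    assert_eq!(bits.frame_context_idx(), 2);
}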
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VASegmentParameterVP9 {
pub segment_flags: _VASegmentParameterVP9__bindgen_ty_1,
pub filter_level: [[u8; 2usize]; 4usize],
pub luma_ac_quant_scale: i16,
pub luma_dc_quant_scale: i16,
pub chroma_ac_quant_scale: i16,
pub chroma_dc_quant_scale: i16,
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VASegmentParameterVP9__bindgen_ty_1 {
pub fields: __BindgenUnionField<_VASegmentParameterVP9__bindgen_ty_1__bindgen_ty_1>,
pub value: __BindgenUnionField<u16>,
pub bindgen_union_field: u16,
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VASegmentParameterVP9__bindgen_ty_1__bindgen_ty_1 {
    pub _bitfield_1: u16,
}
#[test]
fn bindgen_test_layout__VASegmentParameterVP9__bindgen_ty_1__bindgen_ty_1() {
assert_eq!(::std::mem::size_of::<_VASegmentParameterVP9__bindgen_ty_1__bindgen_ty_1>()
, 2usize);
assert_eq!(::std::mem::align_of::<_VASegmentParameterVP9__bindgen_ty_1__bindgen_ty_1>()
, 2usize);
}
impl Clone for _VASegmentParameterVP9__bindgen_ty_1__bindgen_ty_1 {
fn clone(&self) -> Self { *self }
}
impl _VASegmentParameterVP9__bindgen_ty_1__bindgen_ty_1 {
    #[inline]
    pub fn segment_reference_enabled(&self) -> u16 {
        unsafe {
            ::std::mem::transmute(((self._bitfield_1 & (1usize as u16)) >>
                                   0u32) as u16)
        }
    }
    #[inline]
    pub fn set_segment_reference_enabled(&mut self, val: u16) {
        self._bitfield_1 &= !(1usize as u16);
        self._bitfield_1 |= ((val as u16) << 0u32) & (1usize as u16);
    }
    #[inline]
    pub fn segment_reference(&self) -> u16 {
        unsafe {
            ::std::mem::transmute(((self._bitfield_1 & (6usize as u16)) >>
                                   1u32) as u16)
        }
    }
    #[inline]
    pub fn set_segment_reference(&mut self, val: u16) {
        self._bitfield_1 &= !(6usize as u16);
        self._bitfield_1 |= ((val as u16) << 1u32) & (6usize as u16);
    }
    #[inline]
    pub fn segment_reference_skipped(&self) -> u16 {
        unsafe {
            ::std::mem::transmute(((self._bitfield_1 & (8usize as u16)) >>
                                   3u32) as u16)
        }
    }
    #[inline]
    pub fn set_segment_reference_skipped(&mut self, val: u16) {
        self._bitfield_1 &= !(8usize as u16);
        self._bitfield_1 |= ((val as u16) << 3u32) & (8usize as u16);
    }
}
#[test]
fn bindgen_test_layout__VASegmentParameterVP9__bindgen_ty_1() {
assert_eq!(::std::mem::size_of::<_VASegmentParameterVP9__bindgen_ty_1>() ,
2usize);
assert_eq!(::std::mem::align_of::<_VASegmentParameterVP9__bindgen_ty_1>()
, 2usize);
}
impl Clone for _VASegmentParameterVP9__bindgen_ty_1 {
fn clone(&self) -> Self { *self }
}
#[test]
fn bindgen_test_layout__VASegmentParameterVP9() {
assert_eq!(::std::mem::size_of::<_VASegmentParameterVP9>() , 18usize);
assert_eq!(::std::mem::align_of::<_VASegmentParameterVP9>() , 2usize);
}
impl Clone for _VASegmentParameterVP9 {
fn clone(&self) -> Self { *self }
}
pub type VASegmentParameterVP9 = _VASegmentParameterVP9;
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VASliceParameterBufferVP9 {
pub slice_data_size: u32,
pub slice_data_offset: u32,
pub slice_data_flag: u32,
pub seg_param: [VASegmentParameterVP9; 8usize],
}
#[test]
fn bindgen_test_layout__VASliceParameterBufferVP9() {
assert_eq!(::std::mem::size_of::<_VASliceParameterBufferVP9>() ,
156usize);
assert_eq!(::std::mem::align_of::<_VASliceParameterBufferVP9>() , 4usize);
}
impl Clone for _VASliceParameterBufferVP9 {
fn clone(&self) -> Self { *self }
}
pub type VASliceParameterBufferVP9 = _VASliceParameterBufferVP9;
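// Illustrative usage sketch (not emitted by bindgen): a caller typically
// zero-initializes the slice parameter buffer and fills in the bitstream-range
// fields; VP9 carries the whole compressed frame as a single "slice". The
// numeric values below are arbitrary examples, and the flag comment refers to
// the C-side VA_SLICE_DATA_FLAG_ALL definition.
#[test]
fn example_fill__VASliceParameterBufferVP9() {
    let mut slice_param: VASliceParameterBufferVP9 = unsafe { ::std::mem::zeroed() };
    slice_param.slice_data_size = 4096; // compressed frame size in bytes (example)
    slice_param.slice_data_offset = 0;
    slice_param.slice_data_flag = 0; // 0 == VA_SLICE_DATA_FLAG_ALL in va.h
    assert_eq!(slice_param.seg_param.len(), 8);
    assert_eq!(slice_param.slice_data_size, 4096);
}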
pub const VAEncPackedHeaderHEVC_VPS: _bindgen_ty_17 =
_bindgen_ty_17::VAEncPackedHeaderHEVC_VPS;
pub const VAEncPackedHeaderHEVC_SPS: _bindgen_ty_17 =
_bindgen_ty_17::VAEncPackedHeaderHEVC_VPS;
pub const VAEncPackedHeaderHEVC_PPS: _bindgen_ty_17 =
_bindgen_ty_17::VAEncPackedHeaderHEVC_PPS;
pub const VAEncPackedHeaderHEVC_Slice: _bindgen_ty_17 =
_bindgen_ty_17::VAEncPackedHeaderHEVC_Slice;
pub const VAEncPackedHeaderHEVC_SEI: _bindgen_ty_17 =
_bindgen_ty_17::VAEncPackedHeaderHEVC_SEI;
#[repr(u32)]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum _bindgen_ty_17 {
VAEncPackedHeaderHEVC_VPS = 1,
VAEncPackedHeaderHEVC_PPS = 2,
VAEncPackedHeaderHEVC_Slice = 3,
VAEncPackedHeaderHEVC_SEI = 2147483649,
}
pub use self::_bindgen_ty_17 as VAEncPackedHeaderTypeHEVC;
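// Illustrative check (not emitted by bindgen): in the upstream C header both
// VAEncPackedHeaderHEVC_VPS and VAEncPackedHeaderHEVC_SPS carry the same
// sequence-header value, which is why the SPS constant above aliases the VPS
// enum variant instead of getting a variant of its own.
#[test]
fn packed_header_hevc_constant_values() {
    assert_eq!(VAEncPackedHeaderHEVC_VPS as u32, 1);
    assert_eq!(VAEncPackedHeaderHEVC_SPS, VAEncPackedHeaderHEVC_VPS);
    assert_eq!(VAEncPackedHeaderHEVC_PPS as u32, 2);
    assert_eq!(VAEncPackedHeaderHEVC_Slice as u32, 3);
    assert_eq!(VAEncPackedHeaderHEVC_SEI as u32, 0x8000_0001);
}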
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAEncSequenceParameterBufferHEVC {
pub general_profile_idc: u8,
pub general_level_idc: u8,
pub general_tier_flag: u8,
pub intra_period: u32,
pub intra_idr_period: u32,
pub ip_period: u32,
pub bits_per_second: u32,
pub pic_width_in_luma_samples: u16,
pub pic_height_in_luma_samples: u16,
pub seq_fields: _VAEncSequenceParameterBufferHEVC__bindgen_ty_1,
pub log2_min_luma_coding_block_size_minus3: u8,
pub log2_diff_max_min_luma_coding_block_size: u8,
pub log2_min_transform_block_size_minus2: u8,
pub log2_diff_max_min_transform_block_size: u8,
pub max_transform_hierarchy_depth_inter: u8,
pub max_transform_hierarchy_depth_intra: u8,
pub pcm_sample_bit_depth_luma_minus1: u32,
pub pcm_sample_bit_depth_chroma_minus1: u32,
pub log2_min_pcm_luma_coding_block_size_minus3: u32,
pub log2_max_pcm_luma_coding_block_size_minus3: u32,
pub vui_parameters_present_flag: u8,
pub vui_fields: _VAEncSequenceParameterBufferHEVC__bindgen_ty_2,
pub aspect_ratio_idc: u8,
pub sar_width: u32,
pub sar_height: u32,
pub vui_num_units_in_tick: u32,
pub vui_time_scale: u32,
pub min_spatial_segmentation_idc: u16,
pub max_bytes_per_pic_denom: u8,
pub max_bits_per_min_cu_denom: u8,
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAEncSequenceParameterBufferHEVC__bindgen_ty_1 {
pub bits: __BindgenUnionField<_VAEncSequenceParameterBufferHEVC__bindgen_ty_1__bindgen_ty_1>,
pub value: __BindgenUnionField<u32>,
pub bindgen_union_field: u32,
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAEncSequenceParameterBufferHEVC__bindgen_ty_1__bindgen_ty_1 {
pub _bitfield_1: u32,
}
#[test]
fn bindgen_test_layout__VAEncSequenceParameterBufferHEVC__bindgen_ty_1__bindgen_ty_1() {
assert_eq!(::std::mem::size_of::<_VAEncSequenceParameterBufferHEVC__bindgen_ty_1__bindgen_ty_1>()
, 4usize);
assert_eq!(::std::mem::align_of::<_VAEncSequenceParameterBufferHEVC__bindgen_ty_1__bindgen_ty_1>()
, 4usize);
}
impl Clone for _VAEncSequenceParameterBufferHEVC__bindgen_ty_1__bindgen_ty_1 {
fn clone(&self) -> Self { *self }
}
impl _VAEncSequenceParameterBufferHEVC__bindgen_ty_1__bindgen_ty_1 {
#[inline]
pub fn chroma_format_idc(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (3usize as u32)) >>
0u32) as u32)
}
}
#[inline]
pub fn set_chroma_format_idc(&mut self, val: u32) {
self._bitfield_1 &= !(3usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 0u32) & (3usize as u32);
}
#[inline]
pub fn separate_colour_plane_flag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (4usize as u32)) >>
2u32) as u32)
}
}
#[inline]
pub fn set_separate_colour_plane_flag(&mut self, val: u32) {
self._bitfield_1 &= !(4usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 2u32) & (4usize as u32);
}
#[inline]
pub fn bit_depth_luma_minus8(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (56usize as u32)) >>
3u32) as u32)
}
}
#[inline]
pub fn set_bit_depth_luma_minus8(&mut self, val: u32) {
self._bitfield_1 &= !(56usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 3u32) & (56usize as u32);
}
#[inline]
pub fn bit_depth_chroma_minus8(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (448usize as u32)) >>
6u32) as u32)
}
}
#[inline]
pub fn set_bit_depth_chroma_minus8(&mut self, val: u32) {
self._bitfield_1 &= !(448usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 6u32) & (448usize as u32);
}
#[inline]
pub fn scaling_list_enabled_flag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (512usize as u32)) >>
9u32) as u32)
}
}
#[inline]
pub fn set_scaling_list_enabled_flag(&mut self, val: u32) {
self._bitfield_1 &= !(512usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 9u32) & (512usize as u32);
}
#[inline]
pub fn strong_intra_smoothing_enabled_flag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (1024usize as u32)) >>
10u32) as u32)
}
}
#[inline]
pub fn set_strong_intra_smoothing_enabled_flag(&mut self, val: u32) {
self._bitfield_1 &= !(1024usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 10u32) & (1024usize as u32);
}
#[inline]
pub fn amp_enabled_flag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (2048usize as u32)) >>
11u32) as u32)
}
}
#[inline]
pub fn set_amp_enabled_flag(&mut self, val: u32) {
self._bitfield_1 &= !(2048usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 11u32) & (2048usize as u32);
}
#[inline]
pub fn sample_adaptive_offset_enabled_flag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (4096usize as u32)) >>
12u32) as u32)
}
}
#[inline]
pub fn set_sample_adaptive_offset_enabled_flag(&mut self, val: u32) {
self._bitfield_1 &= !(4096usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 12u32) & (4096usize as u32);
}
#[inline]
pub fn pcm_enabled_flag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (8192usize as u32)) >>
13u32) as u32)
}
}
#[inline]
pub fn set_pcm_enabled_flag(&mut self, val: u32) {
self._bitfield_1 &= !(8192usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 13u32) & (8192usize as u32);
}
#[inline]
pub fn pcm_loop_filter_disabled_flag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (16384usize as u32)) >>
14u32) as u32)
}
}
#[inline]
pub fn set_pcm_loop_filter_disabled_flag(&mut self, val: u32) {
self._bitfield_1 &= !(16384usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 14u32) & (16384usize as u32);
}
#[inline]
pub fn sps_temporal_mvp_enabled_flag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (32768usize as u32)) >>
15u32) as u32)
}
}
#[inline]
pub fn set_sps_temporal_mvp_enabled_flag(&mut self, val: u32) {
self._bitfield_1 &= !(32768usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 15u32) & (32768usize as u32);
}
#[inline]
pub fn reserved_bits(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 &
(4294901760usize as u32)) >> 16u32) as
u32)
}
}
#[inline]
pub fn set_reserved_bits(&mut self, val: u32) {
self._bitfield_1 &= !(4294901760usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 16u32) & (4294901760usize as u32);
}
}
#[test]
fn bindgen_test_layout__VAEncSequenceParameterBufferHEVC__bindgen_ty_1() {
assert_eq!(::std::mem::size_of::<_VAEncSequenceParameterBufferHEVC__bindgen_ty_1>()
, 4usize);
assert_eq!(::std::mem::align_of::<_VAEncSequenceParameterBufferHEVC__bindgen_ty_1>()
, 4usize);
}
impl Clone for _VAEncSequenceParameterBufferHEVC__bindgen_ty_1 {
fn clone(&self) -> Self { *self }
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAEncSequenceParameterBufferHEVC__bindgen_ty_2 {
pub bits: __BindgenUnionField<_VAEncSequenceParameterBufferHEVC__bindgen_ty_2__bindgen_ty_1>,
pub value: __BindgenUnionField<u32>,
pub bindgen_union_field: u32,
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAEncSequenceParameterBufferHEVC__bindgen_ty_2__bindgen_ty_1 {
pub _bitfield_1: u32,
}
#[test]
fn bindgen_test_layout__VAEncSequenceParameterBufferHEVC__bindgen_ty_2__bindgen_ty_1() {
assert_eq!(::std::mem::size_of::<_VAEncSequenceParameterBufferHEVC__bindgen_ty_2__bindgen_ty_1>()
, 4usize);
assert_eq!(::std::mem::align_of::<_VAEncSequenceParameterBufferHEVC__bindgen_ty_2__bindgen_ty_1>()
, 4usize);
}
impl Clone for _VAEncSequenceParameterBufferHEVC__bindgen_ty_2__bindgen_ty_1 {
fn clone(&self) -> Self { *self }
}
impl _VAEncSequenceParameterBufferHEVC__bindgen_ty_2__bindgen_ty_1 {
#[inline]
pub fn aspect_ratio_info_present_flag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (1usize as u32)) >>
0u32) as u32)
}
}
#[inline]
pub fn set_aspect_ratio_info_present_flag(&mut self, val: u32) {
self._bitfield_1 &= !(1usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 0u32) & (1usize as u32);
}
#[inline]
pub fn neutral_chroma_indication_flag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (2usize as u32)) >>
1u32) as u32)
}
}
#[inline]
pub fn set_neutral_chroma_indication_flag(&mut self, val: u32) {
self._bitfield_1 &= !(2usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 1u32) & (2usize as u32);
}
#[inline]
pub fn field_seq_flag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (4usize as u32)) >>
2u32) as u32)
}
}
#[inline]
pub fn set_field_seq_flag(&mut self, val: u32) {
self._bitfield_1 &= !(4usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 2u32) & (4usize as u32);
}
#[inline]
pub fn vui_timing_info_present_flag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (8usize as u32)) >>
3u32) as u32)
}
}
#[inline]
pub fn set_vui_timing_info_present_flag(&mut self, val: u32) {
self._bitfield_1 &= !(8usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 3u32) & (8usize as u32);
}
#[inline]
pub fn bitstream_restriction_flag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (16usize as u32)) >>
4u32) as u32)
}
}
#[inline]
pub fn set_bitstream_restriction_flag(&mut self, val: u32) {
self._bitfield_1 &= !(16usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 4u32) & (16usize as u32);
}
#[inline]
pub fn tiles_fixed_structure_flag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (32usize as u32)) >>
5u32) as u32)
}
}
#[inline]
pub fn set_tiles_fixed_structure_flag(&mut self, val: u32) {
self._bitfield_1 &= !(32usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 5u32) & (32usize as u32);
}
#[inline]
pub fn motion_vectors_over_pic_boundaries_flag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (64usize as u32)) >>
6u32) as u32)
}
}
#[inline]
pub fn set_motion_vectors_over_pic_boundaries_flag(&mut self, val: u32) {
self._bitfield_1 &= !(64usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 6u32) & (64usize as u32);
}
#[inline]
pub fn restricted_ref_pic_lists_flag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (128usize as u32)) >>
7u32) as u32)
}
}
#[inline]
pub fn set_restricted_ref_pic_lists_flag(&mut self, val: u32) {
self._bitfield_1 &= !(128usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 7u32) & (128usize as u32);
}
#[inline]
pub fn log2_max_mv_length_horizontal(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (7936usize as u32)) >>
8u32) as u32)
}
}
#[inline]
pub fn set_log2_max_mv_length_horizontal(&mut self, val: u32) {
self._bitfield_1 &= !(7936usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 8u32) & (7936usize as u32);
}
#[inline]
pub fn log2_max_mv_length_vertical(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (253952usize as u32))
>> 13u32) as u32)
}
}
#[inline]
pub fn set_log2_max_mv_length_vertical(&mut self, val: u32) {
self._bitfield_1 &= !(253952usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 13u32) & (253952usize as u32);
}
}
#[test]
fn bindgen_test_layout__VAEncSequenceParameterBufferHEVC__bindgen_ty_2() {
assert_eq!(::std::mem::size_of::<_VAEncSequenceParameterBufferHEVC__bindgen_ty_2>()
, 4usize);
assert_eq!(::std::mem::align_of::<_VAEncSequenceParameterBufferHEVC__bindgen_ty_2>()
, 4usize);
}
impl Clone for _VAEncSequenceParameterBufferHEVC__bindgen_ty_2 {
fn clone(&self) -> Self { *self }
}
#[test]
fn bindgen_test_layout__VAEncSequenceParameterBufferHEVC() {
assert_eq!(::std::mem::size_of::<_VAEncSequenceParameterBufferHEVC>() ,
84usize);
assert_eq!(::std::mem::align_of::<_VAEncSequenceParameterBufferHEVC>() ,
4usize);
}
impl Clone for _VAEncSequenceParameterBufferHEVC {
fn clone(&self) -> Self { *self }
}
pub type VAEncSequenceParameterBufferHEVC = _VAEncSequenceParameterBufferHEVC;
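// Illustrative usage sketch (not emitted by bindgen): populating a few of the
// packed `seq_fields` bits through the generated setters. Example values only.
#[test]
fn bitfield_roundtrip__VAEncSequenceParameterBufferHEVC_seq_fields() {
    let mut bits =
        _VAEncSequenceParameterBufferHEVC__bindgen_ty_1__bindgen_ty_1 { _bitfield_1: 0 };
    bits.set_chroma_format_idc(1); // 4:2:0
    bits.set_bit_depth_luma_minus8(2); // 10-bit luma
    bits.set_amp_enabled_flag(1);
    assert_eq!(bits.chroma_format_idc(), 1);
    assert_eq!(bits.bit_depth_luma_minus8(), 2);
    assert_eq!(bits.amp_enabled_flag(), 1);
}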
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAEncPictureParameterBufferHEVC {
pub decoded_curr_pic: VAPictureHEVC,
pub reference_frames: [VAPictureHEVC; 15usize],
pub coded_buf: VABufferID,
pub collocated_ref_pic_index: u8,
pub last_picture: u8,
pub pic_init_qp: u8,
pub diff_cu_qp_delta_depth: u8,
pub pps_cb_qp_offset: i8,
pub pps_cr_qp_offset: i8,
pub num_tile_columns_minus1: u8,
pub num_tile_rows_minus1: u8,
pub column_width_minus1: [u8; 19usize],
pub row_height_minus1: [u8; 21usize],
pub log2_parallel_merge_level_minus2: u8,
pub ctu_max_bitsize_allowed: u8,
pub num_ref_idx_l0_default_active_minus1: u8,
pub num_ref_idx_l1_default_active_minus1: u8,
pub slice_pic_parameter_set_id: u8,
pub nal_unit_type: u8,
pub pic_fields: _VAEncPictureParameterBufferHEVC__bindgen_ty_1,
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAEncPictureParameterBufferHEVC__bindgen_ty_1 {
pub bits: __BindgenUnionField<_VAEncPictureParameterBufferHEVC__bindgen_ty_1__bindgen_ty_1>,
pub value: __BindgenUnionField<u32>,
pub bindgen_union_field: u32,
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAEncPictureParameterBufferHEVC__bindgen_ty_1__bindgen_ty_1 {
pub _bitfield_1: u32,
}
#[test]
fn bindgen_test_layout__VAEncPictureParameterBufferHEVC__bindgen_ty_1__bindgen_ty_1() {
assert_eq!(::std::mem::size_of::<_VAEncPictureParameterBufferHEVC__bindgen_ty_1__bindgen_ty_1>()
, 4usize);
assert_eq!(::std::mem::align_of::<_VAEncPictureParameterBufferHEVC__bindgen_ty_1__bindgen_ty_1>()
, 4usize);
}
impl Clone for _VAEncPictureParameterBufferHEVC__bindgen_ty_1__bindgen_ty_1 {
fn clone(&self) -> Self { *self }
}
impl _VAEncPictureParameterBufferHEVC__bindgen_ty_1__bindgen_ty_1 {
#[inline]
pub fn idr_pic_flag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (1usize as u32)) >>
0u32) as u32)
}
}
#[inline]
pub fn set_idr_pic_flag(&mut self, val: u32) {
self._bitfield_1 &= !(1usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 0u32) & (1usize as u32);
}
#[inline]
pub fn coding_type(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (14usize as u32)) >>
1u32) as u32)
}
}
#[inline]
pub fn set_coding_type(&mut self, val: u32) {
self._bitfield_1 &= !(14usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 1u32) & (14usize as u32);
}
#[inline]
pub fn reference_pic_flag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (16usize as u32)) >>
4u32) as u32)
}
}
#[inline]
pub fn set_reference_pic_flag(&mut self, val: u32) {
self._bitfield_1 &= !(16usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 4u32) & (16usize as u32);
}
#[inline]
pub fn dependent_slice_segments_enabled_flag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (32usize as u32)) >>
5u32) as u32)
}
}
#[inline]
pub fn set_dependent_slice_segments_enabled_flag(&mut self, val: u32) {
self._bitfield_1 &= !(32usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 5u32) & (32usize as u32);
}
#[inline]
pub fn sign_data_hiding_enabled_flag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (64usize as u32)) >>
6u32) as u32)
}
}
#[inline]
pub fn set_sign_data_hiding_enabled_flag(&mut self, val: u32) {
self._bitfield_1 &= !(64usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 6u32) & (64usize as u32);
}
#[inline]
pub fn constrained_intra_pred_flag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (128usize as u32)) >>
7u32) as u32)
}
}
#[inline]
pub fn set_constrained_intra_pred_flag(&mut self, val: u32) {
self._bitfield_1 &= !(128usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 7u32) & (128usize as u32);
}
#[inline]
pub fn transform_skip_enabled_flag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (256usize as u32)) >>
8u32) as u32)
}
}
#[inline]
pub fn set_transform_skip_enabled_flag(&mut self, val: u32) {
self._bitfield_1 &= !(256usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 8u32) & (256usize as u32);
}
#[inline]
pub fn cu_qp_delta_enabled_flag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (512usize as u32)) >>
9u32) as u32)
}
}
#[inline]
pub fn set_cu_qp_delta_enabled_flag(&mut self, val: u32) {
self._bitfield_1 &= !(512usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 9u32) & (512usize as u32);
}
#[inline]
pub fn weighted_pred_flag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (1024usize as u32)) >>
10u32) as u32)
}
}
#[inline]
pub fn set_weighted_pred_flag(&mut self, val: u32) {
self._bitfield_1 &= !(1024usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 10u32) & (1024usize as u32);
}
#[inline]
pub fn weighted_bipred_flag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (2048usize as u32)) >>
11u32) as u32)
}
}
#[inline]
pub fn set_weighted_bipred_flag(&mut self, val: u32) {
self._bitfield_1 &= !(2048usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 11u32) & (2048usize as u32);
}
#[inline]
pub fn transquant_bypass_enabled_flag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (4096usize as u32)) >>
12u32) as u32)
}
}
#[inline]
pub fn set_transquant_bypass_enabled_flag(&mut self, val: u32) {
self._bitfield_1 &= !(4096usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 12u32) & (4096usize as u32);
}
#[inline]
pub fn tiles_enabled_flag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (8192usize as u32)) >>
13u32) as u32)
}
}
#[inline]
pub fn set_tiles_enabled_flag(&mut self, val: u32) {
self._bitfield_1 &= !(8192usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 13u32) & (8192usize as u32);
}
#[inline]
pub fn entropy_coding_sync_enabled_flag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (16384usize as u32)) >>
14u32) as u32)
}
}
#[inline]
pub fn set_entropy_coding_sync_enabled_flag(&mut self, val: u32) {
self._bitfield_1 &= !(16384usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 14u32) & (16384usize as u32);
}
#[inline]
pub fn loop_filter_across_tiles_enabled_flag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (32768usize as u32)) >>
15u32) as u32)
}
}
#[inline]
pub fn set_loop_filter_across_tiles_enabled_flag(&mut self, val: u32) {
self._bitfield_1 &= !(32768usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 15u32) & (32768usize as u32);
}
#[inline]
pub fn pps_loop_filter_across_slices_enabled_flag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (65536usize as u32)) >>
16u32) as u32)
}
}
#[inline]
pub fn set_pps_loop_filter_across_slices_enabled_flag(&mut self,
val: u32) {
self._bitfield_1 &= !(65536usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 16u32) & (65536usize as u32);
}
#[inline]
pub fn scaling_list_data_present_flag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (131072usize as u32))
>> 17u32) as u32)
}
}
#[inline]
pub fn set_scaling_list_data_present_flag(&mut self, val: u32) {
self._bitfield_1 &= !(131072usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 17u32) & (131072usize as u32);
}
#[inline]
pub fn screen_content_flag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (262144usize as u32))
>> 18u32) as u32)
}
}
#[inline]
pub fn set_screen_content_flag(&mut self, val: u32) {
self._bitfield_1 &= !(262144usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 18u32) & (262144usize as u32);
}
#[inline]
pub fn enable_gpu_weighted_prediction(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (524288usize as u32))
>> 19u32) as u32)
}
}
#[inline]
pub fn set_enable_gpu_weighted_prediction(&mut self, val: u32) {
self._bitfield_1 &= !(524288usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 19u32) & (524288usize as u32);
}
#[inline]
pub fn no_output_of_prior_pics_flag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (1048576usize as u32))
>> 20u32) as u32)
}
}
#[inline]
pub fn set_no_output_of_prior_pics_flag(&mut self, val: u32) {
self._bitfield_1 &= !(1048576usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 20u32) & (1048576usize as u32);
}
#[inline]
pub fn reserved(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 &
(4292870144usize as u32)) >> 21u32) as
u32)
}
}
#[inline]
pub fn set_reserved(&mut self, val: u32) {
self._bitfield_1 &= !(4292870144usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 21u32) & (4292870144usize as u32);
}
}
#[test]
fn bindgen_test_layout__VAEncPictureParameterBufferHEVC__bindgen_ty_1() {
assert_eq!(::std::mem::size_of::<_VAEncPictureParameterBufferHEVC__bindgen_ty_1>()
, 4usize);
assert_eq!(::std::mem::align_of::<_VAEncPictureParameterBufferHEVC__bindgen_ty_1>()
, 4usize);
}
impl Clone for _VAEncPictureParameterBufferHEVC__bindgen_ty_1 {
fn clone(&self) -> Self { *self }
}
#[test]
fn bindgen_test_layout__VAEncPictureParameterBufferHEVC() {
assert_eq!(::std::mem::size_of::<_VAEncPictureParameterBufferHEVC>() ,
256usize);
assert_eq!(::std::mem::align_of::<_VAEncPictureParameterBufferHEVC>() ,
4usize);
}
impl Clone for _VAEncPictureParameterBufferHEVC {
fn clone(&self) -> Self { *self }
}
pub type VAEncPictureParameterBufferHEVC = _VAEncPictureParameterBufferHEVC;
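// Illustrative usage sketch (not emitted by bindgen): setting the picture-level
// coding flags through the generated accessors. Example values only.
#[test]
fn bitfield_roundtrip__VAEncPictureParameterBufferHEVC_pic_fields() {
    let mut bits =
        _VAEncPictureParameterBufferHEVC__bindgen_ty_1__bindgen_ty_1 { _bitfield_1: 0 };
    bits.set_idr_pic_flag(1);
    bits.set_coding_type(2);
    bits.set_reference_pic_flag(1);
    assert_eq!(bits.idr_pic_flag(), 1);
    assert_eq!(bits.coding_type(), 2);
    assert_eq!(bits.reference_pic_flag(), 1);
}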
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAEncSliceParameterBufferHEVC {
pub slice_segment_address: u32,
pub num_ctu_in_slice: u32,
pub slice_type: u8,
pub slice_pic_parameter_set_id: u8,
pub num_ref_idx_l0_active_minus1: u8,
pub num_ref_idx_l1_active_minus1: u8,
pub ref_pic_list0: [VAPictureHEVC; 15usize],
pub ref_pic_list1: [VAPictureHEVC; 15usize],
pub luma_log2_weight_denom: u8,
pub delta_chroma_log2_weight_denom: i8,
pub delta_luma_weight_l0: [i8; 15usize],
pub luma_offset_l0: [i8; 15usize],
pub delta_chroma_weight_l0: [[i8; 2usize]; 15usize],
pub chroma_offset_l0: [[i8; 2usize]; 15usize],
pub delta_luma_weight_l1: [i8; 15usize],
pub luma_offset_l1: [i8; 15usize],
pub delta_chroma_weight_l1: [[i8; 2usize]; 15usize],
pub chroma_offset_l1: [[i8; 2usize]; 15usize],
pub max_num_merge_cand: u8,
pub slice_qp_delta: i8,
pub slice_cb_qp_offset: i8,
pub slice_cr_qp_offset: i8,
pub slice_beta_offset_div2: i8,
pub slice_tc_offset_div2: i8,
pub slice_fields: _VAEncSliceParameterBufferHEVC__bindgen_ty_1,
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAEncSliceParameterBufferHEVC__bindgen_ty_1 {
pub bits: __BindgenUnionField<_VAEncSliceParameterBufferHEVC__bindgen_ty_1__bindgen_ty_1>,
pub value: __BindgenUnionField<u32>,
pub bindgen_union_field: u32,
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAEncSliceParameterBufferHEVC__bindgen_ty_1__bindgen_ty_1 {
pub _bitfield_1: u16,
}
#[test]
fn bindgen_test_layout__VAEncSliceParameterBufferHEVC__bindgen_ty_1__bindgen_ty_1() {
assert_eq!(::std::mem::size_of::<_VAEncSliceParameterBufferHEVC__bindgen_ty_1__bindgen_ty_1>()
, 4usize);
assert_eq!(::std::mem::align_of::<_VAEncSliceParameterBufferHEVC__bindgen_ty_1__bindgen_ty_1>()
, 4usize);
}
impl Clone for _VAEncSliceParameterBufferHEVC__bindgen_ty_1__bindgen_ty_1 {
fn clone(&self) -> Self { *self }
}
impl _VAEncSliceParameterBufferHEVC__bindgen_ty_1__bindgen_ty_1 {
#[inline]
pub fn last_slice_of_pic_flag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (1usize as u16)) >>
0u32) as u32)
}
}
#[inline]
pub fn set_last_slice_of_pic_flag(&mut self, val: u32) {
self._bitfield_1 &= !(1usize as u16);
self._bitfield_1 |= ((val as u32 as u16) << 0u32) & (1usize as u16);
}
#[inline]
pub fn dependent_slice_segment_flag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (2usize as u16)) >>
1u32) as u32)
}
}
#[inline]
pub fn set_dependent_slice_segment_flag(&mut self, val: u32) {
self._bitfield_1 &= !(2usize as u16);
self._bitfield_1 |= ((val as u32 as u16) << 1u32) & (2usize as u16);
}
#[inline]
pub fn colour_plane_id(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (12usize as u16)) >>
2u32) as u32)
}
}
#[inline]
pub fn set_colour_plane_id(&mut self, val: u32) {
self._bitfield_1 &= !(12usize as u16);
self._bitfield_1 |= ((val as u32 as u16) << 2u32) & (12usize as u16);
}
#[inline]
pub fn slice_temporal_mvp_enabled_flag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (16usize as u16)) >>
4u32) as u32)
}
}
#[inline]
pub fn set_slice_temporal_mvp_enabled_flag(&mut self, val: u32) {
self._bitfield_1 &= !(16usize as u16);
self._bitfield_1 |= ((val as u32 as u16) << 4u32) & (16usize as u16);
}
#[inline]
pub fn slice_sao_luma_flag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (32usize as u16)) >>
5u32) as u32)
}
}
#[inline]
pub fn set_slice_sao_luma_flag(&mut self, val: u32) {
self._bitfield_1 &= !(32usize as u16);
self._bitfield_1 |= ((val as u32 as u16) << 5u32) & (32usize as u16);
}
#[inline]
pub fn slice_sao_chroma_flag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (64usize as u16)) >>
6u32) as u32)
}
}
#[inline]
pub fn set_slice_sao_chroma_flag(&mut self, val: u32) {
self._bitfield_1 &= !(64usize as u16);
self._bitfield_1 |= ((val as u32 as u16) << 6u32) & (64usize as u16);
}
#[inline]
pub fn num_ref_idx_active_override_flag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (128usize as u16)) >>
7u32) as u32)
}
}
#[inline]
pub fn set_num_ref_idx_active_override_flag(&mut self, val: u32) {
self._bitfield_1 &= !(128usize as u16);
self._bitfield_1 |= ((val as u32 as u16) << 7u32) & (128usize as u16);
}
#[inline]
pub fn mvd_l1_zero_flag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (256usize as u16)) >>
8u32) as u32)
}
}
#[inline]
pub fn set_mvd_l1_zero_flag(&mut self, val: u32) {
self._bitfield_1 &= !(256usize as u16);
self._bitfield_1 |= ((val as u32 as u16) << 8u32) & (256usize as u16);
}
#[inline]
pub fn cabac_init_flag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (512usize as u16)) >>
9u32) as u32)
}
}
#[inline]
pub fn set_cabac_init_flag(&mut self, val: u32) {
self._bitfield_1 &= !(512usize as u16);
self._bitfield_1 |= ((val as u32 as u16) << 9u32) & (512usize as u16);
}
#[inline]
pub fn slice_deblocking_filter_disabled_flag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (3072usize as u16)) >>
10u32) as u32)
}
}
#[inline]
pub fn set_slice_deblocking_filter_disabled_flag(&mut self, val: u32) {
self._bitfield_1 &= !(3072usize as u16);
self._bitfield_1 |=
((val as u32 as u16) << 10u32) & (3072usize as u16);
}
#[inline]
pub fn slice_loop_filter_across_slices_enabled_flag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (4096usize as u16)) >>
12u32) as u32)
}
}
#[inline]
pub fn set_slice_loop_filter_across_slices_enabled_flag(&mut self,
val: u32) {
self._bitfield_1 &= !(4096usize as u16);
self._bitfield_1 |=
((val as u32 as u16) << 12u32) & (4096usize as u16);
}
#[inline]
pub fn collocated_from_l0_flag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (8192usize as u16)) >>
13u32) as u32)
}
}
#[inline]
pub fn set_collocated_from_l0_flag(&mut self, val: u32) {
self._bitfield_1 &= !(8192usize as u16);
self._bitfield_1 |=
((val as u32 as u16) << 13u32) & (8192usize as u16);
}
}
#[test]
fn bindgen_test_layout__VAEncSliceParameterBufferHEVC__bindgen_ty_1() {
assert_eq!(::std::mem::size_of::<_VAEncSliceParameterBufferHEVC__bindgen_ty_1>()
, 4usize);
assert_eq!(::std::mem::align_of::<_VAEncSliceParameterBufferHEVC__bindgen_ty_1>()
, 4usize);
}
impl Clone for _VAEncSliceParameterBufferHEVC__bindgen_ty_1 {
fn clone(&self) -> Self { *self }
}
#[test]
fn bindgen_test_layout__VAEncSliceParameterBufferHEVC() {
assert_eq!(::std::mem::size_of::<_VAEncSliceParameterBufferHEVC>() ,
564usize);
assert_eq!(::std::mem::align_of::<_VAEncSliceParameterBufferHEVC>() ,
4usize);
}
impl Clone for _VAEncSliceParameterBufferHEVC {
fn clone(&self) -> Self { *self }
}
pub type VAEncSliceParameterBufferHEVC = _VAEncSliceParameterBufferHEVC;
#[repr(C)]
#[derive(Copy)]
pub struct _VAQMatrixBufferHEVC {
pub scaling_lists_4x4: [[[u8; 16usize]; 2usize]; 3usize],
pub scaling_lists_8x8: [[[u8; 64usize]; 2usize]; 3usize],
pub scaling_lists_16x16: [[[u8; 64usize]; 2usize]; 3usize],
pub scaling_lists_32x32: [[u8; 64usize]; 2usize],
pub scaling_list_dc_16x16: [[u8; 2usize]; 3usize],
pub scaling_list_dc_32x32: [u8; 2usize],
}
#[test]
fn bindgen_test_layout__VAQMatrixBufferHEVC() {
assert_eq!(::std::mem::size_of::<_VAQMatrixBufferHEVC>() , 1000usize);
assert_eq!(::std::mem::align_of::<_VAQMatrixBufferHEVC>() , 1usize);
}
impl Clone for _VAQMatrixBufferHEVC {
fn clone(&self) -> Self { *self }
}
pub type VAQMatrixBufferHEVC = _VAQMatrixBufferHEVC;
pub const VAEncPackedHeaderH264_SPS: _bindgen_ty_18 =
_bindgen_ty_18::VAEncPackedHeaderH264_SPS;
pub const VAEncPackedHeaderH264_PPS: _bindgen_ty_18 =
_bindgen_ty_18::VAEncPackedHeaderH264_PPS;
pub const VAEncPackedHeaderH264_Slice: _bindgen_ty_18 =
_bindgen_ty_18::VAEncPackedHeaderH264_Slice;
pub const VAEncPackedHeaderH264_SEI: _bindgen_ty_18 =
_bindgen_ty_18::VAEncPackedHeaderH264_SEI;
#[repr(u32)]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum _bindgen_ty_18 {
VAEncPackedHeaderH264_SPS = 1,
VAEncPackedHeaderH264_PPS = 2,
VAEncPackedHeaderH264_Slice = 3,
VAEncPackedHeaderH264_SEI = 2147483649,
}
pub use self::_bindgen_ty_18 as VAEncPackedHeaderTypeH264;
#[repr(C)]
pub struct _VAEncSequenceParameterBufferH264 {
pub seq_parameter_set_id: ::std::os::raw::c_uchar,
pub level_idc: ::std::os::raw::c_uchar,
pub intra_period: ::std::os::raw::c_uint,
pub intra_idr_period: ::std::os::raw::c_uint,
pub ip_period: ::std::os::raw::c_uint,
pub bits_per_second: ::std::os::raw::c_uint,
pub max_num_ref_frames: ::std::os::raw::c_uint,
pub picture_width_in_mbs: ::std::os::raw::c_ushort,
pub picture_height_in_mbs: ::std::os::raw::c_ushort,
pub seq_fields: _VAEncSequenceParameterBufferH264__bindgen_ty_1,
pub bit_depth_luma_minus8: ::std::os::raw::c_uchar,
pub bit_depth_chroma_minus8: ::std::os::raw::c_uchar,
pub num_ref_frames_in_pic_order_cnt_cycle: ::std::os::raw::c_uchar,
pub offset_for_non_ref_pic: ::std::os::raw::c_int,
pub offset_for_top_to_bottom_field: ::std::os::raw::c_int,
pub offset_for_ref_frame: [::std::os::raw::c_int; 256usize],
pub frame_cropping_flag: ::std::os::raw::c_uchar,
pub frame_crop_left_offset: ::std::os::raw::c_uint,
pub frame_crop_right_offset: ::std::os::raw::c_uint,
pub frame_crop_top_offset: ::std::os::raw::c_uint,
pub frame_crop_bottom_offset: ::std::os::raw::c_uint,
pub vui_parameters_present_flag: ::std::os::raw::c_uchar,
pub vui_fields: _VAEncSequenceParameterBufferH264__bindgen_ty_2,
pub aspect_ratio_idc: ::std::os::raw::c_uchar,
pub sar_width: ::std::os::raw::c_uint,
pub sar_height: ::std::os::raw::c_uint,
pub num_units_in_tick: ::std::os::raw::c_uint,
pub time_scale: ::std::os::raw::c_uint,
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAEncSequenceParameterBufferH264__bindgen_ty_1 {
pub bits: __BindgenUnionField<_VAEncSequenceParameterBufferH264__bindgen_ty_1__bindgen_ty_1>,
pub value: __BindgenUnionField<::std::os::raw::c_uint>,
pub bindgen_union_field: u32,
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAEncSequenceParameterBufferH264__bindgen_ty_1__bindgen_ty_1 {
pub _bitfield_1: u32,
}
#[test]
fn bindgen_test_layout__VAEncSequenceParameterBufferH264__bindgen_ty_1__bindgen_ty_1() {
assert_eq!(::std::mem::size_of::<_VAEncSequenceParameterBufferH264__bindgen_ty_1__bindgen_ty_1>()
, 4usize);
assert_eq!(::std::mem::align_of::<_VAEncSequenceParameterBufferH264__bindgen_ty_1__bindgen_ty_1>()
, 4usize);
}
impl Clone for _VAEncSequenceParameterBufferH264__bindgen_ty_1__bindgen_ty_1 {
fn clone(&self) -> Self { *self }
}
impl _VAEncSequenceParameterBufferH264__bindgen_ty_1__bindgen_ty_1 {
#[inline]
pub fn chroma_format_idc(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (3usize as u32)) >>
0u32) as u32)
}
}
#[inline]
pub fn set_chroma_format_idc(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(3usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 0u32) & (3usize as u32);
}
#[inline]
pub fn frame_mbs_only_flag(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (4usize as u32)) >>
2u32) as u32)
}
}
#[inline]
pub fn set_frame_mbs_only_flag(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(4usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 2u32) & (4usize as u32);
}
#[inline]
pub fn mb_adaptive_frame_field_flag(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (8usize as u32)) >>
3u32) as u32)
}
}
#[inline]
pub fn set_mb_adaptive_frame_field_flag(&mut self,
val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(8usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 3u32) & (8usize as u32);
}
#[inline]
pub fn seq_scaling_matrix_present_flag(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (16usize as u32)) >>
4u32) as u32)
}
}
#[inline]
pub fn set_seq_scaling_matrix_present_flag(&mut self,
val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(16usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 4u32) & (16usize as u32);
}
#[inline]
pub fn direct_8x8_inference_flag(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (32usize as u32)) >>
5u32) as u32)
}
}
#[inline]
pub fn set_direct_8x8_inference_flag(&mut self,
val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(32usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 5u32) & (32usize as u32);
}
#[inline]
pub fn log2_max_frame_num_minus4(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (960usize as u32)) >>
6u32) as u32)
}
}
#[inline]
pub fn set_log2_max_frame_num_minus4(&mut self,
val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(960usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 6u32) & (960usize as u32);
}
#[inline]
pub fn pic_order_cnt_type(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (3072usize as u32)) >>
10u32) as u32)
}
}
#[inline]
pub fn set_pic_order_cnt_type(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(3072usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 10u32) & (3072usize as u32);
}
#[inline]
pub fn log2_max_pic_order_cnt_lsb_minus4(&self)
-> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (61440usize as u32)) >>
12u32) as u32)
}
}
#[inline]
pub fn set_log2_max_pic_order_cnt_lsb_minus4(&mut self,
val:
::std::os::raw::c_uint) {
self._bitfield_1 &= !(61440usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 12u32) & (61440usize as u32);
}
#[inline]
pub fn delta_pic_order_always_zero_flag(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (65536usize as u32)) >>
16u32) as u32)
}
}
#[inline]
pub fn set_delta_pic_order_always_zero_flag(&mut self,
val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(65536usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 16u32) & (65536usize as u32);
}
}
#[test]
fn bindgen_test_layout__VAEncSequenceParameterBufferH264__bindgen_ty_1() {
assert_eq!(::std::mem::size_of::<_VAEncSequenceParameterBufferH264__bindgen_ty_1>()
, 4usize);
assert_eq!(::std::mem::align_of::<_VAEncSequenceParameterBufferH264__bindgen_ty_1>()
, 4usize);
}
impl Clone for _VAEncSequenceParameterBufferH264__bindgen_ty_1 {
fn clone(&self) -> Self { *self }
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAEncSequenceParameterBufferH264__bindgen_ty_2 {
pub bits: __BindgenUnionField<_VAEncSequenceParameterBufferH264__bindgen_ty_2__bindgen_ty_1>,
pub value: __BindgenUnionField<::std::os::raw::c_uint>,
pub bindgen_union_field: u32,
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAEncSequenceParameterBufferH264__bindgen_ty_2__bindgen_ty_1 {
pub _bitfield_1: u16,
}
#[test]
fn bindgen_test_layout__VAEncSequenceParameterBufferH264__bindgen_ty_2__bindgen_ty_1() {
assert_eq!(::std::mem::size_of::<_VAEncSequenceParameterBufferH264__bindgen_ty_2__bindgen_ty_1>()
, 4usize);
assert_eq!(::std::mem::align_of::<_VAEncSequenceParameterBufferH264__bindgen_ty_2__bindgen_ty_1>()
, 4usize);
}
impl Clone for _VAEncSequenceParameterBufferH264__bindgen_ty_2__bindgen_ty_1 {
fn clone(&self) -> Self { *self }
}
impl _VAEncSequenceParameterBufferH264__bindgen_ty_2__bindgen_ty_1 {
#[inline]
pub fn aspect_ratio_info_present_flag(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (1usize as u16)) >>
0u32) as u32)
}
}
#[inline]
pub fn set_aspect_ratio_info_present_flag(&mut self,
val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(1usize as u16);
self._bitfield_1 |= ((val as u32 as u16) << 0u32) & (1usize as u16);
}
#[inline]
pub fn timing_info_present_flag(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (2usize as u16)) >>
1u32) as u32)
}
}
#[inline]
pub fn set_timing_info_present_flag(&mut self,
val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(2usize as u16);
self._bitfield_1 |= ((val as u32 as u16) << 1u32) & (2usize as u16);
}
#[inline]
pub fn bitstream_restriction_flag(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (4usize as u16)) >>
2u32) as u32)
}
}
#[inline]
pub fn set_bitstream_restriction_flag(&mut self,
val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(4usize as u16);
self._bitfield_1 |= ((val as u32 as u16) << 2u32) & (4usize as u16);
}
#[inline]
pub fn log2_max_mv_length_horizontal(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (248usize as u16)) >>
3u32) as u32)
}
}
#[inline]
pub fn set_log2_max_mv_length_horizontal(&mut self,
val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(248usize as u16);
self._bitfield_1 |= ((val as u32 as u16) << 3u32) & (248usize as u16);
}
#[inline]
pub fn log2_max_mv_length_vertical(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (7936usize as u16)) >>
8u32) as u32)
}
}
#[inline]
pub fn set_log2_max_mv_length_vertical(&mut self,
val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(7936usize as u16);
self._bitfield_1 |=
((val as u32 as u16) << 8u32) & (7936usize as u16);
}
}
#[test]
fn bindgen_test_layout__VAEncSequenceParameterBufferH264__bindgen_ty_2() {
assert_eq!(::std::mem::size_of::<_VAEncSequenceParameterBufferH264__bindgen_ty_2>()
, 4usize);
assert_eq!(::std::mem::align_of::<_VAEncSequenceParameterBufferH264__bindgen_ty_2>()
, 4usize);
}
impl Clone for _VAEncSequenceParameterBufferH264__bindgen_ty_2 {
fn clone(&self) -> Self { *self }
}
#[test]
fn bindgen_test_layout__VAEncSequenceParameterBufferH264() {
assert_eq!(::std::mem::size_of::<_VAEncSequenceParameterBufferH264>() ,
1116usize);
assert_eq!(::std::mem::align_of::<_VAEncSequenceParameterBufferH264>() ,
4usize);
}
impl Clone for _VAEncSequenceParameterBufferH264 {
    fn clone(&self) -> Self { *self }
}
pub type VAEncSequenceParameterBufferH264 = _VAEncSequenceParameterBufferH264;
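// Hand-written illustrative test (not bindgen output): shows how the
// generated setter/getter pairs on the anonymous seq_fields bit-struct are
// meant to be used. The values below are arbitrary samples, not a validated
// H.264 encoder configuration.
#[test]
fn example_usage_h264_seq_fields_bitfields() {
    let mut fields = _VAEncSequenceParameterBufferH264__bindgen_ty_1__bindgen_ty_1 {
        _bitfield_1: 0,
    };
    fields.set_chroma_format_idc(1); // 1 = 4:2:0
    fields.set_frame_mbs_only_flag(1);
    fields.set_log2_max_frame_num_minus4(4);
    fields.set_pic_order_cnt_type(0);
    assert_eq!(fields.chroma_format_idc(), 1);
    assert_eq!(fields.frame_mbs_only_flag(), 1);
    assert_eq!(fields.log2_max_frame_num_minus4(), 4);
    assert_eq!(fields.pic_order_cnt_type(), 0);
}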
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAEncPictureParameterBufferH264 {
pub CurrPic: VAPictureH264,
pub ReferenceFrames: [VAPictureH264; 16usize],
pub coded_buf: VABufferID,
pub pic_parameter_set_id: ::std::os::raw::c_uchar,
pub seq_parameter_set_id: ::std::os::raw::c_uchar,
pub last_picture: ::std::os::raw::c_uchar,
pub frame_num: ::std::os::raw::c_ushort,
pub pic_init_qp: ::std::os::raw::c_uchar,
pub num_ref_idx_l0_active_minus1: ::std::os::raw::c_uchar,
pub num_ref_idx_l1_active_minus1: ::std::os::raw::c_uchar,
pub chroma_qp_index_offset: ::std::os::raw::c_char,
pub second_chroma_qp_index_offset: ::std::os::raw::c_char,
pub pic_fields: _VAEncPictureParameterBufferH264__bindgen_ty_1,
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAEncPictureParameterBufferH264__bindgen_ty_1 {
pub bits: __BindgenUnionField<_VAEncPictureParameterBufferH264__bindgen_ty_1__bindgen_ty_1>,
pub value: __BindgenUnionField<::std::os::raw::c_uint>,
pub bindgen_union_field: u32,
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAEncPictureParameterBufferH264__bindgen_ty_1__bindgen_ty_1 {
pub _bitfield_1: u16,
}
#[test]
fn bindgen_test_layout__VAEncPictureParameterBufferH264__bindgen_ty_1__bindgen_ty_1() {
assert_eq!(::std::mem::size_of::<_VAEncPictureParameterBufferH264__bindgen_ty_1__bindgen_ty_1>()
, 4usize);
assert_eq!(::std::mem::align_of::<_VAEncPictureParameterBufferH264__bindgen_ty_1__bindgen_ty_1>()
, 4usize);
}
impl Clone for _VAEncPictureParameterBufferH264__bindgen_ty_1__bindgen_ty_1 {
fn clone(&self) -> Self { *self }
}
impl _VAEncPictureParameterBufferH264__bindgen_ty_1__bindgen_ty_1 {
#[inline]
pub fn idr_pic_flag(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (1usize as u16)) >>
0u32) as u32)
}
}
#[inline]
pub fn set_idr_pic_flag(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(1usize as u16);
self._bitfield_1 |= ((val as u32 as u16) << 0u32) & (1usize as u16);
}
#[inline]
pub fn reference_pic_flag(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (6usize as u16)) >>
1u32) as u32)
}
}
#[inline]
pub fn set_reference_pic_flag(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(6usize as u16);
self._bitfield_1 |= ((val as u32 as u16) << 1u32) & (6usize as u16);
}
#[inline]
pub fn entropy_coding_mode_flag(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (8usize as u16)) >>
3u32) as u32)
}
}
#[inline]
pub fn set_entropy_coding_mode_flag(&mut self,
val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(8usize as u16);
self._bitfield_1 |= ((val as u32 as u16) << 3u32) & (8usize as u16);
}
#[inline]
pub fn weighted_pred_flag(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (16usize as u16)) >>
4u32) as u32)
}
}
#[inline]
pub fn set_weighted_pred_flag(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(16usize as u16);
self._bitfield_1 |= ((val as u32 as u16) << 4u32) & (16usize as u16);
}
#[inline]
pub fn weighted_bipred_idc(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (96usize as u16)) >>
5u32) as u32)
}
}
#[inline]
pub fn set_weighted_bipred_idc(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(96usize as u16);
self._bitfield_1 |= ((val as u32 as u16) << 5u32) & (96usize as u16);
}
#[inline]
pub fn constrained_intra_pred_flag(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (128usize as u16)) >>
7u32) as u32)
}
}
#[inline]
pub fn set_constrained_intra_pred_flag(&mut self,
val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(128usize as u16);
self._bitfield_1 |= ((val as u32 as u16) << 7u32) & (128usize as u16);
}
#[inline]
pub fn transform_8x8_mode_flag(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (256usize as u16)) >>
8u32) as u32)
}
}
#[inline]
pub fn set_transform_8x8_mode_flag(&mut self,
val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(256usize as u16);
self._bitfield_1 |= ((val as u32 as u16) << 8u32) & (256usize as u16);
}
#[inline]
pub fn deblocking_filter_control_present_flag(&self)
-> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (512usize as u16)) >>
9u32) as u32)
}
}
#[inline]
pub fn set_deblocking_filter_control_present_flag(&mut self,
val:
::std::os::raw::c_uint) {
self._bitfield_1 &= !(512usize as u16);
self._bitfield_1 |= ((val as u32 as u16) << 9u32) & (512usize as u16);
}
#[inline]
pub fn redundant_pic_cnt_present_flag(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (1024usize as u16)) >>
10u32) as u32)
}
}
#[inline]
pub fn set_redundant_pic_cnt_present_flag(&mut self,
val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(1024usize as u16);
self._bitfield_1 |=
((val as u32 as u16) << 10u32) & (1024usize as u16);
}
#[inline]
pub fn pic_order_present_flag(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (2048usize as u16)) >>
11u32) as u32)
}
}
#[inline]
pub fn set_pic_order_present_flag(&mut self,
val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(2048usize as u16);
self._bitfield_1 |=
((val as u32 as u16) << 11u32) & (2048usize as u16);
}
#[inline]
pub fn pic_scaling_matrix_present_flag(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (4096usize as u16)) >>
12u32) as u32)
}
}
#[inline]
pub fn set_pic_scaling_matrix_present_flag(&mut self,
val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(4096usize as u16);
self._bitfield_1 |=
((val as u32 as u16) << 12u32) & (4096usize as u16);
}
}
#[test]
fn bindgen_test_layout__VAEncPictureParameterBufferH264__bindgen_ty_1() {
assert_eq!(::std::mem::size_of::<_VAEncPictureParameterBufferH264__bindgen_ty_1>()
, 4usize);
assert_eq!(::std::mem::align_of::<_VAEncPictureParameterBufferH264__bindgen_ty_1>()
, 4usize);
}
impl Clone for _VAEncPictureParameterBufferH264__bindgen_ty_1 {
fn clone(&self) -> Self { *self }
}
#[test]
fn bindgen_test_layout__VAEncPictureParameterBufferH264() {
assert_eq!(::std::mem::size_of::<_VAEncPictureParameterBufferH264>() ,
360usize);
assert_eq!(::std::mem::align_of::<_VAEncPictureParameterBufferH264>() ,
4usize);
}
impl Clone for _VAEncPictureParameterBufferH264 {
fn clone(&self) -> Self { *self }
}
pub type VAEncPictureParameterBufferH264 = _VAEncPictureParameterBufferH264;
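// Hand-written illustrative test (not bindgen output): one way to populate a
// VAEncPictureParameterBufferH264 before handing it to libva (e.g. through
// vaCreateBuffer). ::std::mem::zeroed() is assumed to be a reasonable blank
// starting point because every field is a plain integer, array, or nested
// repr(C) struct.
#[test]
fn example_usage_h264_pic_param_init() {
    let mut pic: VAEncPictureParameterBufferH264 = unsafe { ::std::mem::zeroed() };
    pic.pic_init_qp = 26;
    pic.num_ref_idx_l0_active_minus1 = 0;
    // The anonymous pic_fields union is easiest to fill through its plain
    // bit-struct helper; the raw value is then copied into the union storage.
    let mut flags = _VAEncPictureParameterBufferH264__bindgen_ty_1__bindgen_ty_1 {
        _bitfield_1: 0,
    };
    flags.set_idr_pic_flag(1);
    flags.set_entropy_coding_mode_flag(1); // CABAC
    pic.pic_fields.bindgen_union_field = flags._bitfield_1 as u32;
    assert_eq!(pic.pic_fields.bindgen_union_field & 1, 1); // idr_pic_flag round-trips
}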
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAEncSliceParameterBufferH264 {
pub macroblock_address: ::std::os::raw::c_uint,
pub num_macroblocks: ::std::os::raw::c_uint,
pub macroblock_info: VABufferID,
pub slice_type: ::std::os::raw::c_uchar,
pub pic_parameter_set_id: ::std::os::raw::c_uchar,
pub idr_pic_id: ::std::os::raw::c_ushort,
pub pic_order_cnt_lsb: ::std::os::raw::c_ushort,
pub delta_pic_order_cnt_bottom: ::std::os::raw::c_int,
pub delta_pic_order_cnt: [::std::os::raw::c_int; 2usize],
pub direct_spatial_mv_pred_flag: ::std::os::raw::c_uchar,
pub num_ref_idx_active_override_flag: ::std::os::raw::c_uchar,
pub num_ref_idx_l0_active_minus1: ::std::os::raw::c_uchar,
pub num_ref_idx_l1_active_minus1: ::std::os::raw::c_uchar,
pub RefPicList0: [VAPictureH264; 32usize],
pub RefPicList1: [VAPictureH264; 32usize],
pub luma_log2_weight_denom: ::std::os::raw::c_uchar,
pub chroma_log2_weight_denom: ::std::os::raw::c_uchar,
pub luma_weight_l0_flag: ::std::os::raw::c_uchar,
pub luma_weight_l0: [::std::os::raw::c_short; 32usize],
pub luma_offset_l0: [::std::os::raw::c_short; 32usize],
pub chroma_weight_l0_flag: ::std::os::raw::c_uchar,
pub chroma_weight_l0: [[::std::os::raw::c_short; 2usize]; 32usize],
pub chroma_offset_l0: [[::std::os::raw::c_short; 2usize]; 32usize],
pub luma_weight_l1_flag: ::std::os::raw::c_uchar,
pub luma_weight_l1: [::std::os::raw::c_short; 32usize],
pub luma_offset_l1: [::std::os::raw::c_short; 32usize],
pub chroma_weight_l1_flag: ::std::os::raw::c_uchar,
pub chroma_weight_l1: [[::std::os::raw::c_short; 2usize]; 32usize],
pub chroma_offset_l1: [[::std::os::raw::c_short; 2usize]; 32usize],
pub cabac_init_idc: ::std::os::raw::c_uchar,
pub slice_qp_delta: ::std::os::raw::c_char,
pub disable_deblocking_filter_idc: ::std::os::raw::c_uchar,
pub slice_alpha_c0_offset_div2: ::std::os::raw::c_char,
pub slice_beta_offset_div2: ::std::os::raw::c_char,
}
#[test]
fn bindgen_test_layout__VAEncSliceParameterBufferH264() {
assert_eq!(::std::mem::size_of::<_VAEncSliceParameterBufferH264>() ,
2100usize);
assert_eq!(::std::mem::align_of::<_VAEncSliceParameterBufferH264>() ,
4usize);
}
impl Clone for _VAEncSliceParameterBufferH264 {
fn clone(&self) -> Self { *self }
}
pub type VAEncSliceParameterBufferH264 = _VAEncSliceParameterBufferH264;
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAEncMacroblockParameterBufferH264 {
pub qp: ::std::os::raw::c_uchar,
pub info: _VAEncMacroblockParameterBufferH264__bindgen_ty_1,
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAEncMacroblockParameterBufferH264__bindgen_ty_1 {
pub intra_fields: __BindgenUnionField<_VAEncMacroblockParameterBufferH264__bindgen_ty_1__bindgen_ty_1>,
pub inter_fields: __BindgenUnionField<_VAEncMacroblockParameterBufferH264__bindgen_ty_1__bindgen_ty_2>,
pub bindgen_union_field: [u32; 2usize],
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAEncMacroblockParameterBufferH264__bindgen_ty_1__bindgen_ty_1 {
pub bits: _VAEncMacroblockParameterBufferH264__bindgen_ty_1__bindgen_ty_1__bindgen_ty_1,
pub value: ::std::os::raw::c_uint,
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAEncMacroblockParameterBufferH264__bindgen_ty_1__bindgen_ty_1__bindgen_ty_1 {
pub _bitfield_1: u16,
pub bindgen_union_field: u32,
}
#[test]
fn bindgen_test_layout__VAEncMacroblockParameterBufferH264__bindgen_ty_1__bindgen_ty_1__bindgen_ty_1() {
assert_eq!(::std::mem::size_of::<_VAEncMacroblockParameterBufferH264__bindgen_ty_1__bindgen_ty_1__bindgen_ty_1>()
, 4usize);
assert_eq!(::std::mem::align_of::<_VAEncMacroblockParameterBufferH264__bindgen_ty_1__bindgen_ty_1__bindgen_ty_1>()
, 4usize);
}
impl Clone for
_VAEncMacroblockParameterBufferH264__bindgen_ty_1__bindgen_ty_1__bindgen_ty_1
{
fn clone(&self) -> Self { *self }
}
impl _VAEncMacroblockParameterBufferH264__bindgen_ty_1__bindgen_ty_1__bindgen_ty_1
{
#[inline]
pub fn pred_avail_override_flag(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (1usize as u16)) >>
0u32) as u32)
}
}
#[inline]
pub fn set_pred_avail_override_flag(&mut self,
val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(1usize as u16);
self._bitfield_1 |= ((val as u32 as u16) << 0u32) & (1usize as u16);
}
#[inline]
pub fn pred_avail_flags(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (510usize as u16)) >>
1u32) as u32)
}
}
#[inline]
pub fn set_pred_avail_flags(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(510usize as u16);
self._bitfield_1 |= ((val as u32 as u16) << 1u32) & (510usize as u16);
}
}
#[test]
fn bindgen_test_layout__VAEncMacroblockParameterBufferH264__bindgen_ty_1__bindgen_ty_1() {
assert_eq!(::std::mem::size_of::<_VAEncMacroblockParameterBufferH264__bindgen_ty_1__bindgen_ty_1>()
, 8usize);
assert_eq!(::std::mem::align_of::<_VAEncMacroblockParameterBufferH264__bindgen_ty_1__bindgen_ty_1>()
, 4usize);
}
impl Clone for _VAEncMacroblockParameterBufferH264__bindgen_ty_1__bindgen_ty_1
{
fn clone(&self) -> Self { *self }
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAEncMacroblockParameterBufferH264__bindgen_ty_1__bindgen_ty_2 {
pub bits: _VAEncMacroblockParameterBufferH264__bindgen_ty_1__bindgen_ty_2__bindgen_ty_1,
pub value: ::std::os::raw::c_uint,
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAEncMacroblockParameterBufferH264__bindgen_ty_1__bindgen_ty_2__bindgen_ty_1 {
pub bindgen_union_field: [u8; 0usize],
pub _address: u8,
}
#[test]
fn bindgen_test_layout__VAEncMacroblockParameterBufferH264__bindgen_ty_1__bindgen_ty_2__bindgen_ty_1() {
assert_eq!(::std::mem::size_of::<_VAEncMacroblockParameterBufferH264__bindgen_ty_1__bindgen_ty_2__bindgen_ty_1>()
, 0usize);
assert_eq!(::std::mem::align_of::<_VAEncMacroblockParameterBufferH264__bindgen_ty_1__bindgen_ty_2__bindgen_ty_1>()
, 1usize);
}
impl Clone for
_VAEncMacroblockParameterBufferH264__bindgen_ty_1__bindgen_ty_2__bindgen_ty_1
{
fn clone(&self) -> Self { *self }
}
#[test]
fn bindgen_test_layout__VAEncMacroblockParameterBufferH264__bindgen_ty_1__bindgen_ty_2() {
assert_eq!(::std::mem::size_of::<_VAEncMacroblockParameterBufferH264__bindgen_ty_1__bindgen_ty_2>()
, 4usize);
assert_eq!(::std::mem::align_of::<_VAEncMacroblockParameterBufferH264__bindgen_ty_1__bindgen_ty_2>()
, 4usize);
}
impl Clone for _VAEncMacroblockParameterBufferH264__bindgen_ty_1__bindgen_ty_2
{
fn clone(&self) -> Self { *self }
}
#[test]
fn bindgen_test_layout__VAEncMacroblockParameterBufferH264__bindgen_ty_1() {
assert_eq!(::std::mem::size_of::<_VAEncMacroblockParameterBufferH264__bindgen_ty_1>()
, 8usize);
assert_eq!(::std::mem::align_of::<_VAEncMacroblockParameterBufferH264__bindgen_ty_1>()
, 4usize);
}
impl Clone for _VAEncMacroblockParameterBufferH264__bindgen_ty_1 {
fn clone(&self) -> Self { *self }
}
#[test]
fn bindgen_test_layout__VAEncMacroblockParameterBufferH264() {
assert_eq!(::std::mem::size_of::<_VAEncMacroblockParameterBufferH264>() ,
12usize);
assert_eq!(::std::mem::align_of::<_VAEncMacroblockParameterBufferH264>() ,
4usize);
}
impl Clone for _VAEncMacroblockParameterBufferH264 {
fn clone(&self) -> Self { *self }
}
pub type VAEncMacroblockParameterBufferH264 =
_VAEncMacroblockParameterBufferH264;
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAEncPictureParameterBufferJPEG {
pub reconstructed_picture: VASurfaceID,
pub picture_width: ::std::os::raw::c_ushort,
pub picture_height: ::std::os::raw::c_ushort,
pub coded_buf: VABufferID,
pub pic_flags: _VAEncPictureParameterBufferJPEG__bindgen_ty_1,
pub sample_bit_depth: ::std::os::raw::c_uchar,
pub num_scan: ::std::os::raw::c_uchar,
pub num_components: ::std::os::raw::c_ushort,
pub component_id: [::std::os::raw::c_uchar; 4usize],
pub quantiser_table_selector: [::std::os::raw::c_uchar; 4usize],
pub quality: ::std::os::raw::c_uchar,
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAEncPictureParameterBufferJPEG__bindgen_ty_1 {
pub bits: __BindgenUnionField<_VAEncPictureParameterBufferJPEG__bindgen_ty_1__bindgen_ty_1>,
pub value: __BindgenUnionField<::std::os::raw::c_uint>,
pub bindgen_union_field: u32,
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAEncPictureParameterBufferJPEG__bindgen_ty_1__bindgen_ty_1 {
pub _bitfield_1: u8,
}
#[test]
fn bindgen_test_layout__VAEncPictureParameterBufferJPEG__bindgen_ty_1__bindgen_ty_1() {
assert_eq!(::std::mem::size_of::<_VAEncPictureParameterBufferJPEG__bindgen_ty_1__bindgen_ty_1>()
, 4usize);
assert_eq!(::std::mem::align_of::<_VAEncPictureParameterBufferJPEG__bindgen_ty_1__bindgen_ty_1>()
, 4usize);
}
impl Clone for _VAEncPictureParameterBufferJPEG__bindgen_ty_1__bindgen_ty_1 {
fn clone(&self) -> Self { *self }
}
impl _VAEncPictureParameterBufferJPEG__bindgen_ty_1__bindgen_ty_1 {
#[inline]
pub fn profile(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (3usize as u8)) >>
0u32) as u32)
}
}
#[inline]
pub fn set_profile(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(3usize as u8);
self._bitfield_1 |= ((val as u32 as u8) << 0u32) & (3usize as u8);
}
#[inline]
pub fn progressive(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (4usize as u8)) >>
2u32) as u32)
}
}
#[inline]
pub fn set_progressive(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(4usize as u8);
self._bitfield_1 |= ((val as u32 as u8) << 2u32) & (4usize as u8);
}
#[inline]
pub fn huffman(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (8usize as u8)) >>
3u32) as u32)
}
}
#[inline]
pub fn set_huffman(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(8usize as u8);
self._bitfield_1 |= ((val as u32 as u8) << 3u32) & (8usize as u8);
}
#[inline]
pub fn interleaved(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (16usize as u8)) >>
4u32) as u32)
}
}
#[inline]
pub fn set_interleaved(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(16usize as u8);
self._bitfield_1 |= ((val as u32 as u8) << 4u32) & (16usize as u8);
}
#[inline]
pub fn differential(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (32usize as u8)) >>
5u32) as u32)
}
}
#[inline]
pub fn set_differential(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(32usize as u8);
self._bitfield_1 |= ((val as u32 as u8) << 5u32) & (32usize as u8);
}
}
#[test]
fn bindgen_test_layout__VAEncPictureParameterBufferJPEG__bindgen_ty_1() {
assert_eq!(::std::mem::size_of::<_VAEncPictureParameterBufferJPEG__bindgen_ty_1>()
, 4usize);
assert_eq!(::std::mem::align_of::<_VAEncPictureParameterBufferJPEG__bindgen_ty_1>()
, 4usize);
}
impl Clone for _VAEncPictureParameterBufferJPEG__bindgen_ty_1 {
fn clone(&self) -> Self { *self }
}
#[test]
fn bindgen_test_layout__VAEncPictureParameterBufferJPEG() {
assert_eq!(::std::mem::size_of::<_VAEncPictureParameterBufferJPEG>() ,
32usize);
assert_eq!(::std::mem::align_of::<_VAEncPictureParameterBufferJPEG>() ,
4usize);
}
impl Clone for _VAEncPictureParameterBufferJPEG {
fn clone(&self) -> Self { *self }
}
pub type VAEncPictureParameterBufferJPEG = _VAEncPictureParameterBufferJPEG;
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAEncSliceParameterBufferJPEG {
pub restart_interval: ::std::os::raw::c_ushort,
pub num_components: ::std::os::raw::c_ushort,
pub components: [_VAEncSliceParameterBufferJPEG__bindgen_ty_1; 4usize],
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAEncSliceParameterBufferJPEG__bindgen_ty_1 {
pub component_selector: ::std::os::raw::c_uchar,
pub dc_table_selector: ::std::os::raw::c_uchar,
pub ac_table_selector: ::std::os::raw::c_uchar,
}
#[test]
fn bindgen_test_layout__VAEncSliceParameterBufferJPEG__bindgen_ty_1() {
assert_eq!(::std::mem::size_of::<_VAEncSliceParameterBufferJPEG__bindgen_ty_1>()
, 3usize);
assert_eq!(::std::mem::align_of::<_VAEncSliceParameterBufferJPEG__bindgen_ty_1>()
, 1usize);
}
impl Clone for _VAEncSliceParameterBufferJPEG__bindgen_ty_1 {
fn clone(&self) -> Self { *self }
}
#[test]
fn bindgen_test_layout__VAEncSliceParameterBufferJPEG() {
assert_eq!(::std::mem::size_of::<_VAEncSliceParameterBufferJPEG>() ,
16usize);
assert_eq!(::std::mem::align_of::<_VAEncSliceParameterBufferJPEG>() ,
2usize);
}
impl Clone for _VAEncSliceParameterBufferJPEG {
fn clone(&self) -> Self { *self }
}
pub type VAEncSliceParameterBufferJPEG = _VAEncSliceParameterBufferJPEG;
#[repr(C)]
pub struct _VAQMatrixBufferJPEG {
pub load_lum_quantiser_matrix: ::std::os::raw::c_int,
pub load_chroma_quantiser_matrix: ::std::os::raw::c_int,
pub lum_quantiser_matrix: [::std::os::raw::c_uchar; 64usize],
pub chroma_quantiser_matrix: [::std::os::raw::c_uchar; 64usize],
}
#[test]
fn bindgen_test_layout__VAQMatrixBufferJPEG() {
assert_eq!(::std::mem::size_of::<_VAQMatrixBufferJPEG>() , 136usize);
assert_eq!(::std::mem::align_of::<_VAQMatrixBufferJPEG>() , 4usize);
}
pub type VAQMatrixBufferJPEG = _VAQMatrixBufferJPEG;
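// Hand-written illustrative test (not bindgen output): fills the JPEG
// quantiser tables with a flat placeholder value. A real encoder would load
// the standard Annex K tables (or quality-scaled variants) instead.
#[test]
fn example_usage_jpeg_qmatrix_init() {
    let mut qm: VAQMatrixBufferJPEG = unsafe { ::std::mem::zeroed() };
    qm.load_lum_quantiser_matrix = 1;
    qm.load_chroma_quantiser_matrix = 1;
    qm.lum_quantiser_matrix = [16; 64];
    qm.chroma_quantiser_matrix = [16; 64];
    assert_eq!(qm.lum_quantiser_matrix[63], 16);
}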
pub type VAQMatrixBufferMPEG2 = VAIQMatrixBufferMPEG2;
pub const VAEncPackedHeaderMPEG2_SPS: _bindgen_ty_19 =
_bindgen_ty_19::VAEncPackedHeaderMPEG2_SPS;
pub const VAEncPackedHeaderMPEG2_PPS: _bindgen_ty_19 =
_bindgen_ty_19::VAEncPackedHeaderMPEG2_PPS;
pub const VAEncPackedHeaderMPEG2_Slice: _bindgen_ty_19 =
_bindgen_ty_19::VAEncPackedHeaderMPEG2_Slice;
#[repr(u32)]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum _bindgen_ty_19 {
VAEncPackedHeaderMPEG2_SPS = 1,
VAEncPackedHeaderMPEG2_PPS = 2,
VAEncPackedHeaderMPEG2_Slice = 3,
}
pub use self::_bindgen_ty_19 as VAEncPackedHeaderTypeMPEG2;
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAEncSequenceParameterBufferMPEG2 {
pub intra_period: ::std::os::raw::c_uint,
pub ip_period: ::std::os::raw::c_uint,
pub picture_width: ::std::os::raw::c_ushort,
pub picture_height: ::std::os::raw::c_ushort,
pub bits_per_second: ::std::os::raw::c_uint,
pub frame_rate: f32,
pub aspect_ratio_information: ::std::os::raw::c_ushort,
pub vbv_buffer_size: ::std::os::raw::c_uint,
pub sequence_extension: _VAEncSequenceParameterBufferMPEG2__bindgen_ty_1,
pub new_gop_header: ::std::os::raw::c_uint,
pub gop_header: _VAEncSequenceParameterBufferMPEG2__bindgen_ty_2,
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAEncSequenceParameterBufferMPEG2__bindgen_ty_1 {
pub bits: __BindgenUnionField<_VAEncSequenceParameterBufferMPEG2__bindgen_ty_1__bindgen_ty_1>,
pub value: __BindgenUnionField<::std::os::raw::c_uint>,
pub bindgen_union_field: u32,
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAEncSequenceParameterBufferMPEG2__bindgen_ty_1__bindgen_ty_1 {
pub _bitfield_1: u32,
}
#[test]
fn bindgen_test_layout__VAEncSequenceParameterBufferMPEG2__bindgen_ty_1__bindgen_ty_1() {
assert_eq!(::std::mem::size_of::<_VAEncSequenceParameterBufferMPEG2__bindgen_ty_1__bindgen_ty_1>()
, 4usize);
assert_eq!(::std::mem::align_of::<_VAEncSequenceParameterBufferMPEG2__bindgen_ty_1__bindgen_ty_1>()
, 4usize);
}
impl Clone for _VAEncSequenceParameterBufferMPEG2__bindgen_ty_1__bindgen_ty_1
{
fn clone(&self) -> Self { *self }
}
impl _VAEncSequenceParameterBufferMPEG2__bindgen_ty_1__bindgen_ty_1 {
#[inline]
pub fn profile_and_level_indication(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (255usize as u32)) >>
0u32) as u32)
}
}
#[inline]
pub fn set_profile_and_level_indication(&mut self,
val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(255usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 0u32) & (255usize as u32);
}
#[inline]
pub fn progressive_sequence(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (256usize as u32)) >>
8u32) as u32)
}
}
#[inline]
pub fn set_progressive_sequence(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(256usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 8u32) & (256usize as u32);
}
#[inline]
pub fn chroma_format(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (1536usize as u32)) >>
9u32) as u32)
}
}
#[inline]
pub fn set_chroma_format(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(1536usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 9u32) & (1536usize as u32);
}
#[inline]
pub fn low_delay(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (2048usize as u32)) >>
11u32) as u32)
}
}
#[inline]
pub fn set_low_delay(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(2048usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 11u32) & (2048usize as u32);
}
#[inline]
pub fn frame_rate_extension_n(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (12288usize as u32)) >>
12u32) as u32)
}
}
#[inline]
pub fn set_frame_rate_extension_n(&mut self,
val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(12288usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 12u32) & (12288usize as u32);
}
#[inline]
pub fn frame_rate_extension_d(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (507904usize as u32))
>> 14u32) as u32)
}
}
#[inline]
pub fn set_frame_rate_extension_d(&mut self,
val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(507904usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 14u32) & (507904usize as u32);
}
}
#[test]
fn bindgen_test_layout__VAEncSequenceParameterBufferMPEG2__bindgen_ty_1() {
assert_eq!(::std::mem::size_of::<_VAEncSequenceParameterBufferMPEG2__bindgen_ty_1>()
, 4usize);
assert_eq!(::std::mem::align_of::<_VAEncSequenceParameterBufferMPEG2__bindgen_ty_1>()
, 4usize);
}
impl Clone for _VAEncSequenceParameterBufferMPEG2__bindgen_ty_1 {
fn clone(&self) -> Self { *self }
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAEncSequenceParameterBufferMPEG2__bindgen_ty_2 {
pub bits: __BindgenUnionField<_VAEncSequenceParameterBufferMPEG2__bindgen_ty_2__bindgen_ty_1>,
pub value: __BindgenUnionField<::std::os::raw::c_uint>,
pub bindgen_union_field: u32,
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAEncSequenceParameterBufferMPEG2__bindgen_ty_2__bindgen_ty_1 {
pub _bitfield_1: u32,
}
#[test]
fn bindgen_test_layout__VAEncSequenceParameterBufferMPEG2__bindgen_ty_2__bindgen_ty_1() {
assert_eq!(::std::mem::size_of::<_VAEncSequenceParameterBufferMPEG2__bindgen_ty_2__bindgen_ty_1>()
, 4usize);
assert_eq!(::std::mem::align_of::<_VAEncSequenceParameterBufferMPEG2__bindgen_ty_2__bindgen_ty_1>()
, 4usize);
}
impl Clone for _VAEncSequenceParameterBufferMPEG2__bindgen_ty_2__bindgen_ty_1
{
fn clone(&self) -> Self { *self }
}
impl _VAEncSequenceParameterBufferMPEG2__bindgen_ty_2__bindgen_ty_1 {
#[inline]
pub fn time_code(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (33554431usize as u32))
>> 0u32) as u32)
}
}
#[inline]
pub fn set_time_code(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(33554431usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 0u32) & (33554431usize as u32);
}
#[inline]
pub fn closed_gop(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (33554432usize as u32))
>> 25u32) as u32)
}
}
#[inline]
pub fn set_closed_gop(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(33554432usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 25u32) & (33554432usize as u32);
}
#[inline]
pub fn broken_link(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (67108864usize as u32))
>> 26u32) as u32)
}
}
#[inline]
pub fn set_broken_link(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(67108864usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 26u32) & (67108864usize as u32);
}
}
#[test]
fn bindgen_test_layout__VAEncSequenceParameterBufferMPEG2__bindgen_ty_2() {
assert_eq!(::std::mem::size_of::<_VAEncSequenceParameterBufferMPEG2__bindgen_ty_2>()
, 4usize);
assert_eq!(::std::mem::align_of::<_VAEncSequenceParameterBufferMPEG2__bindgen_ty_2>()
, 4usize);
}
impl Clone for _VAEncSequenceParameterBufferMPEG2__bindgen_ty_2 {
fn clone(&self) -> Self { *self }
}
#[test]
fn bindgen_test_layout__VAEncSequenceParameterBufferMPEG2() {
assert_eq!(::std::mem::size_of::<_VAEncSequenceParameterBufferMPEG2>() ,
40usize);
assert_eq!(::std::mem::align_of::<_VAEncSequenceParameterBufferMPEG2>() ,
4usize);
}
impl Clone for _VAEncSequenceParameterBufferMPEG2 {
fn clone(&self) -> Self { *self }
}
pub type VAEncSequenceParameterBufferMPEG2 =
_VAEncSequenceParameterBufferMPEG2;
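// Hand-written illustrative test (not bindgen output): round-trips the MPEG-2
// GOP header bitfields defined above. The time_code value is an arbitrary
// 25-bit sample, not a real SMPTE time code.
#[test]
fn example_usage_mpeg2_gop_header_bits() {
    let mut gop = _VAEncSequenceParameterBufferMPEG2__bindgen_ty_2__bindgen_ty_1 {
        _bitfield_1: 0,
    };
    gop.set_time_code(0x1ABCDE);
    gop.set_closed_gop(1);
    assert_eq!(gop.time_code(), 0x1ABCDE);
    assert_eq!(gop.closed_gop(), 1);
    assert_eq!(gop.broken_link(), 0);
}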
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAEncPictureParameterBufferMPEG2 {
pub forward_reference_picture: VASurfaceID,
pub backward_reference_picture: VASurfaceID,
pub reconstructed_picture: VASurfaceID,
pub coded_buf: VABufferID,
pub last_picture: ::std::os::raw::c_uchar,
pub picture_type: VAEncPictureType,
pub temporal_reference: ::std::os::raw::c_uint,
pub vbv_delay: ::std::os::raw::c_uint,
pub f_code: [[::std::os::raw::c_uchar; 2usize]; 2usize],
pub picture_coding_extension: _VAEncPictureParameterBufferMPEG2__bindgen_ty_1,
pub composite_display: _VAEncPictureParameterBufferMPEG2__bindgen_ty_2,
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAEncPictureParameterBufferMPEG2__bindgen_ty_1 {
pub bits: __BindgenUnionField<_VAEncPictureParameterBufferMPEG2__bindgen_ty_1__bindgen_ty_1>,
pub value: __BindgenUnionField<::std::os::raw::c_uint>,
pub bindgen_union_field: u32,
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAEncPictureParameterBufferMPEG2__bindgen_ty_1__bindgen_ty_1 {
pub _bitfield_1: u16,
}
#[test]
fn bindgen_test_layout__VAEncPictureParameterBufferMPEG2__bindgen_ty_1__bindgen_ty_1() {
assert_eq!(::std::mem::size_of::<_VAEncPictureParameterBufferMPEG2__bindgen_ty_1__bindgen_ty_1>()
, 4usize);
assert_eq!(::std::mem::align_of::<_VAEncPictureParameterBufferMPEG2__bindgen_ty_1__bindgen_ty_1>()
, 4usize);
}
impl Clone for _VAEncPictureParameterBufferMPEG2__bindgen_ty_1__bindgen_ty_1 {
fn clone(&self) -> Self { *self }
}
impl _VAEncPictureParameterBufferMPEG2__bindgen_ty_1__bindgen_ty_1 {
#[inline]
pub fn intra_dc_precision(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (3usize as u16)) >>
0u32) as u32)
}
}
#[inline]
pub fn set_intra_dc_precision(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(3usize as u16);
self._bitfield_1 |= ((val as u32 as u16) << 0u32) & (3usize as u16);
}
#[inline]
pub fn picture_structure(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (12usize as u16)) >>
2u32) as u32)
}
}
#[inline]
pub fn set_picture_structure(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(12usize as u16);
self._bitfield_1 |= ((val as u32 as u16) << 2u32) & (12usize as u16);
}
#[inline]
pub fn top_field_first(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (16usize as u16)) >>
4u32) as u32)
}
}
#[inline]
pub fn set_top_field_first(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(16usize as u16);
self._bitfield_1 |= ((val as u32 as u16) << 4u32) & (16usize as u16);
}
#[inline]
pub fn frame_pred_frame_dct(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (32usize as u16)) >>
5u32) as u32)
}
}
#[inline]
pub fn set_frame_pred_frame_dct(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(32usize as u16);
self._bitfield_1 |= ((val as u32 as u16) << 5u32) & (32usize as u16);
}
#[inline]
pub fn concealment_motion_vectors(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (64usize as u16)) >>
6u32) as u32)
}
}
#[inline]
pub fn set_concealment_motion_vectors(&mut self,
val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(64usize as u16);
self._bitfield_1 |= ((val as u32 as u16) << 6u32) & (64usize as u16);
}
#[inline]
pub fn q_scale_type(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (128usize as u16)) >>
7u32) as u32)
}
}
#[inline]
pub fn set_q_scale_type(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(128usize as u16);
self._bitfield_1 |= ((val as u32 as u16) << 7u32) & (128usize as u16);
}
#[inline]
pub fn intra_vlc_format(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (256usize as u16)) >>
8u32) as u32)
}
}
#[inline]
pub fn set_intra_vlc_format(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(256usize as u16);
self._bitfield_1 |= ((val as u32 as u16) << 8u32) & (256usize as u16);
}
#[inline]
pub fn alternate_scan(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (512usize as u16)) >>
9u32) as u32)
}
}
#[inline]
pub fn set_alternate_scan(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(512usize as u16);
self._bitfield_1 |= ((val as u32 as u16) << 9u32) & (512usize as u16);
}
#[inline]
pub fn repeat_first_field(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (1024usize as u16)) >>
10u32) as u32)
}
}
#[inline]
pub fn set_repeat_first_field(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(1024usize as u16);
self._bitfield_1 |=
((val as u32 as u16) << 10u32) & (1024usize as u16);
}
#[inline]
pub fn progressive_frame(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (2048usize as u16)) >>
11u32) as u32)
}
}
#[inline]
pub fn set_progressive_frame(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(2048usize as u16);
self._bitfield_1 |=
((val as u32 as u16) << 11u32) & (2048usize as u16);
}
#[inline]
pub fn composite_display_flag(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (4096usize as u16)) >>
12u32) as u32)
}
}
#[inline]
pub fn set_composite_display_flag(&mut self,
val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(4096usize as u16);
self._bitfield_1 |=
((val as u32 as u16) << 12u32) & (4096usize as u16);
}
}
#[test]
fn bindgen_test_layout__VAEncPictureParameterBufferMPEG2__bindgen_ty_1() {
assert_eq!(::std::mem::size_of::<_VAEncPictureParameterBufferMPEG2__bindgen_ty_1>()
, 4usize);
assert_eq!(::std::mem::align_of::<_VAEncPictureParameterBufferMPEG2__bindgen_ty_1>()
, 4usize);
}
impl Clone for _VAEncPictureParameterBufferMPEG2__bindgen_ty_1 {
fn clone(&self) -> Self { *self }
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAEncPictureParameterBufferMPEG2__bindgen_ty_2 {
pub bits: __BindgenUnionField<_VAEncPictureParameterBufferMPEG2__bindgen_ty_2__bindgen_ty_1>,
pub value: __BindgenUnionField<::std::os::raw::c_uint>,
pub bindgen_union_field: u32,
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAEncPictureParameterBufferMPEG2__bindgen_ty_2__bindgen_ty_1 {
pub _bitfield_1: u32,
}
#[test]
fn bindgen_test_layout__VAEncPictureParameterBufferMPEG2__bindgen_ty_2__bindgen_ty_1() {
assert_eq!(::std::mem::size_of::<_VAEncPictureParameterBufferMPEG2__bindgen_ty_2__bindgen_ty_1>()
, 4usize);
assert_eq!(::std::mem::align_of::<_VAEncPictureParameterBufferMPEG2__bindgen_ty_2__bindgen_ty_1>()
, 4usize);
}
impl Clone for _VAEncPictureParameterBufferMPEG2__bindgen_ty_2__bindgen_ty_1 {
fn clone(&self) -> Self { *self }
}
impl _VAEncPictureParameterBufferMPEG2__bindgen_ty_2__bindgen_ty_1 {
#[inline]
pub fn v_axis(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (1usize as u32)) >>
0u32) as u32)
}
}
#[inline]
pub fn set_v_axis(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(1usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 0u32) & (1usize as u32);
}
#[inline]
pub fn field_sequence(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (14usize as u32)) >>
1u32) as u32)
}
}
#[inline]
pub fn set_field_sequence(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(14usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 1u32) & (14usize as u32);
}
#[inline]
pub fn sub_carrier(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (16usize as u32)) >>
4u32) as u32)
}
}
#[inline]
pub fn set_sub_carrier(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(16usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 4u32) & (16usize as u32);
}
#[inline]
pub fn burst_amplitude(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (4064usize as u32)) >>
5u32) as u32)
}
}
#[inline]
pub fn set_burst_amplitude(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(4064usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 5u32) & (4064usize as u32);
}
#[inline]
pub fn sub_carrier_phase(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (1044480usize as u32))
>> 12u32) as u32)
}
}
#[inline]
pub fn set_sub_carrier_phase(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(1044480usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 12u32) & (1044480usize as u32);
}
}
#[test]
fn bindgen_test_layout__VAEncPictureParameterBufferMPEG2__bindgen_ty_2() {
assert_eq!(::std::mem::size_of::<_VAEncPictureParameterBufferMPEG2__bindgen_ty_2>()
, 4usize);
assert_eq!(::std::mem::align_of::<_VAEncPictureParameterBufferMPEG2__bindgen_ty_2>()
, 4usize);
}
impl Clone for _VAEncPictureParameterBufferMPEG2__bindgen_ty_2 {
fn clone(&self) -> Self { *self }
}
#[test]
fn bindgen_test_layout__VAEncPictureParameterBufferMPEG2() {
assert_eq!(::std::mem::size_of::<_VAEncPictureParameterBufferMPEG2>() ,
44usize);
assert_eq!(::std::mem::align_of::<_VAEncPictureParameterBufferMPEG2>() ,
4usize);
}
impl Clone for _VAEncPictureParameterBufferMPEG2 {
fn clone(&self) -> Self { *self }
}
pub type VAEncPictureParameterBufferMPEG2 = _VAEncPictureParameterBufferMPEG2;
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAEncSliceParameterBufferMPEG2 {
pub macroblock_address: ::std::os::raw::c_uint,
pub num_macroblocks: ::std::os::raw::c_uint,
pub quantiser_scale_code: ::std::os::raw::c_int,
pub is_intra_slice: ::std::os::raw::c_int,
}
#[test]
fn bindgen_test_layout__VAEncSliceParameterBufferMPEG2() {
assert_eq!(::std::mem::size_of::<_VAEncSliceParameterBufferMPEG2>() ,
16usize);
assert_eq!(::std::mem::align_of::<_VAEncSliceParameterBufferMPEG2>() ,
4usize);
}
impl Clone for _VAEncSliceParameterBufferMPEG2 {
fn clone(&self) -> Self { *self }
}
pub type VAEncSliceParameterBufferMPEG2 = _VAEncSliceParameterBufferMPEG2;
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAEncSequenceParameterBufferVP8 {
pub frame_width: ::std::os::raw::c_uint,
pub frame_height: ::std::os::raw::c_uint,
pub frame_width_scale: ::std::os::raw::c_uint,
pub frame_height_scale: ::std::os::raw::c_uint,
pub error_resilient: ::std::os::raw::c_uint,
pub kf_auto: ::std::os::raw::c_uint,
pub kf_min_dist: ::std::os::raw::c_uint,
pub kf_max_dist: ::std::os::raw::c_uint,
pub bits_per_second: ::std::os::raw::c_uint,
pub intra_period: ::std::os::raw::c_uint,
pub reference_frames: [VASurfaceID; 4usize],
}
#[test]
fn bindgen_test_layout__VAEncSequenceParameterBufferVP8() {
assert_eq!(::std::mem::size_of::<_VAEncSequenceParameterBufferVP8>() ,
56usize);
assert_eq!(::std::mem::align_of::<_VAEncSequenceParameterBufferVP8>() ,
4usize);
}
impl Clone for _VAEncSequenceParameterBufferVP8 {
fn clone(&self) -> Self { *self }
}
pub type VAEncSequenceParameterBufferVP8 = _VAEncSequenceParameterBufferVP8;
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAEncPictureParameterBufferVP8 {
pub reconstructed_frame: VASurfaceID,
pub ref_last_frame: VASurfaceID,
pub ref_gf_frame: VASurfaceID,
pub ref_arf_frame: VASurfaceID,
pub coded_buf: VABufferID,
pub ref_flags: _VAEncPictureParameterBufferVP8__bindgen_ty_1,
pub pic_flags: _VAEncPictureParameterBufferVP8__bindgen_ty_2,
pub loop_filter_level: [::std::os::raw::c_char; 4usize],
pub ref_lf_delta: [::std::os::raw::c_char; 4usize],
pub mode_lf_delta: [::std::os::raw::c_char; 4usize],
pub sharpness_level: ::std::os::raw::c_uchar,
pub clamp_qindex_high: ::std::os::raw::c_uchar,
pub clamp_qindex_low: ::std::os::raw::c_uchar,
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAEncPictureParameterBufferVP8__bindgen_ty_1 {
pub bits: __BindgenUnionField<_VAEncPictureParameterBufferVP8__bindgen_ty_1__bindgen_ty_1>,
pub value: __BindgenUnionField<::std::os::raw::c_uint>,
pub bindgen_union_field: u32,
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAEncPictureParameterBufferVP8__bindgen_ty_1__bindgen_ty_1 {
pub _bitfield_1: u32,
}
#[test]
fn bindgen_test_layout__VAEncPictureParameterBufferVP8__bindgen_ty_1__bindgen_ty_1() {
assert_eq!(::std::mem::size_of::<_VAEncPictureParameterBufferVP8__bindgen_ty_1__bindgen_ty_1>()
, 4usize);
assert_eq!(::std::mem::align_of::<_VAEncPictureParameterBufferVP8__bindgen_ty_1__bindgen_ty_1>()
, 4usize);
}
impl Clone for _VAEncPictureParameterBufferVP8__bindgen_ty_1__bindgen_ty_1 {
fn clone(&self) -> Self { *self }
}
impl _VAEncPictureParameterBufferVP8__bindgen_ty_1__bindgen_ty_1 {
#[inline]
pub fn force_kf(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (1usize as u32)) >>
0u32) as u32)
}
}
#[inline]
pub fn set_force_kf(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(1usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 0u32) & (1usize as u32);
}
#[inline]
pub fn no_ref_last(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (2usize as u32)) >>
1u32) as u32)
}
}
#[inline]
pub fn set_no_ref_last(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(2usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 1u32) & (2usize as u32);
}
#[inline]
pub fn no_ref_gf(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (4usize as u32)) >>
2u32) as u32)
}
}
#[inline]
pub fn set_no_ref_gf(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(4usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 2u32) & (4usize as u32);
}
#[inline]
pub fn no_ref_arf(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (8usize as u32)) >>
3u32) as u32)
}
}
#[inline]
pub fn set_no_ref_arf(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(8usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 3u32) & (8usize as u32);
}
#[inline]
pub fn reserved(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 &
(4294967280usize as u32)) >> 4u32) as
u32)
}
}
#[inline]
pub fn set_reserved(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(4294967280usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 4u32) & (4294967280usize as u32);
}
}
#[test]
fn bindgen_test_layout__VAEncPictureParameterBufferVP8__bindgen_ty_1() {
assert_eq!(::std::mem::size_of::<_VAEncPictureParameterBufferVP8__bindgen_ty_1>()
, 4usize);
assert_eq!(::std::mem::align_of::<_VAEncPictureParameterBufferVP8__bindgen_ty_1>()
, 4usize);
}
impl Clone for _VAEncPictureParameterBufferVP8__bindgen_ty_1 {
fn clone(&self) -> Self { *self }
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAEncPictureParameterBufferVP8__bindgen_ty_2 {
pub bits: __BindgenUnionField<_VAEncPictureParameterBufferVP8__bindgen_ty_2__bindgen_ty_1>,
pub value: __BindgenUnionField<::std::os::raw::c_uint>,
pub bindgen_union_field: u32,
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAEncPictureParameterBufferVP8__bindgen_ty_2__bindgen_ty_1 {
pub _bitfield_1: u32,
}
#[test]
fn bindgen_test_layout__VAEncPictureParameterBufferVP8__bindgen_ty_2__bindgen_ty_1() {
assert_eq!(::std::mem::size_of::<_VAEncPictureParameterBufferVP8__bindgen_ty_2__bindgen_ty_1>()
, 4usize);
assert_eq!(::std::mem::align_of::<_VAEncPictureParameterBufferVP8__bindgen_ty_2__bindgen_ty_1>()
, 4usize);
}
impl Clone for _VAEncPictureParameterBufferVP8__bindgen_ty_2__bindgen_ty_1 {
fn clone(&self) -> Self { *self }
}
impl _VAEncPictureParameterBufferVP8__bindgen_ty_2__bindgen_ty_1 {
#[inline]
pub fn frame_type(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (1usize as u32)) >>
0u32) as u32)
}
}
#[inline]
pub fn set_frame_type(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(1usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 0u32) & (1usize as u32);
}
#[inline]
pub fn version(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (14usize as u32)) >>
1u32) as u32)
}
}
#[inline]
pub fn set_version(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(14usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 1u32) & (14usize as u32);
}
#[inline]
pub fn show_frame(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (16usize as u32)) >>
4u32) as u32)
}
}
#[inline]
pub fn set_show_frame(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(16usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 4u32) & (16usize as u32);
}
#[inline]
pub fn color_space(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (32usize as u32)) >>
5u32) as u32)
}
}
#[inline]
pub fn set_color_space(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(32usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 5u32) & (32usize as u32);
}
#[inline]
pub fn recon_filter_type(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (192usize as u32)) >>
6u32) as u32)
}
}
#[inline]
pub fn set_recon_filter_type(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(192usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 6u32) & (192usize as u32);
}
#[inline]
pub fn loop_filter_type(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (768usize as u32)) >>
8u32) as u32)
}
}
#[inline]
pub fn set_loop_filter_type(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(768usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 8u32) & (768usize as u32);
}
#[inline]
pub fn auto_partitions(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (1024usize as u32)) >>
10u32) as u32)
}
}
#[inline]
pub fn set_auto_partitions(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(1024usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 10u32) & (1024usize as u32);
}
#[inline]
pub fn num_token_partitions(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (6144usize as u32)) >>
11u32) as u32)
}
}
#[inline]
pub fn set_num_token_partitions(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(6144usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 11u32) & (6144usize as u32);
}
#[inline]
pub fn clamping_type(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (8192usize as u32)) >>
13u32) as u32)
}
}
#[inline]
pub fn set_clamping_type(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(8192usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 13u32) & (8192usize as u32);
}
#[inline]
pub fn segmentation_enabled(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (16384usize as u32)) >>
14u32) as u32)
}
}
#[inline]
pub fn set_segmentation_enabled(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(16384usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 14u32) & (16384usize as u32);
}
#[inline]
pub fn update_mb_segmentation_map(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (32768usize as u32)) >>
15u32) as u32)
}
}
#[inline]
pub fn set_update_mb_segmentation_map(&mut self,
val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(32768usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 15u32) & (32768usize as u32);
}
#[inline]
pub fn update_segment_feature_data(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (65536usize as u32)) >>
16u32) as u32)
}
}
#[inline]
pub fn set_update_segment_feature_data(&mut self,
val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(65536usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 16u32) & (65536usize as u32);
}
#[inline]
pub fn loop_filter_adj_enable(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (131072usize as u32))
>> 17u32) as u32)
}
}
#[inline]
pub fn set_loop_filter_adj_enable(&mut self,
val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(131072usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 17u32) & (131072usize as u32);
}
#[inline]
pub fn refresh_entropy_probs(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (262144usize as u32))
>> 18u32) as u32)
}
}
#[inline]
pub fn set_refresh_entropy_probs(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(262144usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 18u32) & (262144usize as u32);
}
#[inline]
pub fn refresh_golden_frame(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (524288usize as u32))
>> 19u32) as u32)
}
}
#[inline]
pub fn set_refresh_golden_frame(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(524288usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 19u32) & (524288usize as u32);
}
#[inline]
pub fn refresh_alternate_frame(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (1048576usize as u32))
>> 20u32) as u32)
}
}
#[inline]
pub fn set_refresh_alternate_frame(&mut self,
val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(1048576usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 20u32) & (1048576usize as u32);
}
#[inline]
pub fn refresh_last(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (2097152usize as u32))
>> 21u32) as u32)
}
}
#[inline]
pub fn set_refresh_last(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(2097152usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 21u32) & (2097152usize as u32);
}
#[inline]
pub fn copy_buffer_to_golden(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (12582912usize as u32))
>> 22u32) as u32)
}
}
#[inline]
pub fn set_copy_buffer_to_golden(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(12582912usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 22u32) & (12582912usize as u32);
}
#[inline]
pub fn copy_buffer_to_alternate(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (50331648usize as u32))
>> 24u32) as u32)
}
}
#[inline]
pub fn set_copy_buffer_to_alternate(&mut self,
val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(50331648usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 24u32) & (50331648usize as u32);
}
#[inline]
pub fn sign_bias_golden(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (67108864usize as u32))
>> 26u32) as u32)
}
}
#[inline]
pub fn set_sign_bias_golden(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(67108864usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 26u32) & (67108864usize as u32);
}
#[inline]
pub fn sign_bias_alternate(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 &
(134217728usize as u32)) >> 27u32) as
u32)
}
}
#[inline]
pub fn set_sign_bias_alternate(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(134217728usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 27u32) & (134217728usize as u32);
}
#[inline]
pub fn mb_no_coeff_skip(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 &
(268435456usize as u32)) >> 28u32) as
u32)
}
}
#[inline]
pub fn set_mb_no_coeff_skip(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(268435456usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 28u32) & (268435456usize as u32);
}
#[inline]
pub fn forced_lf_adjustment(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 &
(536870912usize as u32)) >> 29u32) as
u32)
}
}
#[inline]
pub fn set_forced_lf_adjustment(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(536870912usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 29u32) & (536870912usize as u32);
}
#[inline]
pub fn reserved(&self) -> ::std::os::raw::c_uint {
unsafe {
::std::mem::transmute(((self._bitfield_1 &
(3221225472usize as u32)) >> 30u32) as
u32)
}
}
#[inline]
pub fn set_reserved(&mut self, val: ::std::os::raw::c_uint) {
self._bitfield_1 &= !(3221225472usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 30u32) & (3221225472usize as u32);
}
}
#[test]
fn bindgen_test_layout__VAEncPictureParameterBufferVP8__bindgen_ty_2() {
assert_eq!(::std::mem::size_of::<_VAEncPictureParameterBufferVP8__bindgen_ty_2>()
, 4usize);
assert_eq!(::std::mem::align_of::<_VAEncPictureParameterBufferVP8__bindgen_ty_2>()
, 4usize);
}
impl Clone for _VAEncPictureParameterBufferVP8__bindgen_ty_2 {
fn clone(&self) -> Self { *self }
}
#[test]
fn bindgen_test_layout__VAEncPictureParameterBufferVP8() {
assert_eq!(::std::mem::size_of::<_VAEncPictureParameterBufferVP8>() ,
44usize);
assert_eq!(::std::mem::align_of::<_VAEncPictureParameterBufferVP8>() ,
4usize);
}
impl Clone for _VAEncPictureParameterBufferVP8 {
fn clone(&self) -> Self { *self }
}
pub type VAEncPictureParameterBufferVP8 = _VAEncPictureParameterBufferVP8;
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAEncMBMapBufferVP8 {
pub num_mbs: ::std::os::raw::c_uint,
pub mb_segment_id: *mut ::std::os::raw::c_uchar,
}
#[test]
fn bindgen_test_layout__VAEncMBMapBufferVP8() {
assert_eq!(::std::mem::size_of::<_VAEncMBMapBufferVP8>() , 16usize);
assert_eq!(::std::mem::align_of::<_VAEncMBMapBufferVP8>() , 8usize);
}
impl Clone for _VAEncMBMapBufferVP8 {
fn clone(&self) -> Self { *self }
}
pub type VAEncMBMapBufferVP8 = _VAEncMBMapBufferVP8;
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAQMatrixBufferVP8 {
pub quantization_index: [::std::os::raw::c_ushort; 4usize],
pub quantization_index_delta: [::std::os::raw::c_short; 5usize],
}
#[test]
fn bindgen_test_layout__VAQMatrixBufferVP8() {
assert_eq!(::std::mem::size_of::<_VAQMatrixBufferVP8>() , 18usize);
assert_eq!(::std::mem::align_of::<_VAQMatrixBufferVP8>() , 2usize);
}
impl Clone for _VAQMatrixBufferVP8 {
fn clone(&self) -> Self { *self }
}
pub type VAQMatrixBufferVP8 = _VAQMatrixBufferVP8;
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VACodedBufferVP9Status {
pub base_qp_index: u16,
pub loop_filter_level: u8,
pub long_term_indication: u8,
pub next_frame_width: u16,
pub next_frame_height: u16,
}
#[test]
fn bindgen_test_layout__VACodedBufferVP9Status() {
assert_eq!(::std::mem::size_of::<_VACodedBufferVP9Status>() , 8usize);
assert_eq!(::std::mem::align_of::<_VACodedBufferVP9Status>() , 2usize);
}
impl Clone for _VACodedBufferVP9Status {
fn clone(&self) -> Self { *self }
}
pub type VACodedBufferVP9Status = _VACodedBufferVP9Status;
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAEncSequenceParameterBufferVP9 {
pub max_frame_width: u32,
pub max_frame_height: u32,
pub kf_auto: u32,
pub kf_min_dist: u32,
pub kf_max_dist: u32,
pub bits_per_second: u32,
pub intra_period: u32,
}
#[test]
fn bindgen_test_layout__VAEncSequenceParameterBufferVP9() {
assert_eq!(::std::mem::size_of::<_VAEncSequenceParameterBufferVP9>() ,
28usize);
assert_eq!(::std::mem::align_of::<_VAEncSequenceParameterBufferVP9>() ,
4usize);
}
impl Clone for _VAEncSequenceParameterBufferVP9 {
fn clone(&self) -> Self { *self }
}
pub type VAEncSequenceParameterBufferVP9 = _VAEncSequenceParameterBufferVP9;
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAEncPictureParameterBufferVP9 {
pub frame_width_src: u32,
pub frame_height_src: u32,
pub frame_width_dst: u32,
pub frame_height_dst: u32,
pub reconstructed_frame: VASurfaceID,
pub reference_frames: [VASurfaceID; 8usize],
pub coded_buf: VABufferID,
pub ref_flags: _VAEncPictureParameterBufferVP9__bindgen_ty_1,
pub pic_flags: _VAEncPictureParameterBufferVP9__bindgen_ty_2,
pub refresh_frame_flags: u8,
pub luma_ac_qindex: u8,
pub luma_dc_qindex_delta: i8,
pub chroma_ac_qindex_delta: i8,
pub chroma_dc_qindex_delta: i8,
pub filter_level: u8,
pub sharpness_level: u8,
pub ref_lf_delta: [i8; 4usize],
pub mode_lf_delta: [i8; 2usize],
pub bit_offset_ref_lf_delta: u16,
pub bit_offset_mode_lf_delta: u16,
pub bit_offset_lf_level: u16,
pub bit_offset_qindex: u16,
pub bit_offset_first_partition_size: u16,
pub bit_offset_segmentation: u16,
pub bit_size_segmentation: u16,
pub log2_tile_rows: u8,
pub log2_tile_columns: u8,
pub skip_frame_flag: u8,
pub number_skip_frames: u8,
pub skip_frames_size: u32,
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAEncPictureParameterBufferVP9__bindgen_ty_1 {
pub bits: __BindgenUnionField<_VAEncPictureParameterBufferVP9__bindgen_ty_1__bindgen_ty_1>,
pub value: __BindgenUnionField<u32>,
pub bindgen_union_field: u32,
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAEncPictureParameterBufferVP9__bindgen_ty_1__bindgen_ty_1 {
pub _bitfield_1: u32,
}
#[test]
fn bindgen_test_layout__VAEncPictureParameterBufferVP9__bindgen_ty_1__bindgen_ty_1() {
assert_eq!(::std::mem::size_of::<_VAEncPictureParameterBufferVP9__bindgen_ty_1__bindgen_ty_1>()
, 4usize);
assert_eq!(::std::mem::align_of::<_VAEncPictureParameterBufferVP9__bindgen_ty_1__bindgen_ty_1>()
, 4usize);
}
impl Clone for _VAEncPictureParameterBufferVP9__bindgen_ty_1__bindgen_ty_1 {
fn clone(&self) -> Self { *self }
}
impl _VAEncPictureParameterBufferVP9__bindgen_ty_1__bindgen_ty_1 {
#[inline]
pub fn force_kf(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (1usize as u32)) >>
0u32) as u32)
}
}
#[inline]
pub fn set_force_kf(&mut self, val: u32) {
self._bitfield_1 &= !(1usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 0u32) & (1usize as u32);
}
#[inline]
pub fn ref_frame_ctrl_l0(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (14usize as u32)) >>
1u32) as u32)
}
}
#[inline]
pub fn set_ref_frame_ctrl_l0(&mut self, val: u32) {
self._bitfield_1 &= !(14usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 1u32) & (14usize as u32);
}
#[inline]
pub fn ref_frame_ctrl_l1(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (112usize as u32)) >>
4u32) as u32)
}
}
#[inline]
pub fn set_ref_frame_ctrl_l1(&mut self, val: u32) {
self._bitfield_1 &= !(112usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 4u32) & (112usize as u32);
}
#[inline]
pub fn ref_last_idx(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (896usize as u32)) >>
7u32) as u32)
}
}
#[inline]
pub fn set_ref_last_idx(&mut self, val: u32) {
self._bitfield_1 &= !(896usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 7u32) & (896usize as u32);
}
#[inline]
pub fn ref_last_sign_bias(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (1024usize as u32)) >>
10u32) as u32)
}
}
#[inline]
pub fn set_ref_last_sign_bias(&mut self, val: u32) {
self._bitfield_1 &= !(1024usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 10u32) & (1024usize as u32);
}
#[inline]
pub fn ref_gf_idx(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (14336usize as u32)) >>
11u32) as u32)
}
}
#[inline]
pub fn set_ref_gf_idx(&mut self, val: u32) {
self._bitfield_1 &= !(14336usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 11u32) & (14336usize as u32);
}
#[inline]
pub fn ref_gf_sign_bias(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (16384usize as u32)) >>
14u32) as u32)
}
}
#[inline]
pub fn set_ref_gf_sign_bias(&mut self, val: u32) {
self._bitfield_1 &= !(16384usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 14u32) & (16384usize as u32);
}
#[inline]
pub fn ref_arf_idx(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (229376usize as u32))
>> 15u32) as u32)
}
}
#[inline]
pub fn set_ref_arf_idx(&mut self, val: u32) {
self._bitfield_1 &= !(229376usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 15u32) & (229376usize as u32);
}
#[inline]
pub fn ref_arf_sign_bias(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (262144usize as u32))
>> 18u32) as u32)
}
}
#[inline]
pub fn set_ref_arf_sign_bias(&mut self, val: u32) {
self._bitfield_1 &= !(262144usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 18u32) & (262144usize as u32);
}
#[inline]
pub fn temporal_id(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 &
(133693440usize as u32)) >> 19u32) as
u32)
}
}
#[inline]
pub fn set_temporal_id(&mut self, val: u32) {
self._bitfield_1 &= !(133693440usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 19u32) & (133693440usize as u32);
}
#[inline]
pub fn reserved(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 &
(4160749568usize as u32)) >> 27u32) as
u32)
}
}
#[inline]
pub fn set_reserved(&mut self, val: u32) {
self._bitfield_1 &= !(4160749568usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 27u32) & (4160749568usize as u32);
}
}
#[test]
fn bindgen_test_layout__VAEncPictureParameterBufferVP9__bindgen_ty_1() {
assert_eq!(::std::mem::size_of::<_VAEncPictureParameterBufferVP9__bindgen_ty_1>()
, 4usize);
assert_eq!(::std::mem::align_of::<_VAEncPictureParameterBufferVP9__bindgen_ty_1>()
, 4usize);
}
impl Clone for _VAEncPictureParameterBufferVP9__bindgen_ty_1 {
fn clone(&self) -> Self { *self }
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAEncPictureParameterBufferVP9__bindgen_ty_2 {
pub bits: __BindgenUnionField<_VAEncPictureParameterBufferVP9__bindgen_ty_2__bindgen_ty_1>,
pub value: __BindgenUnionField<u32>,
pub bindgen_union_field: u32,
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAEncPictureParameterBufferVP9__bindgen_ty_2__bindgen_ty_1 {
pub _bitfield_1: u32,
}
#[test]
fn bindgen_test_layout__VAEncPictureParameterBufferVP9__bindgen_ty_2__bindgen_ty_1() {
assert_eq!(::std::mem::size_of::<_VAEncPictureParameterBufferVP9__bindgen_ty_2__bindgen_ty_1>()
, 4usize);
assert_eq!(::std::mem::align_of::<_VAEncPictureParameterBufferVP9__bindgen_ty_2__bindgen_ty_1>()
, 4usize);
}
impl Clone for _VAEncPictureParameterBufferVP9__bindgen_ty_2__bindgen_ty_1 {
fn clone(&self) -> Self { *self }
}
impl _VAEncPictureParameterBufferVP9__bindgen_ty_2__bindgen_ty_1 {
#[inline]
pub fn frame_type(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (1usize as u32)) >>
0u32) as u32)
}
}
#[inline]
pub fn set_frame_type(&mut self, val: u32) {
self._bitfield_1 &= !(1usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 0u32) & (1usize as u32);
}
#[inline]
pub fn show_frame(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (2usize as u32)) >>
1u32) as u32)
}
}
#[inline]
pub fn set_show_frame(&mut self, val: u32) {
self._bitfield_1 &= !(2usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 1u32) & (2usize as u32);
}
#[inline]
pub fn error_resilient_mode(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (4usize as u32)) >>
2u32) as u32)
}
}
#[inline]
pub fn set_error_resilient_mode(&mut self, val: u32) {
self._bitfield_1 &= !(4usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 2u32) & (4usize as u32);
}
#[inline]
pub fn intra_only(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (8usize as u32)) >>
3u32) as u32)
}
}
#[inline]
pub fn set_intra_only(&mut self, val: u32) {
self._bitfield_1 &= !(8usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 3u32) & (8usize as u32);
}
#[inline]
pub fn allow_high_precision_mv(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (16usize as u32)) >>
4u32) as u32)
}
}
#[inline]
pub fn set_allow_high_precision_mv(&mut self, val: u32) {
self._bitfield_1 &= !(16usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 4u32) & (16usize as u32);
}
#[inline]
pub fn mcomp_filter_type(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (224usize as u32)) >>
5u32) as u32)
}
}
#[inline]
pub fn set_mcomp_filter_type(&mut self, val: u32) {
self._bitfield_1 &= !(224usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 5u32) & (224usize as u32);
}
#[inline]
pub fn frame_parallel_decoding_mode(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (256usize as u32)) >>
8u32) as u32)
}
}
#[inline]
pub fn set_frame_parallel_decoding_mode(&mut self, val: u32) {
self._bitfield_1 &= !(256usize as u32);
self._bitfield_1 |= ((val as u32 as u32) << 8u32) & (256usize as u32);
}
#[inline]
pub fn reset_frame_context(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (1536usize as u32)) >>
9u32) as u32)
}
}
#[inline]
pub fn set_reset_frame_context(&mut self, val: u32) {
self._bitfield_1 &= !(1536usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 9u32) & (1536usize as u32);
}
#[inline]
pub fn refresh_frame_context(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (2048usize as u32)) >>
11u32) as u32)
}
}
#[inline]
pub fn set_refresh_frame_context(&mut self, val: u32) {
self._bitfield_1 &= !(2048usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 11u32) & (2048usize as u32);
}
#[inline]
pub fn frame_context_idx(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (12288usize as u32)) >>
12u32) as u32)
}
}
#[inline]
pub fn set_frame_context_idx(&mut self, val: u32) {
self._bitfield_1 &= !(12288usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 12u32) & (12288usize as u32);
}
#[inline]
pub fn segmentation_enabled(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (16384usize as u32)) >>
14u32) as u32)
}
}
#[inline]
pub fn set_segmentation_enabled(&mut self, val: u32) {
self._bitfield_1 &= !(16384usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 14u32) & (16384usize as u32);
}
#[inline]
pub fn segmentation_temporal_update(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (32768usize as u32)) >>
15u32) as u32)
}
}
#[inline]
pub fn set_segmentation_temporal_update(&mut self, val: u32) {
self._bitfield_1 &= !(32768usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 15u32) & (32768usize as u32);
}
#[inline]
pub fn segmentation_update_map(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (65536usize as u32)) >>
16u32) as u32)
}
}
#[inline]
pub fn set_segmentation_update_map(&mut self, val: u32) {
self._bitfield_1 &= !(65536usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 16u32) & (65536usize as u32);
}
#[inline]
pub fn lossless_mode(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (131072usize as u32))
>> 17u32) as u32)
}
}
#[inline]
pub fn set_lossless_mode(&mut self, val: u32) {
self._bitfield_1 &= !(131072usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 17u32) & (131072usize as u32);
}
#[inline]
pub fn comp_prediction_mode(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (786432usize as u32))
>> 18u32) as u32)
}
}
#[inline]
pub fn set_comp_prediction_mode(&mut self, val: u32) {
self._bitfield_1 &= !(786432usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 18u32) & (786432usize as u32);
}
#[inline]
pub fn auto_segmentation(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (1048576usize as u32))
>> 20u32) as u32)
}
}
#[inline]
pub fn set_auto_segmentation(&mut self, val: u32) {
self._bitfield_1 &= !(1048576usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 20u32) & (1048576usize as u32);
}
#[inline]
pub fn super_frame_flag(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (2097152usize as u32))
>> 21u32) as u32)
}
}
#[inline]
pub fn set_super_frame_flag(&mut self, val: u32) {
self._bitfield_1 &= !(2097152usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 21u32) & (2097152usize as u32);
}
#[inline]
pub fn reserved(&self) -> u32 {
unsafe {
::std::mem::transmute(((self._bitfield_1 &
(4290772992usize as u32)) >> 22u32) as
u32)
}
}
#[inline]
pub fn set_reserved(&mut self, val: u32) {
self._bitfield_1 &= !(4290772992usize as u32);
self._bitfield_1 |=
((val as u32 as u32) << 22u32) & (4290772992usize as u32);
}
}
#[test]
fn bindgen_test_layout__VAEncPictureParameterBufferVP9__bindgen_ty_2() {
assert_eq!(::std::mem::size_of::<_VAEncPictureParameterBufferVP9__bindgen_ty_2>()
, 4usize);
assert_eq!(::std::mem::align_of::<_VAEncPictureParameterBufferVP9__bindgen_ty_2>()
, 4usize);
}
impl Clone for _VAEncPictureParameterBufferVP9__bindgen_ty_2 {
fn clone(&self) -> Self { *self }
}
#[test]
fn bindgen_test_layout__VAEncPictureParameterBufferVP9() {
assert_eq!(::std::mem::size_of::<_VAEncPictureParameterBufferVP9>() ,
100usize);
assert_eq!(::std::mem::align_of::<_VAEncPictureParameterBufferVP9>() ,
4usize);
}
impl Clone for _VAEncPictureParameterBufferVP9 {
fn clone(&self) -> Self { *self }
}
pub type VAEncPictureParameterBufferVP9 = _VAEncPictureParameterBufferVP9;
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAEncSegParamVP9 {
pub seg_flags: _VAEncSegParamVP9__bindgen_ty_1,
pub segment_lf_level_delta: i8,
pub segment_qindex_delta: i16,
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAEncSegParamVP9__bindgen_ty_1 {
pub bits: __BindgenUnionField<_VAEncSegParamVP9__bindgen_ty_1__bindgen_ty_1>,
pub value: __BindgenUnionField<u8>,
pub bindgen_union_field: u8,
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAEncSegParamVP9__bindgen_ty_1__bindgen_ty_1 {
pub _bitfield_1: u8,
}
#[test]
fn bindgen_test_layout__VAEncSegParamVP9__bindgen_ty_1__bindgen_ty_1() {
assert_eq!(::std::mem::size_of::<_VAEncSegParamVP9__bindgen_ty_1__bindgen_ty_1>()
, 1usize);
assert_eq!(::std::mem::align_of::<_VAEncSegParamVP9__bindgen_ty_1__bindgen_ty_1>()
, 1usize);
}
impl Clone for _VAEncSegParamVP9__bindgen_ty_1__bindgen_ty_1 {
fn clone(&self) -> Self { *self }
}
impl _VAEncSegParamVP9__bindgen_ty_1__bindgen_ty_1 {
#[inline]
pub fn segment_reference_enabled(&self) -> u8 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (1usize as u8)) >>
0u32) as u8)
}
}
#[inline]
pub fn set_segment_reference_enabled(&mut self, val: u8) {
self._bitfield_1 &= !(1usize as u8);
self._bitfield_1 |= ((val as u8 as u8) << 0u32) & (1usize as u8);
}
#[inline]
pub fn segment_reference(&self) -> u8 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (6usize as u8)) >>
1u32) as u8)
}
}
#[inline]
pub fn set_segment_reference(&mut self, val: u8) {
self._bitfield_1 &= !(6usize as u8);
self._bitfield_1 |= ((val as u8 as u8) << 1u32) & (6usize as u8);
}
#[inline]
pub fn segment_reference_skipped(&self) -> u8 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (8usize as u8)) >>
3u32) as u8)
}
}
#[inline]
pub fn set_segment_reference_skipped(&mut self, val: u8) {
self._bitfield_1 &= !(8usize as u8);
self._bitfield_1 |= ((val as u8 as u8) << 3u32) & (8usize as u8);
}
#[inline]
pub fn reserved(&self) -> u8 {
unsafe {
::std::mem::transmute(((self._bitfield_1 & (240usize as u8)) >>
4u32) as u8)
}
}
#[inline]
pub fn set_reserved(&mut self, val: u8) {
self._bitfield_1 &= !(240usize as u8);
self._bitfield_1 |= ((val as u8 as u8) << 4u32) & (240usize as u8);
}
}
#[test]
fn bindgen_test_layout__VAEncSegParamVP9__bindgen_ty_1() {
assert_eq!(::std::mem::size_of::<_VAEncSegParamVP9__bindgen_ty_1>() ,
1usize);
assert_eq!(::std::mem::align_of::<_VAEncSegParamVP9__bindgen_ty_1>() ,
1usize);
}
impl Clone for _VAEncSegParamVP9__bindgen_ty_1 {
fn clone(&self) -> Self { *self }
}
#[test]
fn bindgen_test_layout__VAEncSegParamVP9() {
assert_eq!(::std::mem::size_of::<_VAEncSegParamVP9>() , 4usize);
assert_eq!(::std::mem::align_of::<_VAEncSegParamVP9>() , 2usize);
}
impl Clone for _VAEncSegParamVP9 {
fn clone(&self) -> Self { *self }
}
pub type VAEncSegParamVP9 = _VAEncSegParamVP9;
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAEncMiscParameterTypeVP9PerSegmantParam {
pub seg_data: [VAEncSegParamVP9; 8usize],
}
#[test]
fn bindgen_test_layout__VAEncMiscParameterTypeVP9PerSegmantParam() {
assert_eq!(::std::mem::size_of::<_VAEncMiscParameterTypeVP9PerSegmantParam>()
, 32usize);
assert_eq!(::std::mem::align_of::<_VAEncMiscParameterTypeVP9PerSegmantParam>()
, 2usize);
}
impl Clone for _VAEncMiscParameterTypeVP9PerSegmantParam {
fn clone(&self) -> Self { *self }
}
pub type VAEncMiscParameterTypeVP9PerSegmantParam =
_VAEncMiscParameterTypeVP9PerSegmantParam;
#[repr(u32)]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum _VAProcFilterType {
VAProcFilterNone = 0,
VAProcFilterNoiseReduction = 1,
VAProcFilterDeinterlacing = 2,
VAProcFilterSharpening = 3,
VAProcFilterColorBalance = 4,
VAProcFilterSkinToneEnhancement = 5,
VAProcFilterCount = 6,
}
pub use self::_VAProcFilterType as VAProcFilterType;
#[repr(u32)]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum _VAProcDeinterlacingType {
VAProcDeinterlacingNone = 0,
VAProcDeinterlacingBob = 1,
VAProcDeinterlacingWeave = 2,
VAProcDeinterlacingMotionAdaptive = 3,
VAProcDeinterlacingMotionCompensated = 4,
VAProcDeinterlacingCount = 5,
}
pub use self::_VAProcDeinterlacingType as VAProcDeinterlacingType;
#[repr(u32)]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum _VAProcColorBalanceType {
VAProcColorBalanceNone = 0,
VAProcColorBalanceHue = 1,
VAProcColorBalanceSaturation = 2,
VAProcColorBalanceBrightness = 3,
VAProcColorBalanceContrast = 4,
VAProcColorBalanceAutoSaturation = 5,
VAProcColorBalanceAutoBrightness = 6,
VAProcColorBalanceAutoContrast = 7,
VAProcColorBalanceCount = 8,
}
pub use self::_VAProcColorBalanceType as VAProcColorBalanceType;
#[repr(u32)]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum _VAProcColorStandardType {
VAProcColorStandardNone = 0,
VAProcColorStandardBT601 = 1,
VAProcColorStandardBT709 = 2,
VAProcColorStandardBT470M = 3,
VAProcColorStandardBT470BG = 4,
VAProcColorStandardSMPTE170M = 5,
VAProcColorStandardSMPTE240M = 6,
VAProcColorStandardGenericFilm = 7,
VAProcColorStandardCount = 8,
}
pub use self::_VAProcColorStandardType as VAProcColorStandardType;
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAProcPipelineCaps {
pub pipeline_flags: ::std::os::raw::c_uint,
pub filter_flags: ::std::os::raw::c_uint,
pub num_forward_references: ::std::os::raw::c_uint,
pub num_backward_references: ::std::os::raw::c_uint,
pub input_color_standards: *mut VAProcColorStandardType,
pub num_input_color_standards: ::std::os::raw::c_uint,
pub output_color_standards: *mut VAProcColorStandardType,
pub num_output_color_standards: ::std::os::raw::c_uint,
}
#[test]
fn bindgen_test_layout__VAProcPipelineCaps() {
assert_eq!(::std::mem::size_of::<_VAProcPipelineCaps>() , 48usize);
assert_eq!(::std::mem::align_of::<_VAProcPipelineCaps>() , 8usize);
}
impl Clone for _VAProcPipelineCaps {
fn clone(&self) -> Self { *self }
}
pub type VAProcPipelineCaps = _VAProcPipelineCaps;
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAProcFilterValueRange {
pub min_value: f32,
pub max_value: f32,
pub default_value: f32,
pub step: f32,
}
#[test]
fn bindgen_test_layout__VAProcFilterValueRange() {
assert_eq!(::std::mem::size_of::<_VAProcFilterValueRange>() , 16usize);
assert_eq!(::std::mem::align_of::<_VAProcFilterValueRange>() , 4usize);
}
impl Clone for _VAProcFilterValueRange {
fn clone(&self) -> Self { *self }
}
pub type VAProcFilterValueRange = _VAProcFilterValueRange;
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAProcPipelineParameterBuffer {
pub surface: VASurfaceID,
pub surface_region: *const VARectangle,
pub surface_color_standard: VAProcColorStandardType,
pub output_region: *const VARectangle,
pub output_background_color: ::std::os::raw::c_uint,
pub output_color_standard: VAProcColorStandardType,
pub pipeline_flags: ::std::os::raw::c_uint,
pub filter_flags: ::std::os::raw::c_uint,
pub filters: *mut VABufferID,
pub num_filters: ::std::os::raw::c_uint,
pub forward_references: *mut VASurfaceID,
pub num_forward_references: ::std::os::raw::c_uint,
pub backward_references: *mut VASurfaceID,
pub num_backward_references: ::std::os::raw::c_uint,
}
#[test]
fn bindgen_test_layout__VAProcPipelineParameterBuffer() {
assert_eq!(::std::mem::size_of::<_VAProcPipelineParameterBuffer>() ,
96usize);
assert_eq!(::std::mem::align_of::<_VAProcPipelineParameterBuffer>() ,
8usize);
}
impl Clone for _VAProcPipelineParameterBuffer {
fn clone(&self) -> Self { *self }
}
pub type VAProcPipelineParameterBuffer = _VAProcPipelineParameterBuffer;
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAProcFilterParameterBufferBase {
pub type_: VAProcFilterType,
}
#[test]
fn bindgen_test_layout__VAProcFilterParameterBufferBase() {
assert_eq!(::std::mem::size_of::<_VAProcFilterParameterBufferBase>() ,
4usize);
assert_eq!(::std::mem::align_of::<_VAProcFilterParameterBufferBase>() ,
4usize);
}
impl Clone for _VAProcFilterParameterBufferBase {
fn clone(&self) -> Self { *self }
}
pub type VAProcFilterParameterBufferBase = _VAProcFilterParameterBufferBase;
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAProcFilterParameterBuffer {
pub type_: VAProcFilterType,
pub value: f32,
}
#[test]
fn bindgen_test_layout__VAProcFilterParameterBuffer() {
assert_eq!(::std::mem::size_of::<_VAProcFilterParameterBuffer>() ,
8usize);
assert_eq!(::std::mem::align_of::<_VAProcFilterParameterBuffer>() ,
4usize);
}
impl Clone for _VAProcFilterParameterBuffer {
fn clone(&self) -> Self { *self }
}
pub type VAProcFilterParameterBuffer = _VAProcFilterParameterBuffer;
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAProcFilterParameterBufferDeinterlacing {
pub type_: VAProcFilterType,
pub algorithm: VAProcDeinterlacingType,
pub flags: ::std::os::raw::c_uint,
}
#[test]
fn bindgen_test_layout__VAProcFilterParameterBufferDeinterlacing() {
assert_eq!(::std::mem::size_of::<_VAProcFilterParameterBufferDeinterlacing>()
, 12usize);
assert_eq!(::std::mem::align_of::<_VAProcFilterParameterBufferDeinterlacing>()
, 4usize);
}
impl Clone for _VAProcFilterParameterBufferDeinterlacing {
fn clone(&self) -> Self { *self }
}
pub type VAProcFilterParameterBufferDeinterlacing =
_VAProcFilterParameterBufferDeinterlacing;
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAProcFilterParameterBufferColorBalance {
pub type_: VAProcFilterType,
pub attrib: VAProcColorBalanceType,
pub value: f32,
}
#[test]
fn bindgen_test_layout__VAProcFilterParameterBufferColorBalance() {
assert_eq!(::std::mem::size_of::<_VAProcFilterParameterBufferColorBalance>()
, 12usize);
assert_eq!(::std::mem::align_of::<_VAProcFilterParameterBufferColorBalance>()
, 4usize);
}
impl Clone for _VAProcFilterParameterBufferColorBalance {
fn clone(&self) -> Self { *self }
}
pub type VAProcFilterParameterBufferColorBalance =
_VAProcFilterParameterBufferColorBalance;
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAProcFilterCap {
pub range: VAProcFilterValueRange,
}
#[test]
fn bindgen_test_layout__VAProcFilterCap() {
assert_eq!(::std::mem::size_of::<_VAProcFilterCap>() , 16usize);
assert_eq!(::std::mem::align_of::<_VAProcFilterCap>() , 4usize);
}
impl Clone for _VAProcFilterCap {
fn clone(&self) -> Self { *self }
}
pub type VAProcFilterCap = _VAProcFilterCap;
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAProcFilterCapDeinterlacing {
pub type_: VAProcDeinterlacingType,
}
#[test]
fn bindgen_test_layout__VAProcFilterCapDeinterlacing() {
assert_eq!(::std::mem::size_of::<_VAProcFilterCapDeinterlacing>() ,
4usize);
assert_eq!(::std::mem::align_of::<_VAProcFilterCapDeinterlacing>() ,
4usize);
}
impl Clone for _VAProcFilterCapDeinterlacing {
fn clone(&self) -> Self { *self }
}
pub type VAProcFilterCapDeinterlacing = _VAProcFilterCapDeinterlacing;
#[repr(C)]
#[derive(Debug, Copy)]
pub struct _VAProcFilterCapColorBalance {
pub type_: VAProcColorBalanceType,
pub range: VAProcFilterValueRange,
}
#[test]
fn bindgen_test_layout__VAProcFilterCapColorBalance() {
assert_eq!(::std::mem::size_of::<_VAProcFilterCapColorBalance>() ,
20usize);
assert_eq!(::std::mem::align_of::<_VAProcFilterCapColorBalance>() ,
4usize);
}
impl Clone for _VAProcFilterCapColorBalance {
fn clone(&self) -> Self { *self }
}
pub type VAProcFilterCapColorBalance = _VAProcFilterCapColorBalance;
extern "C" {
pub fn vaQueryVideoProcFilters(dpy: VADisplay, context: VAContextID,
filters: *mut VAProcFilterType,
num_filters: *mut ::std::os::raw::c_uint)
-> VAStatus;
}
extern "C" {
pub fn vaQueryVideoProcFilterCaps(dpy: VADisplay, context: VAContextID,
type_: VAProcFilterType,
filter_caps:
*mut ::std::os::raw::c_void,
num_filter_caps:
*mut ::std::os::raw::c_uint)
-> VAStatus;
}
extern "C" {
pub fn vaQueryVideoProcPipelineCaps(dpy: VADisplay, context: VAContextID,
filters: *mut VABufferID,
num_filters: ::std::os::raw::c_uint,
pipeline_caps:
*mut VAProcPipelineCaps)
-> VAStatus;
}
|
_VAEncPictureParameterBufferHEVC__bindgen_ty_1
|
main.go
|
// Copyright 2019-present Facebook
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"flag"
"fmt"
entopts "entgo.io/contrib/entproto/cmd/protoc-gen-ent/options/ent"
"entgo.io/contrib/schemast"
"entgo.io/ent"
"entgo.io/ent/schema/edge"
"entgo.io/ent/schema/field"
"google.golang.org/protobuf/compiler/protogen"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/types/descriptorpb"
)
var schemaDir *string
func main() {
var flags flag.FlagSet
schemaDir = flags.String("schemadir", "./ent/schema", "path to ent schema dir")
protogen.Options{
ParamFunc: flags.Set,
}.Run(func(gen *protogen.Plugin) error {
return printSchemas(*schemaDir, gen)
})
}
func printSchemas(schemaDir string, gen *protogen.Plugin) error {
ctx, err := schemast.Load(schemaDir)
if err != nil {
return err
}
var mutations []schemast.Mutator
for _, f := range gen.Files {
if !f.Generate {
continue
}
// TODO(rotemtam): handle nested messages recursively?
for _, msg := range f.Messages {
opts, ok := schemaOpts(msg)
if !ok || !opts.GetGen() {
continue
}
schema, err := toSchema(msg, opts)
if err != nil {
return err
}
mutations = append(mutations, schema)
}
}
if err := schemast.Mutate(ctx, mutations...); err != nil {
return err
}
if err := ctx.Print(schemaDir, schemast.Header("File updated by protoc-gen-ent.")); err != nil {
return err
}
return nil
}
func schemaOpts(msg *protogen.Message) (*entopts.Schema, bool) {
opts, ok := msg.Desc.Options().(*descriptorpb.MessageOptions)
if !ok {
return nil, false
}
extension := proto.GetExtension(opts, entopts.E_Schema)
mop, ok := extension.(*entopts.Schema)
return mop, ok
}
func fieldOpts(fld *protogen.Field) (*entopts.Field, bool) {
opts, ok := fld.Desc.Options().(*descriptorpb.FieldOptions)
if !ok {
return nil, false
}
extension := proto.GetExtension(opts, entopts.E_Field)
fop, ok := extension.(*entopts.Field)
return fop, ok
}
func edgeOpts(fld *protogen.Field) (*entopts.Edge, bool) {
opts, ok := fld.Desc.Options().(*descriptorpb.FieldOptions)
if !ok || opts == nil {
return nil, false
}
extension := proto.GetExtension(opts, entopts.E_Edge)
eop, ok := extension.(*entopts.Edge)
return eop, ok
}
func toSchema(m *protogen.Message, opts *entopts.Schema) (*schemast.UpsertSchema, error) {
name := string(m.Desc.Name())
if opts.Name != nil {
name = opts.GetName()
}
out := &schemast.UpsertSchema{
Name: name,
}
for _, f := range m.Fields {
|
}
out.Edges = append(out.Edges, edg)
continue
}
fld, err := toField(f)
if err != nil {
return nil, err
}
out.Fields = append(out.Fields, fld)
}
return out, nil
}
func isEdge(f *protogen.Field) bool {
return f.Desc.Kind() == protoreflect.MessageKind
}
func toEdge(f *protogen.Field) (ent.Edge, error) {
name := string(f.Desc.Name())
msgType := string(f.Desc.Message().Name())
opts, ok := edgeOpts(f)
if !ok {
return nil, fmt.Errorf("protoc-gen-ent: expected ent.edge option on field %q", name)
}
var e ent.Edge
switch {
// TODO(rotemtam): handle O2O/M2M same type
case opts.Ref != nil:
e = edge.From(name, placeholder.Type)
default:
e = edge.To(name, placeholder.Type)
}
e = withType(e, msgType)
applyEdgeOpts(e, opts)
return e, nil
}
func toField(f *protogen.Field) (ent.Field, error) {
name := string(f.Desc.Name())
var fld ent.Field
switch f.Desc.Kind() {
case protoreflect.StringKind:
fld = field.String(name)
case protoreflect.BoolKind:
fld = field.Bool(name)
case protoreflect.Sint32Kind:
fld = field.Int32(name)
case protoreflect.Uint32Kind:
fld = field.Uint32(name)
case protoreflect.Int64Kind:
fld = field.Int64(name)
case protoreflect.Sint64Kind:
fld = field.Int64(name)
case protoreflect.Uint64Kind:
fld = field.Uint64(name)
case protoreflect.Sfixed32Kind:
fld = field.Int32(name)
case protoreflect.Fixed32Kind:
fld = field.Int32(name)
case protoreflect.FloatKind:
fld = field.Float(name)
case protoreflect.Sfixed64Kind:
fld = field.Int64(name)
case protoreflect.Fixed64Kind:
fld = field.Int64(name)
case protoreflect.DoubleKind:
fld = field.Float(name)
case protoreflect.BytesKind:
fld = field.Bytes(name)
case protoreflect.Int32Kind:
fld = field.Int32(name)
case protoreflect.EnumKind:
pbEnum := f.Desc.Enum().Values()
values := make([]string, 0, pbEnum.Len())
for i := 0; i < pbEnum.Len(); i++ {
values = append(values, string(pbEnum.Get(i).Name()))
}
fld = field.Enum(name).Values(values...)
default:
return nil, fmt.Errorf("protoc-gen-ent: unsupported kind %q", f.Desc.Kind())
}
if opts, ok := fieldOpts(f); ok {
applyFieldOpts(fld, opts)
}
return fld, nil
}
func applyFieldOpts(fld ent.Field, opts *entopts.Field) {
d := fld.Descriptor()
d.Nillable = opts.GetNillable()
d.Optional = opts.GetOptional()
d.Unique = opts.GetUnique()
d.Sensitive = opts.GetSensitive()
d.Immutable = opts.GetImmutable()
d.Comment = opts.GetComment()
d.Tag = opts.GetStructTag()
d.StorageKey = opts.GetStorageKey()
d.SchemaType = opts.GetSchemaType()
}
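// Illustrative sketch (not part of the original plugin): for a hypothetical
// proto field declared roughly as
//
//	string user_name = 1 [(ent.field) = {optional: true, unique: true}];
//
// toField maps the StringKind to field.String("user_name"), and
// applyFieldOpts then copies the options onto its descriptor, so the result
// is approximately:
//
//	fld := field.String("user_name")
//	fld.Descriptor().Optional = true
//	fld.Descriptor().Unique = true
//
// The field name, option values and exact option syntax above are assumptions
// made for the example.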
func applyEdgeOpts(edg ent.Edge, opts *entopts.Edge) {
d := edg.Descriptor()
d.Unique = opts.GetUnique()
d.RefName = opts.GetRef()
d.Required = opts.GetRequired()
d.Field = opts.GetField()
d.Tag = opts.GetStructTag()
if sk := opts.StorageKey; sk != nil {
d.StorageKey = &edge.StorageKey{
Table: sk.GetTable(),
Columns: sk.GetColumns(),
}
}
}
type placeholder struct {
}
func (placeholder) Type() {
}
func withType(edg ent.Edge, tn string) ent.Edge {
edg.Descriptor().Type = tn
return edg
}
|
if isEdge(f) {
edg, err := toEdge(f)
if err != nil {
return nil, err
|
lasso.py
|
# -------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# -------------------------------------------------------------
# Autogenerated By : src/main/python/generator/generator.py
# Autogenerated From : scripts/builtin/lasso.dml
from typing import Dict, Iterable
from systemds.operator import OperationNode
from systemds.script_building.dag import OutputType
from systemds.utils.consts import VALID_INPUT_TYPES
def lasso(X: OperationNode, y: OperationNode, **kwargs: Dict[str, VALID_INPUT_TYPES]) -> OperationNode:
"""
:param X: input feature matrix
|
:param maxi: maximum number of iterations until convergence
:return: 'OperationNode' containing
"""
X._check_matrix_op()
y._check_matrix_op()
params_dict = {'X':X, 'y':y}
params_dict.update(kwargs)
return OperationNode(X.sds_context, 'lasso', named_input_nodes=params_dict, output_type=OutputType.MATRIX)
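# Minimal usage sketch (illustrative only; the SystemDSContext setup, the
# numpy-backed inputs and the chosen tolerance are assumptions, not part of
# this generated wrapper):
#
#   from systemds.context import SystemDSContext
#   with SystemDSContext() as sds:
#       X = sds.from_numpy(features)
#       y = sds.from_numpy(labels)
#       weights = lasso(X, y, tol=1e-9).compute()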
|
:param y: matrix Y columns of the design matrix
:param tol: target convergence tolerance
:param M: history length
:param tau: regularization component
|
solexa.go
|
// Copyright ©2011-2012 The bíogo Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package quality
import (
"github.com/biogo/biogo/alphabet"
|
// A slice of quality scores that satisfies the alphabet.Slice interface.
type Qsolexas []alphabet.Qsolexa
func (q Qsolexas) Make(len, cap int) alphabet.Slice { return make(Qsolexas, len, cap) }
func (q Qsolexas) Len() int { return len(q) }
func (q Qsolexas) Cap() int { return cap(q) }
func (q Qsolexas) Slice(start, end int) alphabet.Slice { return q[start:end] }
func (q Qsolexas) Append(a alphabet.Slice) alphabet.Slice {
return append(q, a.(Qsolexas)...)
}
func (q Qsolexas) Copy(a alphabet.Slice) int { return copy(q, a.(Qsolexas)) }
type Solexa struct {
seq.Annotation
Qual Qsolexas
Encode alphabet.Encoding
}
// Create a new scoring type.
func NewSolexa(id string, q []alphabet.Qsolexa, encode alphabet.Encoding) *Solexa {
return &Solexa{
Annotation: seq.Annotation{ID: id},
Qual: append([]alphabet.Qsolexa(nil), q...),
Encode: encode,
}
}
// Returns the underlying quality score slice.
func (q *Solexa) Slice() alphabet.Slice { return q.Qual }
// Set the underlying quality score slice.
func (q *Solexa) SetSlice(sl alphabet.Slice) { q.Qual = sl.(Qsolexas) }
// Append to the scores.
func (q *Solexa) Append(a ...alphabet.Qsolexa) { q.Qual = append(q.Qual, a...) }
// Return the raw score at position pos.
func (q *Solexa) At(i int) alphabet.Qsolexa { return q.Qual[i-q.Offset] }
// Return the error probability at position pos.
func (q *Solexa) EAt(i int) float64 { return q.Qual[i-q.Offset].ProbE() }
// Set the raw score at position pos to qual.
func (q *Solexa) Set(i int, qual alphabet.Qsolexa) error { q.Qual[i-q.Offset] = qual; return nil }
// Set the error probability to e at position pos.
func (q *Solexa) SetE(i int, e float64) error {
q.Qual[i-q.Offset] = alphabet.Esolexa(e)
return nil
}
// Encode the quality at position pos to a letter based on the sequence Encode setting.
func (q *Solexa) QEncode(i int) byte {
return q.Qual[i-q.Offset].Encode(q.Encode)
}
// Decode a quality letter to a solexa score based on the sequence Encode setting.
func (q *Solexa) QDecode(l byte) alphabet.Qsolexa { return q.Encode.DecodeToQsolexa(l) }
// Return the quality Encode type.
func (q *Solexa) Encoding() alphabet.Encoding { return q.Encode }
// Set the quality Encode type to e.
func (q *Solexa) SetEncoding(e alphabet.Encoding) error { q.Encode = e; return nil }
// Return the length of the score sequence.
func (q *Solexa) Len() int { return len(q.Qual) }
// Return the start position of the score sequence.
func (q *Solexa) Start() int { return q.Offset }
// Return the end position of the score sequence.
func (q *Solexa) End() int { return q.Offset + q.Len() }
// Return a copy of the quality sequence.
func (q *Solexa) Copy() seq.Quality {
c := *q
c.Qual = append([]alphabet.Qsolexa(nil), q.Qual...)
return &c
}
// Reverse the order of elements in the sequence.
func (q *Solexa) Reverse() {
l := q.Qual
for i, j := 0, len(l)-1; i < j; i, j = i+1, j-1 {
l[i], l[j] = l[j], l[i]
}
}
func (q *Solexa) String() string {
qs := make([]byte, 0, len(q.Qual))
for _, s := range q.Qual {
qs = append(qs, s.Encode(q.Encode))
}
return string(qs)
}
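// Minimal usage sketch (illustrative only, not part of the original source):
//
//	q := NewSolexa("read1", []alphabet.Qsolexa{20, 30, 40}, alphabet.Sanger)
//	raw := q.At(0)  // raw Solexa score at the first position
//	pe := q.EAt(0)  // corresponding error probability
//	s := q.String() // scores encoded using q.Encode
//
// The read ID, the score values and the alphabet.Sanger encoding are
// assumptions chosen for the example.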
|
"github.com/biogo/biogo/seq"
)
|
arabic.rs
|
//! This file was generated automatically by the Snowball to Rust compiler
//! http://snowballstem.org/
#![allow(non_upper_case_globals)]
#![allow(non_snake_case)]
#![allow(unused_variables)]
#![allow(unused_mut)]
use snowball::SnowballEnv;
use snowball::Among;
static A_0: &'static [Among<Context>; 161] = &[
Among("!", -1, 3, None),
Among("\"", -1, 3, None),
Among("%", -1, 3, None),
Among("*", -1, 3, None),
Among(",", -1, 3, None),
Among(".", -1, 3, None),
Among("/", -1, 3, None),
Among(":", -1, 3, None),
Among(";", -1, 3, None),
Among("?", -1, 3, None),
Among("\\", -1, 3, None),
Among("\u{060C}", -1, 4, None),
Among("\u{061B}", -1, 4, None),
Among("\u{061F}", -1, 4, None),
Among("\u{0640}", -1, 2, None),
Among("\u{064B}", -1, 1, None),
Among("\u{064C}", -1, 1, None),
Among("\u{064D}", -1, 1, None),
Among("\u{064E}", -1, 1, None),
Among("\u{064F}", -1, 1, None),
Among("\u{0650}", -1, 1, None),
Among("\u{0651}", -1, 1, None),
Among("\u{0652}", -1, 1, None),
Among("\u{0660}", -1, 5, None),
Among("\u{0661}", -1, 6, None),
Among("\u{0662}", -1, 7, None),
Among("\u{0663}", -1, 8, None),
Among("\u{0664}", -1, 9, None),
Among("\u{0665}", -1, 10, None),
Among("\u{0666}", -1, 11, None),
Among("\u{0667}", -1, 12, None),
Among("\u{0668}", -1, 13, None),
Among("\u{0669}", -1, 14, None),
Among("\u{066A}", -1, 15, None),
Among("\u{066B}", -1, 15, None),
Among("\u{066C}", -1, 15, None),
Among("\u{FE80}", -1, 16, None),
Among("\u{FE81}", -1, 20, None),
Among("\u{FE82}", -1, 20, None),
Among("\u{FE83}", -1, 17, None),
Among("\u{FE84}", -1, 17, None),
Among("\u{FE85}", -1, 21, None),
Among("\u{FE86}", -1, 21, None),
Among("\u{FE87}", -1, 18, None),
Among("\u{FE88}", -1, 18, None),
Among("\u{FE89}", -1, 19, None),
Among("\u{FE8A}", -1, 19, None),
Among("\u{FE8B}", -1, 19, None),
Among("\u{FE8C}", -1, 19, None),
Among("\u{FE8D}", -1, 22, None),
Among("\u{FE8E}", -1, 22, None),
Among("\u{FE8F}", -1, 23, None),
Among("\u{FE90}", -1, 23, None),
Among("\u{FE91}", -1, 23, None),
Among("\u{FE92}", -1, 23, None),
Among("\u{FE93}", -1, 24, None),
Among("\u{FE94}", -1, 24, None),
Among("\u{FE95}", -1, 25, None),
Among("\u{FE96}", -1, 25, None),
Among("\u{FE97}", -1, 25, None),
Among("\u{FE98}", -1, 25, None),
Among("\u{FE99}", -1, 26, None),
Among("\u{FE9A}", -1, 26, None),
Among("\u{FE9B}", -1, 26, None),
Among("\u{FE9C}", -1, 26, None),
Among("\u{FE9D}", -1, 27, None),
Among("\u{FE9E}", -1, 27, None),
Among("\u{FE9F}", -1, 27, None),
Among("\u{FEA0}", -1, 27, None),
Among("\u{FEA1}", -1, 28, None),
Among("\u{FEA2}", -1, 28, None),
Among("\u{FEA3}", -1, 28, None),
Among("\u{FEA4}", -1, 28, None),
Among("\u{FEA5}", -1, 29, None),
Among("\u{FEA6}", -1, 29, None),
Among("\u{FEA7}", -1, 29, None),
Among("\u{FEA8}", -1, 29, None),
Among("\u{FEA9}", -1, 30, None),
Among("\u{FEAA}", -1, 30, None),
Among("\u{FEAB}", -1, 31, None),
Among("\u{FEAC}", -1, 31, None),
Among("\u{FEAD}", -1, 32, None),
Among("\u{FEAE}", -1, 32, None),
Among("\u{FEAF}", -1, 33, None),
Among("\u{FEB0}", -1, 33, None),
Among("\u{FEB1}", -1, 34, None),
Among("\u{FEB2}", -1, 34, None),
Among("\u{FEB3}", -1, 34, None),
Among("\u{FEB4}", -1, 34, None),
Among("\u{FEB5}", -1, 35, None),
Among("\u{FEB6}", -1, 35, None),
Among("\u{FEB7}", -1, 35, None),
Among("\u{FEB8}", -1, 35, None),
Among("\u{FEB9}", -1, 36, None),
Among("\u{FEBA}", -1, 36, None),
Among("\u{FEBB}", -1, 36, None),
Among("\u{FEBC}", -1, 36, None),
Among("\u{FEBD}", -1, 37, None),
Among("\u{FEBE}", -1, 37, None),
Among("\u{FEBF}", -1, 37, None),
Among("\u{FEC0}", -1, 37, None),
Among("\u{FEC1}", -1, 38, None),
Among("\u{FEC2}", -1, 38, None),
Among("\u{FEC3}", -1, 38, None),
Among("\u{FEC4}", -1, 38, None),
Among("\u{FEC5}", -1, 39, None),
Among("\u{FEC6}", -1, 39, None),
Among("\u{FEC7}", -1, 39, None),
Among("\u{FEC8}", -1, 39, None),
Among("\u{FEC9}", -1, 40, None),
Among("\u{FECA}", -1, 40, None),
Among("\u{FECB}", -1, 40, None),
Among("\u{FECC}", -1, 40, None),
Among("\u{FECD}", -1, 41, None),
Among("\u{FECE}", -1, 41, None),
Among("\u{FECF}", -1, 41, None),
Among("\u{FED0}", -1, 41, None),
Among("\u{FED1}", -1, 42, None),
Among("\u{FED2}", -1, 42, None),
Among("\u{FED3}", -1, 42, None),
Among("\u{FED4}", -1, 42, None),
Among("\u{FED5}", -1, 43, None),
Among("\u{FED6}", -1, 43, None),
Among("\u{FED7}", -1, 43, None),
Among("\u{FED8}", -1, 43, None),
Among("\u{FED9}", -1, 44, None),
Among("\u{FEDA}", -1, 44, None),
Among("\u{FEDB}", -1, 44, None),
Among("\u{FEDC}", -1, 44, None),
Among("\u{FEDD}", -1, 45, None),
Among("\u{FEDE}", -1, 45, None),
Among("\u{FEDF}", -1, 45, None),
Among("\u{FEE0}", -1, 45, None),
Among("\u{FEE1}", -1, 46, None),
Among("\u{FEE2}", -1, 46, None),
Among("\u{FEE3}", -1, 46, None),
Among("\u{FEE4}", -1, 46, None),
Among("\u{FEE5}", -1, 47, None),
Among("\u{FEE6}", -1, 47, None),
Among("\u{FEE7}", -1, 47, None),
Among("\u{FEE8}", -1, 47, None),
Among("\u{FEE9}", -1, 48, None),
Among("\u{FEEA}", -1, 48, None),
Among("\u{FEEB}", -1, 48, None),
Among("\u{FEEC}", -1, 48, None),
Among("\u{FEED}", -1, 49, None),
Among("\u{FEEE}", -1, 49, None),
Among("\u{FEEF}", -1, 50, None),
Among("\u{FEF0}", -1, 50, None),
Among("\u{FEF1}", -1, 51, None),
Among("\u{FEF2}", -1, 51, None),
Among("\u{FEF3}", -1, 51, None),
Among("\u{FEF4}", -1, 51, None),
Among("\u{FEF5}", -1, 55, None),
Among("\u{FEF6}", -1, 55, None),
Among("\u{FEF7}", -1, 53, None),
Among("\u{FEF8}", -1, 53, None),
Among("\u{FEF9}", -1, 54, None),
Among("\u{FEFA}", -1, 54, None),
Among("\u{FEFB}", -1, 52, None),
Among("\u{FEFC}", -1, 52, None),
];
static A_1: &'static [Among<Context>; 5] = &[
Among("\u{0622}", -1, 1, None),
Among("\u{0623}", -1, 1, None),
Among("\u{0624}", -1, 2, None),
Among("\u{0625}", -1, 1, None),
Among("\u{0626}", -1, 3, None),
];
static A_2: &'static [Among<Context>; 5] = &[
Among("\u{0622}", -1, 1, None),
Among("\u{0623}", -1, 1, None),
Among("\u{0624}", -1, 2, None),
Among("\u{0625}", -1, 1, None),
Among("\u{0626}", -1, 3, None),
];
static A_3: &'static [Among<Context>; 4] = &[
Among("\u{0627}\u{0644}", -1, 2, None),
Among("\u{0628}\u{0627}\u{0644}", -1, 1, None),
Among("\u{0643}\u{0627}\u{0644}", -1, 1, None),
Among("\u{0644}\u{0644}", -1, 2, None),
];
static A_4: &'static [Among<Context>; 5] = &[
Among("\u{0623}\u{0622}", -1, 2, None),
Among("\u{0623}\u{0623}", -1, 1, None),
Among("\u{0623}\u{0624}", -1, 3, None),
Among("\u{0623}\u{0625}", -1, 5, None),
Among("\u{0623}\u{0627}", -1, 4, None),
];
static A_5: &'static [Among<Context>; 2] = &[
Among("\u{0641}", -1, 1, None),
Among("\u{0648}", -1, 2, None),
];
static A_6: &'static [Among<Context>; 4] = &[
Among("\u{0627}\u{0644}", -1, 2, None),
Among("\u{0628}\u{0627}\u{0644}", -1, 1, None),
Among("\u{0643}\u{0627}\u{0644}", -1, 1, None),
Among("\u{0644}\u{0644}", -1, 2, None),
];
static A_7: &'static [Among<Context>; 3] = &[
Among("\u{0628}", -1, 1, None),
Among("\u{0628}\u{0628}", 0, 2, None),
Among("\u{0643}\u{0643}", -1, 3, None),
];
static A_8: &'static [Among<Context>; 4] = &[
Among("\u{0633}\u{0623}", -1, 4, None),
Among("\u{0633}\u{062A}", -1, 2, None),
Among("\u{0633}\u{0646}", -1, 3, None),
Among("\u{0633}\u{064A}", -1, 1, None),
];
static A_9: &'static [Among<Context>; 3] = &[
Among("\u{062A}\u{0633}\u{062A}", -1, 1, None),
Among("\u{0646}\u{0633}\u{062A}", -1, 1, None),
Among("\u{064A}\u{0633}\u{062A}", -1, 1, None),
];
static A_10: &'static [Among<Context>; 10] = &[
Among("\u{0643}", -1, 1, None),
Among("\u{0643}\u{0645}", -1, 2, None),
Among("\u{0647}\u{0645}", -1, 2, None),
Among("\u{0647}\u{0646}", -1, 2, None),
Among("\u{0647}", -1, 1, None),
Among("\u{064A}", -1, 1, None),
Among("\u{0643}\u{0645}\u{0627}", -1, 3, None),
Among("\u{0647}\u{0645}\u{0627}", -1, 3, None),
Among("\u{0646}\u{0627}", -1, 2, None),
Among("\u{0647}\u{0627}", -1, 2, None),
];
static A_11: &'static [Among<Context>; 1] = &[
Among("\u{0646}", -1, 1, None),
];
static A_12: &'static [Among<Context>; 3] = &[
Among("\u{0648}", -1, 1, None),
Among("\u{064A}", -1, 1, None),
Among("\u{0627}", -1, 1, None),
];
static A_13: &'static [Among<Context>; 1] = &[
Among("\u{0627}\u{062A}", -1, 1, None),
];
static A_14: &'static [Among<Context>; 1] = &[
Among("\u{062A}", -1, 1, None),
];
static A_15: &'static [Among<Context>; 1] = &[
Among("\u{0629}", -1, 1, None),
];
static A_16: &'static [Among<Context>; 1] = &[
Among("\u{064A}", -1, 1, None),
];
static A_17: &'static [Among<Context>; 12] = &[
Among("\u{0643}", -1, 1, None),
Among("\u{0643}\u{0645}", -1, 2, None),
Among("\u{0647}\u{0645}", -1, 2, None),
Among("\u{0643}\u{0646}", -1, 2, None),
Among("\u{0647}\u{0646}", -1, 2, None),
Among("\u{0647}", -1, 1, None),
Among("\u{0643}\u{0645}\u{0648}", -1, 3, None),
Among("\u{0646}\u{064A}", -1, 2, None),
Among("\u{0643}\u{0645}\u{0627}", -1, 3, None),
Among("\u{0647}\u{0645}\u{0627}", -1, 3, None),
Among("\u{0646}\u{0627}", -1, 2, None),
Among("\u{0647}\u{0627}", -1, 2, None),
];
static A_18: &'static [Among<Context>; 11] = &[
Among("\u{0646}", -1, 2, None),
Among("\u{0648}\u{0646}", 0, 4, None),
Among("\u{064A}\u{0646}", 0, 4, None),
Among("\u{0627}\u{0646}", 0, 4, None),
Among("\u{062A}\u{0646}", 0, 3, None),
Among("\u{064A}", -1, 2, None),
Among("\u{0627}", -1, 2, None),
Among("\u{062A}\u{0645}\u{0627}", 6, 5, None),
Among("\u{0646}\u{0627}", 6, 3, None),
Among("\u{062A}\u{0627}", 6, 3, None),
Among("\u{062A}", -1, 1, None),
];
static A_19: &'static [Among<Context>; 2] = &[
Among("\u{062A}\u{0645}", -1, 1, None),
Among("\u{0648}\u{0627}", -1, 1, None),
];
static A_20: &'static [Among<Context>; 2] = &[
Among("\u{0648}", -1, 1, None),
Among("\u{062A}\u{0645}\u{0648}", 0, 2, None),
];
static A_21: &'static [Among<Context>; 1] = &[
Among("\u{0649}", -1, 1, None),
];
#[derive(Clone)]
struct Context {
b_is_defined: bool,
b_is_verb: bool,
b_is_noun: bool,
i_word_len: usize,
}
fn r_Normalize_pre(env: &mut SnowballEnv, context: &mut Context) -> bool {
let mut among_var;
// (, line 258
// loop, line 259
for _ in 0..env.current.chars().count() {
// (, line 259
// or, line 328
'lab0: loop {
let v_2 = env.cursor;
'lab1: loop {
// (, line 260
// [, line 261
env.bra = env.cursor;
// substring, line 261
among_var = env.find_among(A_0, context);
if among_var == 0 {
break 'lab1;
}
// ], line 261
env.ket = env.cursor;
if among_var == 0 {
break 'lab1;
} else if among_var == 1 {
// (, line 262
// delete, line 262
if !env.slice_del() {
return false;
}
} else if among_var == 2 {
// (, line 263
// delete, line 263
if !env.slice_del() {
return false;
}
} else if among_var == 3 {
// (, line 266
// delete, line 266
if !env.slice_del() {
return false;
}
} else if among_var == 4 {
// (, line 267
// delete, line 267
if !env.slice_del() {
return false;
}
} else if among_var == 5 {
// (, line 270
// <-, line 270
if !env.slice_from("0") {
return false;
}
} else if among_var == 6 {
// (, line 271
// <-, line 271
if !env.slice_from("1") {
return false;
}
} else if among_var == 7 {
// (, line 272
// <-, line 272
if !env.slice_from("2") {
return false;
}
} else if among_var == 8 {
// (, line 273
// <-, line 273
if !env.slice_from("3") {
return false;
}
} else if among_var == 9 {
// (, line 274
// <-, line 274
if !env.slice_from("4") {
return false;
}
} else if among_var == 10 {
// (, line 275
// <-, line 275
if !env.slice_from("5") {
return false;
}
} else if among_var == 11 {
// (, line 276
// <-, line 276
if !env.slice_from("6") {
return false;
}
} else if among_var == 12 {
// (, line 277
// <-, line 277
if !env.slice_from("7") {
return false;
}
} else if among_var == 13 {
// (, line 278
// <-, line 278
if !env.slice_from("8") {
return false;
}
} else if among_var == 14 {
// (, line 279
// <-, line 279
if !env.slice_from("9") {
return false;
}
} else if among_var == 15 {
// (, line 280
// delete, line 280
if !env.slice_del() {
return false;
}
} else if among_var == 16 {
// (, line 283
// <-, line 283
if !env.slice_from("\u{0621}") {
return false;
}
} else if among_var == 17 {
// (, line 284
// <-, line 284
if !env.slice_from("\u{0623}") {
return false;
}
} else if among_var == 18 {
// (, line 285
// <-, line 285
if !env.slice_from("\u{0625}") {
return false;
}
} else if among_var == 19 {
// (, line 286
// <-, line 286
if !env.slice_from("\u{0626}") {
return false;
}
} else if among_var == 20 {
// (, line 287
// <-, line 287
if !env.slice_from("\u{0622}") {
return false;
}
} else if among_var == 21 {
// (, line 288
// <-, line 288
if !env.slice_from("\u{0624}") {
return false;
}
} else if among_var == 22 {
// (, line 289
// <-, line 289
if !env.slice_from("\u{0627}") {
return false;
}
} else if among_var == 23 {
// (, line 290
// <-, line 290
if !env.slice_from("\u{0628}") {
return false;
}
} else if among_var == 24 {
// (, line 291
// <-, line 291
if !env.slice_from("\u{0629}") {
return false;
}
} else if among_var == 25 {
// (, line 292
// <-, line 292
if !env.slice_from("\u{062A}") {
return false;
}
} else if among_var == 26 {
// (, line 293
// <-, line 293
if !env.slice_from("\u{062B}") {
return false;
}
} else if among_var == 27 {
// (, line 294
// <-, line 294
if !env.slice_from("\u{062C}") {
return false;
}
} else if among_var == 28 {
// (, line 295
// <-, line 295
if !env.slice_from("\u{062D}") {
return false;
}
} else if among_var == 29 {
// (, line 296
// <-, line 296
if !env.slice_from("\u{062E}") {
return false;
}
} else if among_var == 30 {
// (, line 297
// <-, line 297
if !env.slice_from("\u{062F}") {
return false;
}
} else if among_var == 31 {
// (, line 298
// <-, line 298
if !env.slice_from("\u{0630}") {
return false;
}
} else if among_var == 32 {
// (, line 299
// <-, line 299
if !env.slice_from("\u{0631}") {
return false;
}
} else if among_var == 33 {
// (, line 300
// <-, line 300
if !env.slice_from("\u{0632}") {
return false;
}
} else if among_var == 34 {
// (, line 301
// <-, line 301
if !env.slice_from("\u{0633}") {
return false;
}
} else if among_var == 35 {
// (, line 302
// <-, line 302
if !env.slice_from("\u{0634}") {
return false;
}
} else if among_var == 36 {
// (, line 303
// <-, line 303
if !env.slice_from("\u{0635}") {
return false;
}
} else if among_var == 37 {
// (, line 304
// <-, line 304
if !env.slice_from("\u{0636}") {
return false;
}
} else if among_var == 38 {
// (, line 305
// <-, line 305
if !env.slice_from("\u{0637}") {
return false;
}
} else if among_var == 39 {
// (, line 306
// <-, line 306
if !env.slice_from("\u{0638}") {
return false;
}
} else if among_var == 40 {
// (, line 307
// <-, line 307
if !env.slice_from("\u{0639}") {
return false;
}
} else if among_var == 41 {
// (, line 308
// <-, line 308
if !env.slice_from("\u{063A}") {
return false;
}
} else if among_var == 42 {
// (, line 309
// <-, line 309
if !env.slice_from("\u{0641}") {
return false;
}
} else if among_var == 43 {
// (, line 310
// <-, line 310
if !env.slice_from("\u{0642}") {
return false;
}
} else if among_var == 44 {
// (, line 311
// <-, line 311
if !env.slice_from("\u{0643}") {
return false;
}
} else if among_var == 45 {
// (, line 312
// <-, line 312
if !env.slice_from("\u{0644}") {
return false;
}
} else if among_var == 46 {
// (, line 313
// <-, line 313
if !env.slice_from("\u{0645}") {
return false;
}
} else if among_var == 47 {
// (, line 314
// <-, line 314
if !env.slice_from("\u{0646}") {
return false;
}
} else if among_var == 48 {
// (, line 315
// <-, line 315
if !env.slice_from("\u{0647}") {
return false;
}
} else if among_var == 49 {
// (, line 316
// <-, line 316
if !env.slice_from("\u{0648}") {
return false;
}
} else if among_var == 50 {
// (, line 317
// <-, line 317
if !env.slice_from("\u{0649}") {
return false;
}
} else if among_var == 51 {
// (, line 318
// <-, line 318
if !env.slice_from("\u{064A}") {
return false;
}
} else if among_var == 52 {
// (, line 321
// <-, line 321
if !env.slice_from("\u{0644}\u{0627}") {
return false;
}
} else if among_var == 53 {
// (, line 322
// <-, line 322
if !env.slice_from("\u{0644}\u{0623}") {
return false;
}
} else if among_var == 54 {
// (, line 323
// <-, line 323
if !env.slice_from("\u{0644}\u{0625}") {
return false;
}
} else if among_var == 55 {
// (, line 324
// <-, line 324
if !env.slice_from("\u{0644}\u{0622}") {
return false;
}
}
break 'lab0;
}
env.cursor = v_2;
// next, line 329
if env.cursor >= env.limit {
return false;
}
env.next_char();
break 'lab0;
}
}
return true;
}
fn r_Normalize_post(env: &mut SnowballEnv, context: &mut Context) -> bool {
let mut among_var;
// (, line 333
// do, line 335
let v_1 = env.cursor;
'lab0: loop {
// (, line 335
// backwards, line 337
env.limit_backward = env.cursor;
env.cursor = env.limit;
// (, line 337
// [, line 338
env.ket = env.cursor;
// substring, line 338
among_var = env.find_among_b(A_1, context);
if among_var == 0 {
break 'lab0;
}
// ], line 338
env.bra = env.cursor;
if among_var == 0 {
break 'lab0;
} else if among_var == 1 {
// (, line 339
// <-, line 339
if !env.slice_from("\u{0621}") {
return false;
}
} else if among_var == 2 {
// (, line 340
// <-, line 340
if !env.slice_from("\u{0621}") {
return false;
}
} else if among_var == 3 {
// (, line 341
// <-, line 341
if !env.slice_from("\u{0621}") {
return false;
}
}
env.cursor = env.limit_backward;
break 'lab0;
}
env.cursor = v_1;
// do, line 346
let v_2 = env.cursor;
'lab1: loop {
// loop, line 346
for _ in 0..context.i_word_len {
// (, line 346
// or, line 355
'lab2: loop {
let v_4 = env.cursor;
'lab3: loop {
// (, line 347
// [, line 349
env.bra = env.cursor;
// substring, line 349
among_var = env.find_among(A_2, context);
if among_var == 0 {
break 'lab3;
}
// ], line 349
env.ket = env.cursor;
if among_var == 0 {
break 'lab3;
} else if among_var == 1 {
// (, line 350
// <-, line 350
if !env.slice_from("\u{0627}") {
return false;
}
} else if among_var == 2 {
// (, line 351
// <-, line 351
if !env.slice_from("\u{0648}") {
return false;
}
} else if among_var == 3 {
// (, line 352
// <-, line 352
if !env.slice_from("\u{064A}") {
return false;
}
}
break 'lab2;
}
env.cursor = v_4;
// next, line 356
if env.cursor >= env.limit {
break 'lab1;
}
env.next_char();
break 'lab2;
}
}
break 'lab1;
}
env.cursor = v_2;
return true;
}
fn r_Checks1(env: &mut SnowballEnv, context: &mut Context) -> bool {
let mut among_var;
// (, line 361
context.i_word_len = env.current.chars().count();
// [, line 363
env.bra = env.cursor;
// substring, line 363
among_var = env.find_among(A_3, context);
if among_var == 0 {
return false;
}
// ], line 363
env.ket = env.cursor;
if among_var == 0 {
return false;
} else if among_var == 1 {
// (, line 364
if !(context.i_word_len > 4){
return false;
}
// set is_noun, line 364
context.b_is_noun = true;
// unset is_verb, line 364
context.b_is_verb = false;
// set is_defined, line 364
context.b_is_defined = true;
} else if among_var == 2 {
// (, line 365
if !(context.i_word_len > 3){
return false;
}
// set is_noun, line 365
context.b_is_noun = true;
// unset is_verb, line 365
context.b_is_verb = false;
// set is_defined, line 365
context.b_is_defined = true;
}
return true;
}
fn r_Prefix_Step1(env: &mut SnowballEnv, context: &mut Context) -> bool {
let mut among_var;
// (, line 371
context.i_word_len = env.current.chars().count();
// [, line 373
env.bra = env.cursor;
// substring, line 373
among_var = env.find_among(A_4, context);
if among_var == 0 {
return false;
}
// ], line 373
env.ket = env.cursor;
if among_var == 0 {
return false;
} else if among_var == 1 {
// (, line 374
if !(context.i_word_len > 3){
return false;
}
// <-, line 374
if !env.slice_from("\u{0623}") {
return false;
}
} else if among_var == 2 {
// (, line 375
if !(context.i_word_len > 3){
return false;
}
// <-, line 375
if !env.slice_from("\u{0622}") {
return false;
}
} else if among_var == 3 {
// (, line 376
if !(context.i_word_len > 3){
return false;
}
// <-, line 376
if !env.slice_from("\u{0623}") {
return false;
}
} else if among_var == 4 {
// (, line 377
if !(context.i_word_len > 3){
return false;
}
// <-, line 377
if !env.slice_from("\u{0627}") {
return false;
}
} else if among_var == 5 {
// (, line 378
if !(context.i_word_len > 3){
return false;
}
// <-, line 378
if !env.slice_from("\u{0625}") {
return false;
}
}
return true;
}
fn r_Prefix_Step2(env: &mut SnowballEnv, context: &mut Context) -> bool {
let mut among_var;
// (, line 383
context.i_word_len = env.current.chars().count();
// not, line 385
let v_1 = env.cursor;
'lab0: loop {
// literal, line 385
if !env.eq_s(&"\u{0641}\u{0627}") {
break 'lab0;
}
return false;
}
env.cursor = v_1;
// not, line 386
let v_2 = env.cursor;
'lab1: loop {
// literal, line 386
if !env.eq_s(&"\u{0648}\u{0627}") {
break 'lab1;
}
return false;
}
env.cursor = v_2;
// [, line 387
env.bra = env.cursor;
// substring, line 387
among_var = env.find_among(A_5, context);
if among_var == 0 {
return false;
}
// ], line 387
env.ket = env.cursor;
if among_var == 0 {
return false;
} else if among_var == 1 {
// (, line 388
if !(context.i_word_len > 3){
return false;
}
// delete, line 388
if !env.slice_del() {
return false;
}
} else if among_var == 2 {
// (, line 389
if !(context.i_word_len > 3){
return false;
}
// delete, line 389
if !env.slice_del() {
return false;
}
}
return true;
}
fn r_Prefix_Step3a_Noun(env: &mut SnowballEnv, context: &mut Context) -> bool {
let mut among_var;
// (, line 393
context.i_word_len = env.current.chars().count();
// [, line 395
env.bra = env.cursor;
// substring, line 395
among_var = env.find_among(A_6, context);
if among_var == 0 {
return false;
}
// ], line 395
env.ket = env.cursor;
if among_var == 0 {
return false;
} else if among_var == 1 {
// (, line 396
if !(context.i_word_len > 5){
return false;
}
// delete, line 396
if !env.slice_del() {
return false;
}
} else if among_var == 2 {
// (, line 397
if !(context.i_word_len > 4){
return false;
}
// delete, line 397
if !env.slice_del() {
return false;
}
}
return true;
}
fn r_Prefix_Step3b_Noun(env: &mut SnowballEnv, context: &mut Context) -> bool {
let mut among_var;
// (, line 401
context.i_word_len = env.current.chars().count();
// not, line 403
let v_1 = env.cursor;
'lab0: loop {
// literal, line 403
if !env.eq_s(&"\u{0628}\u{0627}") {
break 'lab0;
}
return false;
}
env.cursor = v_1;
// [, line 404
env.bra = env.cursor;
// substring, line 404
among_var = env.find_among(A_7, context);
if among_var == 0 {
return false;
}
// ], line 404
env.ket = env.cursor;
if among_var == 0 {
return false;
} else if among_var == 1 {
// (, line 405
if !(context.i_word_len > 3){
return false;
}
// delete, line 405
if !env.slice_del() {
return false;
}
} else if among_var == 2 {
// (, line 407
if !(context.i_word_len > 3){
return false;
}
// <-, line 407
if !env.slice_from("\u{0628}") {
return false;
}
} else if among_var == 3 {
// (, line 408
if !(context.i_word_len > 3){
return false;
}
// <-, line 408
if !env.slice_from("\u{0643}") {
return false;
}
}
return true;
}
fn r_Prefix_Step3_Verb(env: &mut SnowballEnv, context: &mut Context) -> bool {
let mut among_var;
// (, line 413
context.i_word_len = env.current.chars().count();
// [, line 415
env.bra = env.cursor;
// substring, line 415
among_var = env.find_among(A_8, context);
if among_var == 0 {
return false;
}
// ], line 415
env.ket = env.cursor;
if among_var == 0 {
return false;
} else if among_var == 1 {
// (, line 417
if !(context.i_word_len > 4){
return false;
}
// <-, line 417
if !env.slice_from("\u{064A}") {
return false;
}
} else if among_var == 2 {
// (, line 418
if !(context.i_word_len > 4){
return false;
}
// <-, line 418
if !env.slice_from("\u{062A}") {
return false;
}
} else if among_var == 3 {
// (, line 419
if !(context.i_word_len > 4){
return false;
}
// <-, line 419
if !env.slice_from("\u{0646}") {
return false;
}
} else if among_var == 4 {
// (, line 420
if !(context.i_word_len > 4){
return false;
}
// <-, line 420
if !env.slice_from("\u{0623}") {
return false;
}
}
return true;
}
fn r_Prefix_Step4_Verb(env: &mut SnowballEnv, context: &mut Context) -> bool {
let mut among_var;
// (, line 424
context.i_word_len = env.current.chars().count();
// [, line 426
env.bra = env.cursor;
// substring, line 426
among_var = env.find_among(A_9, context);
if among_var == 0 {
return false;
}
// ], line 426
env.ket = env.cursor;
if among_var == 0 {
return false;
} else if among_var == 1 {
// (, line 427
if !(context.i_word_len > 4){
return false;
}
// set is_verb, line 427
context.b_is_verb = true;
// unset is_noun, line 427
context.b_is_noun = false;
// <-, line 427
if !env.slice_from("\u{0627}\u{0633}\u{062A}") {
return false;
}
}
return true;
}
fn
|
(env: &mut SnowballEnv, context: &mut Context) -> bool {
let mut among_var;
// (, line 434
context.i_word_len = env.current.chars().count();
// [, line 436
env.ket = env.cursor;
// substring, line 436
among_var = env.find_among_b(A_10, context);
if among_var == 0 {
return false;
}
// ], line 436
env.bra = env.cursor;
if among_var == 0 {
return false;
} else if among_var == 1 {
// (, line 437
if !(context.i_word_len >= 4){
return false;
}
// delete, line 437
if !env.slice_del() {
return false;
}
} else if among_var == 2 {
// (, line 438
if !(context.i_word_len >= 5){
return false;
}
// delete, line 438
if !env.slice_del() {
return false;
}
} else if among_var == 3 {
// (, line 439
if !(context.i_word_len >= 6){
return false;
}
// delete, line 439
if !env.slice_del() {
return false;
}
}
return true;
}
fn r_Suffix_Noun_Step1b(env: &mut SnowballEnv, context: &mut Context) -> bool {
let mut among_var;
// (, line 442
context.i_word_len = env.current.chars().count();
// [, line 444
env.ket = env.cursor;
// substring, line 444
among_var = env.find_among_b(A_11, context);
if among_var == 0 {
return false;
}
// ], line 444
env.bra = env.cursor;
if among_var == 0 {
return false;
} else if among_var == 1 {
// (, line 445
if !(context.i_word_len > 5){
return false;
}
// delete, line 445
if !env.slice_del() {
return false;
}
}
return true;
}
fn r_Suffix_Noun_Step2a(env: &mut SnowballEnv, context: &mut Context) -> bool {
let mut among_var;
// (, line 449
context.i_word_len = env.current.chars().count();
// [, line 451
env.ket = env.cursor;
// substring, line 451
among_var = env.find_among_b(A_12, context);
if among_var == 0 {
return false;
}
// ], line 451
env.bra = env.cursor;
if among_var == 0 {
return false;
} else if among_var == 1 {
// (, line 452
if !(context.i_word_len > 4){
return false;
}
// delete, line 452
if !env.slice_del() {
return false;
}
}
return true;
}
fn r_Suffix_Noun_Step2b(env: &mut SnowballEnv, context: &mut Context) -> bool {
let mut among_var;
// (, line 456
context.i_word_len = env.current.chars().count();
// [, line 458
env.ket = env.cursor;
// substring, line 458
among_var = env.find_among_b(A_13, context);
if among_var == 0 {
return false;
}
// ], line 458
env.bra = env.cursor;
if among_var == 0 {
return false;
} else if among_var == 1 {
// (, line 459
if !(context.i_word_len >= 5){
return false;
}
// delete, line 459
if !env.slice_del() {
return false;
}
}
return true;
}
fn r_Suffix_Noun_Step2c1(env: &mut SnowballEnv, context: &mut Context) -> bool {
let mut among_var;
// (, line 463
context.i_word_len = env.current.chars().count();
// [, line 465
env.ket = env.cursor;
// substring, line 465
among_var = env.find_among_b(A_14, context);
if among_var == 0 {
return false;
}
// ], line 465
env.bra = env.cursor;
if among_var == 0 {
return false;
} else if among_var == 1 {
// (, line 466
if !(context.i_word_len >= 4){
return false;
}
// delete, line 466
if !env.slice_del() {
return false;
}
}
return true;
}
fn r_Suffix_Noun_Step2c2(env: &mut SnowballEnv, context: &mut Context) -> bool {
let mut among_var;
// (, line 469
context.i_word_len = env.current.chars().count();
// [, line 471
env.ket = env.cursor;
// substring, line 471
among_var = env.find_among_b(A_15, context);
if among_var == 0 {
return false;
}
// ], line 471
env.bra = env.cursor;
if among_var == 0 {
return false;
} else if among_var == 1 {
// (, line 472
if !(context.i_word_len >= 4){
return false;
}
// delete, line 472
if !env.slice_del() {
return false;
}
}
return true;
}
fn r_Suffix_Noun_Step3(env: &mut SnowballEnv, context: &mut Context) -> bool {
let mut among_var;
// (, line 475
context.i_word_len = env.current.chars().count();
// [, line 477
env.ket = env.cursor;
// substring, line 477
among_var = env.find_among_b(A_16, context);
if among_var == 0 {
return false;
}
// ], line 477
env.bra = env.cursor;
if among_var == 0 {
return false;
} else if among_var == 1 {
// (, line 478
if !(context.i_word_len >= 3){
return false;
}
// delete, line 478
if !env.slice_del() {
return false;
}
}
return true;
}
fn r_Suffix_Verb_Step1(env: &mut SnowballEnv, context: &mut Context) -> bool {
let mut among_var;
// (, line 482
context.i_word_len = env.current.chars().count();
// [, line 484
env.ket = env.cursor;
// substring, line 484
among_var = env.find_among_b(A_17, context);
if among_var == 0 {
return false;
}
// ], line 484
env.bra = env.cursor;
if among_var == 0 {
return false;
} else if among_var == 1 {
// (, line 485
if !(context.i_word_len >= 4){
return false;
}
// delete, line 485
if !env.slice_del() {
return false;
}
} else if among_var == 2 {
// (, line 486
if !(context.i_word_len >= 5){
return false;
}
// delete, line 486
if !env.slice_del() {
return false;
}
} else if among_var == 3 {
// (, line 487
if !(context.i_word_len >= 6){
return false;
}
// delete, line 487
if !env.slice_del() {
return false;
}
}
return true;
}
fn r_Suffix_Verb_Step2a(env: &mut SnowballEnv, context: &mut Context) -> bool {
let mut among_var;
// (, line 490
context.i_word_len = env.current.chars().count();
// [, line 492
env.ket = env.cursor;
// substring, line 492
among_var = env.find_among_b(A_18, context);
if among_var == 0 {
return false;
}
// ], line 492
env.bra = env.cursor;
if among_var == 0 {
return false;
} else if among_var == 1 {
// (, line 493
if !(context.i_word_len >= 4){
return false;
}
// delete, line 493
if !env.slice_del() {
return false;
}
} else if among_var == 2 {
// (, line 494
if !(context.i_word_len >= 4){
return false;
}
// delete, line 494
if !env.slice_del() {
return false;
}
} else if among_var == 3 {
// (, line 495
if !(context.i_word_len >= 5){
return false;
}
// delete, line 495
if !env.slice_del() {
return false;
}
} else if among_var == 4 {
// (, line 496
if !(context.i_word_len > 5){
return false;
}
// delete, line 496
if !env.slice_del() {
return false;
}
} else if among_var == 5 {
// (, line 497
if !(context.i_word_len >= 6){
return false;
}
// delete, line 497
if !env.slice_del() {
return false;
}
}
return true;
}
fn r_Suffix_Verb_Step2b(env: &mut SnowballEnv, context: &mut Context) -> bool {
let mut among_var;
// (, line 501
context.i_word_len = env.current.chars().count();
// [, line 503
env.ket = env.cursor;
// substring, line 503
among_var = env.find_among_b(A_19, context);
if among_var == 0 {
return false;
}
// ], line 503
env.bra = env.cursor;
if among_var == 0 {
return false;
} else if among_var == 1 {
// (, line 504
if !(context.i_word_len >= 5){
return false;
}
// delete, line 504
if !env.slice_del() {
return false;
}
}
return true;
}
fn r_Suffix_Verb_Step2c(env: &mut SnowballEnv, context: &mut Context) -> bool {
let mut among_var;
// (, line 509
context.i_word_len = env.current.chars().count();
// [, line 511
env.ket = env.cursor;
// substring, line 511
among_var = env.find_among_b(A_20, context);
if among_var == 0 {
return false;
}
// ], line 511
env.bra = env.cursor;
if among_var == 0 {
return false;
} else if among_var == 1 {
// (, line 512
if !(context.i_word_len >= 4){
return false;
}
// delete, line 512
if !env.slice_del() {
return false;
}
} else if among_var == 2 {
// (, line 513
if !(context.i_word_len >= 6){
return false;
}
// delete, line 513
if !env.slice_del() {
return false;
}
}
return true;
}
fn r_Suffix_All_alef_maqsura(env: &mut SnowballEnv, context: &mut Context) -> bool {
let mut among_var;
// (, line 517
context.i_word_len = env.current.chars().count();
// [, line 519
env.ket = env.cursor;
// substring, line 519
among_var = env.find_among_b(A_21, context);
if among_var == 0 {
return false;
}
// ], line 519
env.bra = env.cursor;
if among_var == 0 {
return false;
} else if among_var == 1 {
// (, line 520
// <-, line 520
if !env.slice_from("\u{064A}") {
return false;
}
}
return true;
}
pub fn stem(env: &mut SnowballEnv) -> bool {
let mut context = &mut Context {
b_is_defined: false,
b_is_verb: false,
b_is_noun: false,
i_word_len: 0,
};
// (, line 527
// set is_noun, line 529
context.b_is_noun = true;
// set is_verb, line 530
context.b_is_verb = true;
// unset is_defined, line 531
context.b_is_defined = false;
// do, line 534
let v_1 = env.cursor;
'lab0: loop {
// call Checks1, line 534
if !r_Checks1(env, context) {
break 'lab0;
}
break 'lab0;
}
env.cursor = v_1;
// do, line 537
let v_2 = env.cursor;
'lab1: loop {
// call Normalize_pre, line 537
if !r_Normalize_pre(env, context) {
break 'lab1;
}
break 'lab1;
}
env.cursor = v_2;
// backwards, line 540
env.limit_backward = env.cursor;
env.cursor = env.limit;
// (, line 540
// do, line 542
let v_3 = env.limit - env.cursor;
'lab2: loop {
// (, line 542
// or, line 556
'lab3: loop {
let v_4 = env.limit - env.cursor;
'lab4: loop {
// (, line 544
// Boolean test is_verb, line 545
if !context.b_is_verb {
break 'lab4;
}
// (, line 546
// or, line 551
'lab5: loop {
let v_5 = env.limit - env.cursor;
'lab6: loop {
// (, line 547
// (, line 548
// atleast, line 548
let mut v_6 = 1;
// atleast, line 548
'replab7: loop{
let v_7 = env.limit - env.cursor;
'lab8: for _ in 0..1 {
// call Suffix_Verb_Step1, line 548
if !r_Suffix_Verb_Step1(env, context) {
break 'lab8;
}
v_6 -= 1;
continue 'replab7;
}
env.cursor = env.limit - v_7;
break 'replab7;
}
if v_6 > 0 {
break 'lab6;
}
// (, line 549
// or, line 549
'lab9: loop {
let v_8 = env.limit - env.cursor;
'lab10: loop {
// call Suffix_Verb_Step2a, line 549
if !r_Suffix_Verb_Step2a(env, context) {
break 'lab10;
}
break 'lab9;
}
env.cursor = env.limit - v_8;
'lab11: loop {
// call Suffix_Verb_Step2c, line 549
if !r_Suffix_Verb_Step2c(env, context) {
break 'lab11;
}
break 'lab9;
}
env.cursor = env.limit - v_8;
// next, line 549
if env.cursor <= env.limit_backward {
break 'lab6;
}
env.previous_char();
break 'lab9;
}
break 'lab5;
}
env.cursor = env.limit - v_5;
'lab12: loop {
// call Suffix_Verb_Step2b, line 551
if !r_Suffix_Verb_Step2b(env, context) {
break 'lab12;
}
break 'lab5;
}
env.cursor = env.limit - v_5;
// call Suffix_Verb_Step2a, line 552
if !r_Suffix_Verb_Step2a(env, context) {
break 'lab4;
}
break 'lab5;
}
break 'lab3;
}
env.cursor = env.limit - v_4;
'lab13: loop {
// (, line 556
// Boolean test is_noun, line 557
if !context.b_is_noun {
break 'lab13;
}
// (, line 558
// try, line 560
let v_9 = env.limit - env.cursor;
'lab14: loop {
// (, line 560
// or, line 562
'lab15: loop {
let v_10 = env.limit - env.cursor;
'lab16: loop {
// call Suffix_Noun_Step2c2, line 561
if !r_Suffix_Noun_Step2c2(env, context) {
break 'lab16;
}
break 'lab15;
}
env.cursor = env.limit - v_10;
'lab17: loop {
// (, line 562
// not, line 562
'lab18: loop {
// Boolean test is_defined, line 562
if !context.b_is_defined {
break 'lab18;
}
break 'lab17;
}
// call Suffix_Noun_Step1a, line 562
if !r_Suffix_Noun_Step1a(env, context) {
break 'lab17;
}
// (, line 562
// or, line 564
'lab19: loop {
let v_12 = env.limit - env.cursor;
'lab20: loop {
// call Suffix_Noun_Step2a, line 563
if !r_Suffix_Noun_Step2a(env, context) {
break 'lab20;
}
break 'lab19;
}
env.cursor = env.limit - v_12;
'lab21: loop {
// call Suffix_Noun_Step2b, line 564
if !r_Suffix_Noun_Step2b(env, context) {
break 'lab21;
}
break 'lab19;
}
env.cursor = env.limit - v_12;
'lab22: loop {
// call Suffix_Noun_Step2c1, line 565
if !r_Suffix_Noun_Step2c1(env, context) {
break 'lab22;
}
break 'lab19;
}
env.cursor = env.limit - v_12;
// next, line 566
if env.cursor <= env.limit_backward {
break 'lab17;
}
env.previous_char();
break 'lab19;
}
break 'lab15;
}
env.cursor = env.limit - v_10;
'lab23: loop {
// (, line 567
// call Suffix_Noun_Step1b, line 567
if !r_Suffix_Noun_Step1b(env, context) {
break 'lab23;
}
// (, line 567
// or, line 569
'lab24: loop {
let v_13 = env.limit - env.cursor;
'lab25: loop {
// call Suffix_Noun_Step2a, line 568
if !r_Suffix_Noun_Step2a(env, context) {
break 'lab25;
}
break 'lab24;
}
env.cursor = env.limit - v_13;
'lab26: loop {
// call Suffix_Noun_Step2b, line 569
if !r_Suffix_Noun_Step2b(env, context) {
break 'lab26;
}
break 'lab24;
}
env.cursor = env.limit - v_13;
// call Suffix_Noun_Step2c1, line 570
if !r_Suffix_Noun_Step2c1(env, context) {
break 'lab23;
}
break 'lab24;
}
break 'lab15;
}
env.cursor = env.limit - v_10;
'lab27: loop {
// (, line 571
// not, line 571
'lab28: loop {
// Boolean test is_defined, line 571
if !context.b_is_defined {
break 'lab28;
}
break 'lab27;
}
// call Suffix_Noun_Step2a, line 571
if !r_Suffix_Noun_Step2a(env, context) {
break 'lab27;
}
break 'lab15;
}
env.cursor = env.limit - v_10;
// (, line 572
// call Suffix_Noun_Step2b, line 572
if !r_Suffix_Noun_Step2b(env, context) {
env.cursor = env.limit - v_9;
break 'lab14;
}
break 'lab15;
}
break 'lab14;
}
// call Suffix_Noun_Step3, line 574
if !r_Suffix_Noun_Step3(env, context) {
break 'lab13;
}
break 'lab3;
}
env.cursor = env.limit - v_4;
// call Suffix_All_alef_maqsura, line 580
if !r_Suffix_All_alef_maqsura(env, context) {
break 'lab2;
}
break 'lab3;
}
break 'lab2;
}
env.cursor = env.limit - v_3;
env.cursor = env.limit_backward;
// do, line 585
let v_15 = env.cursor;
'lab29: loop {
// (, line 585
// try, line 586
let v_16 = env.cursor;
'lab30: loop {
// call Prefix_Step1, line 586
if !r_Prefix_Step1(env, context) {
env.cursor = v_16;
break 'lab30;
}
break 'lab30;
}
// try, line 587
let v_17 = env.cursor;
'lab31: loop {
// call Prefix_Step2, line 587
if !r_Prefix_Step2(env, context) {
env.cursor = v_17;
break 'lab31;
}
break 'lab31;
}
// (, line 588
// or, line 589
'lab32: loop {
let v_18 = env.cursor;
'lab33: loop {
// call Prefix_Step3a_Noun, line 588
if !r_Prefix_Step3a_Noun(env, context) {
break 'lab33;
}
break 'lab32;
}
env.cursor = v_18;
'lab34: loop {
// (, line 589
// Boolean test is_noun, line 589
if !context.b_is_noun {
break 'lab34;
}
// call Prefix_Step3b_Noun, line 589
if !r_Prefix_Step3b_Noun(env, context) {
break 'lab34;
}
break 'lab32;
}
env.cursor = v_18;
// (, line 590
// Boolean test is_verb, line 590
if !context.b_is_verb {
break 'lab29;
}
// try, line 590
let v_19 = env.cursor;
'lab35: loop {
// call Prefix_Step3_Verb, line 590
if !r_Prefix_Step3_Verb(env, context) {
env.cursor = v_19;
break 'lab35;
}
break 'lab35;
}
// call Prefix_Step4_Verb, line 590
if !r_Prefix_Step4_Verb(env, context) {
break 'lab29;
}
break 'lab32;
}
break 'lab29;
}
env.cursor = v_15;
// do, line 595
let v_20 = env.cursor;
'lab36: loop {
// call Normalize_post, line 595
if !r_Normalize_post(env, context) {
break 'lab36;
}
break 'lab36;
}
env.cursor = v_20;
return true;
}
|
r_Suffix_Noun_Step1a
|
handlers.rs
|
#![allow(clippy::unreadable_literal)]
use chrono::{DateTime, NaiveDateTime, Utc};
use orgize::{
export::{DefaultHtmlHandler, HtmlEscape as Escape, HtmlHandler},
Element,
};
use std::{io::Write, path::Path, process::Command};
use syntect::{
easy::HighlightLines,
highlighting::ThemeSet,
html::{styled_line_to_highlighted_html, IncludeBackground},
parsing::SyntaxSet,
};
use crate::error::{Error, Result};
#[derive(Default)]
struct SolomonBaseHandler {
default: DefaultHtmlHandler,
last_char: Option<char>,
}
impl HtmlHandler<Error> for SolomonBaseHandler {
fn start<W: Write>(&mut self, mut w: W, element: &Element) -> Result<()> {
match element {
Element::Document { .. } => (),
            Element::Macros(macros) => match &macros.name as &str {
"age-days" => {
let date =
DateTime::from_utc(NaiveDateTime::from_timestamp(1382071200, 0), Utc);
write!(w, " {} ", (Utc::now() - date).num_days())?;
}
"angular-core-version" => {
write_nodejs_package_version(w, "@angular/core")?;
}
"angular-material-version" => {
write_nodejs_package_version(w, "@angular/material")?;
}
"angular-cli-version" => {
write_nodejs_package_version(w, "@angular/cli")?;
}
_ => (),
},
Element::Paragraph { .. } => {
self.last_char = None;
write!(w, "<p>")?;
}
Element::Link(link) => {
let text = link.desc.as_ref().unwrap_or(&link.path);
if should_insert_space(self.last_char, text.chars().next()) {
write!(w, " ")?;
}
self.last_char = text.chars().last();
write!(w, "<a href=\"{}\">{}</a>", Escape(&link.path), Escape(text))?;
}
Element::Text { value } => {
for line in value.lines() {
let text = line.trim();
if should_insert_space(self.last_char, text.chars().next()) {
write!(w, " ")?;
}
self.last_char = text.chars().last();
write!(w, "{}", Escape(text))?;
}
}
Element::Verbatim { value } | Element::Code { value } => {
let text = value.trim();
if should_insert_space(self.last_char, text.chars().next()) {
write!(w, " ")?;
}
self.last_char = text.chars().last();
write!(w, "<code>{}</code>", Escape(text))?;
}
_ => self.default.start(w, element)?,
}
Ok(())
}
fn end<W: Write>(&mut self, w: W, element: &Element) -> Result<()> {
match element {
Element::Document { .. } => (),
_ => self.default.end(w, element)?,
}
Ok(())
}
}
#[derive(Default)]
struct SolomonSyntectHandler(SolomonBaseHandler);
impl SolomonSyntectHandler {
fn highlight(&self, language: Option<&str>, content: &str) -> String {
lazy_static::lazy_static! {
static ref SYNTAX_SET: SyntaxSet = {
let set = SyntaxSet::load_defaults_newlines();
let mut builder = set.into_builder();
// add extra language syntax files
builder.add_from_folder("gen/syntax", true).unwrap();
builder.build()
};
static ref THEME_SET: ThemeSet = ThemeSet::load_defaults();
}
let language = match language {
Some("elisp") | Some("emacs-lisp") => Some("lisp"),
_ => language,
};
let mut highlighter = HighlightLines::new(
language
.and_then(|lang| SYNTAX_SET.find_syntax_by_token(lang))
.unwrap_or_else(|| SYNTAX_SET.find_syntax_plain_text()),
&THEME_SET.themes["InspiredGitHub"],
);
let regions = highlighter.highlight(content, &SYNTAX_SET);
        styled_line_to_highlighted_html(&regions[..], IncludeBackground::No)
}
}
impl HtmlHandler<Error> for SolomonSyntectHandler {
fn start<W: Write>(&mut self, mut w: W, element: &Element) -> Result<()> {
match element {
Element::InlineSrc(inline_src) => write!(
w,
"<code>{}</code>",
self.highlight(Some(&inline_src.lang), &inline_src.body)
)?,
Element::SourceBlock(block) => write!(
w,
"<pre><code>{}</code></pre>",
self.highlight(Some(&block.language), &block.contents)
)?,
Element::FixedWidth(fixed_width) => write!(
w,
"<pre><code>{}</code></pre>",
self.highlight(None, &fixed_width.value)
)?,
Element::ExampleBlock(block) => write!(
w,
"<pre><code>{}</code></pre>",
self.highlight(None, &block.contents)
)?,
            Element::Macros(macros) => match &macros.name as &str {
"age-days" => {
let date =
DateTime::from_utc(NaiveDateTime::from_timestamp(1382071200, 0), Utc);
write!(w, " {} ", (Utc::now() - date).num_days())?;
}
"angular-core-version" => {
write_nodejs_package_version(w, "@angular/core")?;
}
"angular-material-version" => {
write_nodejs_package_version(w, "@angular/material")?;
}
"angular-cli-version" => {
write_nodejs_package_version(w, "@angular/cli")?;
}
_ => (),
},
_ => self.0.start(w, element)?,
}
Ok(())
}
fn end<W: Write>(&mut self, w: W, element: &Element) -> Result<()> {
self.0.end(w, element)
}
}
#[derive(Default)]
pub struct SolomonHtmlHandler(SolomonSyntectHandler);
impl HtmlHandler<Error> for SolomonHtmlHandler {
fn start<W: Write>(&mut self, mut w: W, element: &Element) -> Result<()> {
match element {
Element::Link(link) if link.path.starts_with("file:") => {
let path = &link.path[5..];
let size = imagesize::size(Path::new("content/post").join(path))?;
write!(
w,
"<div class=\"image-container\" style=\"max-width:{}px;\">\
<div class=\"image-wrapper\" style=\"padding-top:{:.7}%\">\
<img src=\"/{}\" loading=\"lazy\"></div></div>",
size.width,
(size.height as f32 / size.width as f32) * 100.,
path,
)?;
}
_ => self.0.start(w, element)?,
}
Ok(())
}
fn end<W: Write>(&mut self, w: W, element: &Element) -> Result<()> {
self.0.end(w, element)
}
}
#[derive(Default)]
pub struct SolomonRssHandler(SolomonBaseHandler);
impl HtmlHandler<Error> for SolomonRssHandler {
fn start<W: Write>(&mut self, mut w: W, element: &Element) -> Result<()> {
match element {
Element::Link(link) if link.path.starts_with("file:") => {
let path = &link.path[5..];
let size = imagesize::size(Path::new("content/post").join(path))?;
write!(
w,
r#"<img src="/{}" width="{}" height="{}">"#,
path, size.width, size.height
)?;
}
_ => self.0.start(w, element)?,
}
Ok(())
}
fn end<W: Write>(&mut self, w: W, element: &Element) -> Result<()>
|
}
#[derive(Default)]
pub struct SolomonAmpHandler(SolomonSyntectHandler);
impl HtmlHandler<Error> for SolomonAmpHandler {
fn start<W: Write>(&mut self, mut w: W, element: &Element) -> Result<()> {
match element {
Element::Link(link) if link.path.starts_with("file:") => {
let path = &link.path[5..];
let size = imagesize::size(Path::new("content/post").join(path))?;
write!(
w,
"<amp-img src=\"/{}\" width=\"{}\" height=\"{}\" layout=\"responsive\" \
class=\"i-amphtml-layout-responsive i-amphtml-layout-size-defined\" i-amphtml-layout=\"responsive\">\
<i-amphtml-sizer style=\"display:block;padding-top:{:.7}%;\"></i-amphtml-sizer></amp-img>",
path,
size.width,
size.height,
(size.height as f32 / size.width as f32) * 100.
)?;
}
_ => self.0.start(w, element)?,
}
Ok(())
}
fn end<W: Write>(&mut self, w: W, element: &Element) -> Result<()> {
self.0.end(w, element)
}
}
fn write_nodejs_package_version<W: Write>(mut w: W, package: &str) -> Result<()> {
write!(w, "<code>")?;
let output = Command::new("yarn")
.args(&["--cwd", "web", "--silent", "list", "--pattern", package])
.output()?;
let stdout = String::from_utf8(output.stdout)?;
if let Some(version) = stdout.trim().split_whitespace().last() {
write!(w, "{}", version)?;
}
write!(w, "</code>")?;
Ok(())
}
fn should_insert_space(c1: Option<char>, c2: Option<char>) -> bool {
const PUNCTUATIONS: [char; 14] = [
'。', '?', ',', '、', ';', ':', '“', '”', '「', '」', '(', ')', '《', '》',
];
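    // Note (added comment): a space is needed between two ASCII-graphic characters,
    // or between an ASCII-graphic character and a CJK ideograph (U+4E00..U+9FFF)
    // that is not one of the punctuation marks listed above.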
if let (Some(c1), Some(c2)) = (c1, c2) {
(c1.is_ascii_graphic() && c2.is_ascii_graphic())
|| (c1.is_ascii_graphic()
&& 0x4E00 < (c2 as u32)
&& (c2 as u32) < 0x9FFF
&& !PUNCTUATIONS.contains(&c2))
|| (c2.is_ascii_graphic()
&& 0x4E00 < (c1 as u32)
&& (c1 as u32) < 0x9FFF
&& !PUNCTUATIONS.contains(&c1))
} else {
false
}
}
|
{
self.0.end(w, element)
}
|
Solution.py
|
"""
Given a sequence without duplicate numbers, return all of its possible permutations.
"""
from typing import List
class Solution:
def permute(self, nums: List[int]) -> List[List[int]]:
if len(nums) == 1:
return [nums]
else:
ret = []
for i in range(len(nums)):
sub_permute = self.permute(nums[0:i] + nums[i + 1:len(nums)])
for item in sub_permute:
ret.append([nums[i]] + item)
|
return ret
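# Illustrative usage (not part of the original snippet): permute fixes one element
# and recurses on the remaining ones, so a list of length n yields n! permutations.
#   Solution().permute([1, 2, 3])
#   -> [[1, 2, 3], [1, 3, 2], [2, 1, 3], [2, 3, 1], [3, 1, 2], [3, 2, 1]]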
|
|
handy.rs
|
use crate::error::TLSError;
use crate::key;
use crate::server;
use crate::server::ClientHello;
use crate::sign;
use webpki;
use std::collections;
use std::sync::Arc;
#[cfg(not(target_env = "sgx"))]
use std::sync::Mutex;
#[cfg(target_env = "sgx")]
use std::sync::SgxMutex as Mutex;
/// Something which never stores sessions.
pub struct NoServerSessionStorage {}
impl server::StoresServerSessions for NoServerSessionStorage {
fn put(&self, _id: Vec<u8>, _sec: Vec<u8>) -> bool {
false
}
fn get(&self, _id: &[u8]) -> Option<Vec<u8>> {
None
}
fn take(&self, _id: &[u8]) -> Option<Vec<u8>> {
None
}
}
/// An implementor of `StoresServerSessions` that stores everything
/// in memory. It enforces a limit on the number of stored sessions
/// to bound memory usage.
pub struct ServerSessionMemoryCache {
cache: Mutex<collections::HashMap<Vec<u8>, Vec<u8>>>,
max_entries: usize,
}
impl ServerSessionMemoryCache {
/// Make a new ServerSessionMemoryCache. `size` is the maximum
/// number of stored sessions.
pub fn new(size: usize) -> Arc<ServerSessionMemoryCache> {
debug_assert!(size > 0);
Arc::new(ServerSessionMemoryCache {
cache: Mutex::new(collections::HashMap::new()),
max_entries: size,
})
}
fn limit_size(&self)
|
}
impl server::StoresServerSessions for ServerSessionMemoryCache {
fn put(&self, key: Vec<u8>, value: Vec<u8>) -> bool {
self.cache
.lock()
.unwrap()
.insert(key, value);
self.limit_size();
true
}
fn get(&self, key: &[u8]) -> Option<Vec<u8>> {
self.cache
.lock()
.unwrap()
.get(key)
.cloned()
}
fn take(&self, key: &[u8]) -> Option<Vec<u8>> {
self.cache.lock().unwrap().remove(key)
}
}
/// Something which never produces tickets.
pub struct NeverProducesTickets {}
impl server::ProducesTickets for NeverProducesTickets {
fn enabled(&self) -> bool {
false
}
fn get_lifetime(&self) -> u32 {
0
}
fn encrypt(&self, _bytes: &[u8]) -> Option<Vec<u8>> {
None
}
fn decrypt(&self, _bytes: &[u8]) -> Option<Vec<u8>> {
None
}
}
/// Something which never resolves a certificate.
pub struct FailResolveChain {}
impl server::ResolvesServerCert for FailResolveChain {
fn resolve(&self, _client_hello: ClientHello) -> Option<sign::CertifiedKey> {
None
}
}
/// Something which always resolves to the same cert chain.
pub struct AlwaysResolvesChain(sign::CertifiedKey);
impl AlwaysResolvesChain {
/// Creates an `AlwaysResolvesChain`, auto-detecting the underlying private
/// key type and encoding.
pub fn new(
chain: Vec<key::Certificate>,
priv_key: &key::PrivateKey,
) -> Result<AlwaysResolvesChain, TLSError> {
let key = sign::any_supported_type(priv_key)
.map_err(|_| TLSError::General("invalid private key".into()))?;
Ok(AlwaysResolvesChain(sign::CertifiedKey::new(
chain,
Arc::new(key),
)))
}
/// Creates an `AlwaysResolvesChain`, auto-detecting the underlying private
/// key type and encoding.
///
/// If non-empty, the given OCSP response and SCTs are attached.
pub fn new_with_extras(
chain: Vec<key::Certificate>,
priv_key: &key::PrivateKey,
ocsp: Vec<u8>,
scts: Vec<u8>,
) -> Result<AlwaysResolvesChain, TLSError> {
let mut r = AlwaysResolvesChain::new(chain, priv_key)?;
if !ocsp.is_empty() {
r.0.ocsp = Some(ocsp);
}
if !scts.is_empty() {
r.0.sct_list = Some(scts);
}
Ok(r)
}
}
impl server::ResolvesServerCert for AlwaysResolvesChain {
fn resolve(&self, _client_hello: ClientHello) -> Option<sign::CertifiedKey> {
Some(self.0.clone())
}
}
/// Something that resolves to different cert chains/keys based
/// on client-supplied server name (via SNI).
pub struct ResolvesServerCertUsingSNI {
by_name: collections::HashMap<String, sign::CertifiedKey>,
}
impl ResolvesServerCertUsingSNI {
/// Create a new and empty (ie, knows no certificates) resolver.
pub fn new() -> ResolvesServerCertUsingSNI {
ResolvesServerCertUsingSNI {
by_name: collections::HashMap::new(),
}
}
/// Add a new `sign::CertifiedKey` to be used for the given SNI `name`.
///
/// This function fails if `name` is not a valid DNS name, or if
/// it's not valid for the supplied certificate, or if the certificate
/// chain is syntactically faulty.
pub fn add(&mut self, name: &str, ck: sign::CertifiedKey) -> Result<(), TLSError> {
let checked_name = webpki::DNSNameRef::try_from_ascii_str(name)
.map_err(|_| TLSError::General("Bad DNS name".into()))?;
ck.cross_check_end_entity_cert(Some(checked_name))?;
self.by_name.insert(name.into(), ck);
Ok(())
}
}
impl server::ResolvesServerCert for ResolvesServerCertUsingSNI {
fn resolve(&self, client_hello: ClientHello) -> Option<sign::CertifiedKey> {
if let Some(name) = client_hello.server_name() {
self.by_name.get(name.into()).cloned()
} else {
// This kind of resolver requires SNI
None
}
}
}
#[cfg(test)]
mod test {
use super::*;
use crate::server::ProducesTickets;
use crate::server::ResolvesServerCert;
use crate::StoresServerSessions;
#[test]
fn test_noserversessionstorage_drops_put() {
let c = NoServerSessionStorage {};
assert_eq!(c.put(vec![0x01], vec![0x02]), false);
}
#[test]
fn test_noserversessionstorage_denies_gets() {
let c = NoServerSessionStorage {};
c.put(vec![0x01], vec![0x02]);
assert_eq!(c.get(&[]), None);
assert_eq!(c.get(&[0x01]), None);
assert_eq!(c.get(&[0x02]), None);
}
#[test]
fn test_noserversessionstorage_denies_takes() {
let c = NoServerSessionStorage {};
assert_eq!(c.take(&[]), None);
assert_eq!(c.take(&[0x01]), None);
assert_eq!(c.take(&[0x02]), None);
}
#[test]
fn test_serversessionmemorycache_accepts_put() {
let c = ServerSessionMemoryCache::new(4);
assert_eq!(c.put(vec![0x01], vec![0x02]), true);
}
#[test]
fn test_serversessionmemorycache_persists_put() {
let c = ServerSessionMemoryCache::new(4);
assert_eq!(c.put(vec![0x01], vec![0x02]), true);
assert_eq!(c.get(&[0x01]), Some(vec![0x02]));
assert_eq!(c.get(&[0x01]), Some(vec![0x02]));
}
#[test]
fn test_serversessionmemorycache_overwrites_put() {
let c = ServerSessionMemoryCache::new(4);
assert_eq!(c.put(vec![0x01], vec![0x02]), true);
assert_eq!(c.put(vec![0x01], vec![0x04]), true);
assert_eq!(c.get(&[0x01]), Some(vec![0x04]));
}
#[test]
fn test_serversessionmemorycache_drops_to_maintain_size_invariant() {
let c = ServerSessionMemoryCache::new(4);
assert_eq!(c.put(vec![0x01], vec![0x02]), true);
assert_eq!(c.put(vec![0x03], vec![0x04]), true);
assert_eq!(c.put(vec![0x05], vec![0x06]), true);
assert_eq!(c.put(vec![0x07], vec![0x08]), true);
assert_eq!(c.put(vec![0x09], vec![0x0a]), true);
let mut count = 0;
if c.get(&[0x01]).is_some() {
count += 1;
}
if c.get(&[0x03]).is_some() {
count += 1;
}
if c.get(&[0x05]).is_some() {
count += 1;
}
if c.get(&[0x07]).is_some() {
count += 1;
}
if c.get(&[0x09]).is_some() {
count += 1;
}
assert_eq!(count, 4);
}
#[test]
fn test_neverproducestickets_does_nothing() {
let npt = NeverProducesTickets {};
assert_eq!(false, npt.enabled());
assert_eq!(0, npt.get_lifetime());
assert_eq!(None, npt.encrypt(&[]));
assert_eq!(None, npt.decrypt(&[]));
}
#[test]
fn test_failresolvechain_does_nothing() {
let frc = FailResolveChain {};
assert!(
frc.resolve(ClientHello::new(None, &[], None))
.is_none()
);
}
#[test]
fn test_resolvesservercertusingsni_requires_sni() {
let rscsni = ResolvesServerCertUsingSNI::new();
assert!(
rscsni
.resolve(ClientHello::new(None, &[], None))
.is_none()
);
}
#[test]
fn test_resolvesservercertusingsni_handles_unknown_name() {
let rscsni = ResolvesServerCertUsingSNI::new();
let name = webpki::DNSNameRef::try_from_ascii_str("hello.com").unwrap();
assert!(
rscsni
.resolve(ClientHello::new(Some(name), &[], None))
.is_none()
);
}
}
|
{
let mut cache = self.cache.lock().unwrap();
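        // Evict entries until the cache is back under `max_entries`. HashMap
        // iteration order is unspecified, so this removes an arbitrary entry
        // rather than the least recently used one.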
while cache.len() > self.max_entries {
let k = cache.keys().next().unwrap().clone();
cache.remove(&k);
}
}
|
basic.js
|
/* Local storage */
function setStore(n, v) {
window.localStorage.setItem(n, v);
}
function getStore
|
return window.localStorage.getItem(n);
}
function hsetStore(n, v) {
window.sessionStorage.setItem(n, v);
}
function hgetStore(n) {
return window.sessionStorage.getItem(n);
}
Array.prototype.getObjByproprety = function (val, name) {
var arr = this;
for (let index = 0; index < arr.length; index++) {
const element = arr[index];
if (element[name] == val) {
return element;
}
}
return {};
}
|
(n) {
|
policy_decision.py
|
#!/usr/bin/env python
__author__ = 'Stephen P. Henrie'
from os import path
from StringIO import StringIO
from ndg.xacml.parsers.etree.factory import ReaderFactory
from ndg.xacml.core import Identifiers, XACML_1_0_PREFIX
from ndg.xacml.core.attribute import Attribute
from ndg.xacml.core.attributevalue import (AttributeValue,
AttributeValueClassFactory)
from ndg.xacml.core.functions import functionMap
from ndg.xacml.core.context.request import Request
from ndg.xacml.core.context.subject import Subject
from ndg.xacml.core.context.resource import Resource
from ndg.xacml.core.context.action import Action
from ndg.xacml.core.context.environment import Environment
from ndg.xacml.core.context.pdp import PDP
from ndg.xacml.core.context.result import Decision
from pyon.core.bootstrap import IonObject
from pyon.core.exception import NotFound
from pyon.core.governance import ION_MANAGER
from pyon.core.registry import is_ion_object, message_classes, get_class_decorator_value
from pyon.core.governance.governance_dispatcher import GovernanceDispatcher
from pyon.util.log import log
COMMON_SERVICE_POLICY_RULES = 'common_service_policy_rules'
THIS_DIR = path.dirname(__file__)
XACML_EMPTY_POLICY_FILENAME = 'empty_policy_set.xml'
ROLE_ATTRIBUTE_ID = XACML_1_0_PREFIX + 'subject:subject-role-id'
SENDER_ID = XACML_1_0_PREFIX + 'subject:subject-sender-id'
RECEIVER_TYPE = XACML_1_0_PREFIX + 'resource:receiver-type'
ACTION_VERB = XACML_1_0_PREFIX + 'action:action-verb'
ACTION_PARAMETERS = XACML_1_0_PREFIX + 'action:param-dict'
DICT_TYPE_URI = AttributeValue.IDENTIFIER_PREFIX + 'dict'
OBJECT_TYPE_URI = AttributeValue.IDENTIFIER_PREFIX + 'object'
#"""XACML DATATYPES"""
attributeValueFactory = AttributeValueClassFactory()
StringAttributeValue = attributeValueFactory(AttributeValue.STRING_TYPE_URI)
IntAttributeValue = attributeValueFactory(AttributeValue.INTEGER_TYPE_URI)
DoubleAttributeValue = attributeValueFactory(AttributeValue.DOUBLE_TYPE_URI)
BooleanAttributeValue = attributeValueFactory(AttributeValue.BOOLEAN_TYPE_URI)
class PolicyDecisionPointManager(object):
def __init__(self, governance_controller):
self.resource_policy_decision_point = dict()
self.service_policy_decision_point = dict()
self.empty_pdp = PDP.fromPolicySource(path.join(THIS_DIR, XACML_EMPTY_POLICY_FILENAME), ReaderFactory)
self.load_common_service_policy_rules('')
self.governance_controller = governance_controller
#Create and register an Attribute Value derived class to handle a dict type used for the messages
_className = 'Dict' + AttributeValue.CLASS_NAME_SUFFIX
_classVars = {'TYPE': dict, 'IDENTIFIER': DICT_TYPE_URI}
_attributeValueClass = type(_className, (AttributeValue, ), _classVars)
AttributeValue.register(_attributeValueClass)
attributeValueFactory.addClass(DICT_TYPE_URI, _attributeValueClass)
self.DictAttributeValue = attributeValueFactory(DICT_TYPE_URI)
#Create and register an Attribute Value derived class to handle any object
_className = 'Object' + AttributeValue.CLASS_NAME_SUFFIX
_classVars = {'TYPE': object, 'IDENTIFIER': OBJECT_TYPE_URI}
_attributeValueClass = type(_className, (AttributeValue, ), _classVars)
AttributeValue.register(_attributeValueClass)
attributeValueFactory.addClass(OBJECT_TYPE_URI, _attributeValueClass)
self.ObjectAttributeValue = attributeValueFactory(OBJECT_TYPE_URI)
#Create and add new function for evaluating functions that take the message as a dict
from pyon.core.governance.policy.evaluate import EvaluateCode, EvaluateFunction
functionMap['urn:oasis:names:tc:xacml:1.0:function:evaluate-code'] = EvaluateCode
functionMap['urn:oasis:names:tc:xacml:1.0:function:evaluate-function'] = EvaluateFunction
def _get_default_policy_template(self):
#TODO - Put in resource registry as object and load in preload
policy_template = '''<?xml version="1.0" encoding="UTF-8"?>
<Policy xmlns="urn:oasis:names:tc:xacml:2.0:policy:schema:os"
xmlns:xacml-context="urn:oasis:names:tc:xacml:2.0:context:schema:os"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="urn:oasis:names:tc:xacml:2.0:policy:schema:os http://docs.oasis-open.org/xacml/access_control-xacml-2.0-policy-schema-os.xsd"
xmlns:xf="http://www.w3.org/TR/2002/WD-xquery-operators-20020816/#"
xmlns:md="http:www.med.example.com/schemas/record.xsd"
PolicyId="%s"
RuleCombiningAlgId="urn:oasis:names:tc:xacml:1.0:rule-combining-algorithm:permit-overrides">
<PolicyDefaults>
<XPathVersion>http://www.w3.org/TR/1999/Rec-xpath-19991116</XPathVersion>
</PolicyDefaults>
%s
</Policy>'''
return policy_template
def _get_resource_policy_template(self):
#TODO - Put in resource registry as object and load in preload
policy_template = '''<?xml version="1.0" encoding="UTF-8"?>
<Policy xmlns="urn:oasis:names:tc:xacml:2.0:policy:schema:os"
xmlns:xacml-context="urn:oasis:names:tc:xacml:2.0:context:schema:os"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="urn:oasis:names:tc:xacml:2.0:policy:schema:os http://docs.oasis-open.org/xacml/access_control-xacml-2.0-policy-schema-os.xsd"
xmlns:xf="http://www.w3.org/TR/2002/WD-xquery-operators-20020816/#"
xmlns:md="http:www.med.example.com/schemas/record.xsd"
PolicyId="%s"
RuleCombiningAlgId="urn:oasis:names:tc:xacml:1.0:rule-combining-algorithm:first-applicable">
<PolicyDefaults>
<XPathVersion>http://www.w3.org/TR/1999/Rec-xpath-19991116</XPathVersion>
</PolicyDefaults>
%s
</Policy>'''
return policy_template
def create_policy_from_rules(self, policy_identifier, rules):
policy = self._get_default_policy_template()
policy_rules = policy % (policy_identifier, rules)
return policy_rules
def
|
(self, policy_identifier, rules):
policy = self._get_resource_policy_template()
policy_rules = policy % (policy_identifier, rules)
return policy_rules
#Return a compiled policy indexed by the specified resource_id
def get_resource_pdp(self, resource_key):
#First look for requested resource key
if self.resource_policy_decision_point.has_key(resource_key):
return self.resource_policy_decision_point[resource_key]
#If a PDP does not exist for this resource key - then return default
return self.empty_pdp
    #Return a compiled policy indexed by the specified service_name
def get_service_pdp(self, service_name):
        #First look for the requested service name
if self.service_policy_decision_point.has_key(service_name):
return self.service_policy_decision_point[service_name]
        #If a PDP does not exist for this service name - then return the common set of service policies
return self.load_common_service_pdp
def list_resource_policies(self):
return self.resource_policy_decision_point.keys()
def list_service_policies(self):
return self.service_policy_decision_point.keys()
def load_common_service_policy_rules(self, rules_text):
self.common_service_rules = rules_text
input_source = StringIO(self.create_policy_from_rules(COMMON_SERVICE_POLICY_RULES, rules_text))
self.load_common_service_pdp = PDP.fromPolicySource(input_source, ReaderFactory)
def load_service_policy_rules(self, service_name, rules_text):
if not rules_text and not self.service_policy_decision_point.has_key(service_name):
return
log.debug("Loading policies for service: %s" % service_name)
self.clear_service_policy(service_name)
service_rule_set = self.common_service_rules + rules_text
#Simply create a new PDP object for the service
input_source = StringIO(self.create_policy_from_rules(service_name, service_rule_set))
self.service_policy_decision_point[service_name] = PDP.fromPolicySource(input_source, ReaderFactory)
def load_resource_policy_rules(self, resource_key, rules_text):
if not rules_text and not self.resource_policy_decision_point.has_key(resource_key):
return
log.debug("Loading policies for resource: %s" % resource_key)
#print rules_text
self.clear_resource_policy(resource_key)
        #Simply create a new PDP object for the resource
input_source = StringIO(self.create_resource_policy_from_rules(resource_key, rules_text))
self.resource_policy_decision_point[resource_key] = PDP.fromPolicySource(input_source, ReaderFactory)
#Remove any policy indexed by the resource_key
def clear_resource_policy(self, resource_key):
if self.resource_policy_decision_point.has_key(resource_key):
del self.resource_policy_decision_point[resource_key]
#Remove any policy indexed by the service_name
def clear_service_policy(self, service_name):
if self.service_policy_decision_point.has_key(service_name):
del self.service_policy_decision_point[service_name]
#Remove all policies
def clear_policy_cache(self):
self.resource_policy_decision_point.clear()
self.service_policy_decision_point.clear()
self.load_common_service_policy_rules('')
def create_attribute(self, attrib_class, attrib_id, val):
attribute = Attribute()
attribute.attributeId = attrib_id
attribute.dataType = attrib_class.IDENTIFIER
attribute.attributeValues.append(attrib_class())
attribute.attributeValues[-1].value = val
return attribute
def create_string_attribute(self, attrib_id, val):
return self.create_attribute(StringAttributeValue, attrib_id, val)
def create_int_attribute(self, attrib_id, val):
return self.create_attribute(IntAttributeValue, attrib_id, val)
def create_double_attribute(self, attrib_id, val):
return self.create_attribute(DoubleAttributeValue, attrib_id, val)
def create_boolean_attribute(self, attrib_id, val):
return self.create_attribute(BooleanAttributeValue, attrib_id, val)
def create_dict_attribute(self, attrib_id, val):
return self.create_attribute(self.DictAttributeValue, attrib_id, val)
def create_object_attribute(self, attrib_id, val):
return self.create_attribute(self.ObjectAttributeValue, attrib_id, val)
def create_org_role_attribute(self, actor_roles, subject):
attribute = None
for role in actor_roles:
if attribute is None:
attribute = self.create_string_attribute(ROLE_ATTRIBUTE_ID, role)
else:
attribute.attributeValues.append(StringAttributeValue())
attribute.attributeValues[-1].value = role
if attribute is not None:
subject.attributes.append(attribute)
def _create_request_from_message(self, invocation, receiver, receiver_type='service'):
sender, sender_type = invocation.get_message_sender()
op = invocation.get_header_value('op', 'Unknown')
ion_actor_id = invocation.get_header_value('ion-actor-id', 'anonymous')
actor_roles = invocation.get_header_value('ion-actor-roles', {})
message_format = invocation.get_header_value('format', '')
#log.debug("Checking XACML Request: receiver_type: %s, sender: %s, receiver:%s, op:%s, ion_actor_id:%s, ion_actor_roles:%s", receiver_type, sender, receiver, op, ion_actor_id, actor_roles)
request = Request()
subject = Subject()
subject.attributes.append(self.create_string_attribute(SENDER_ID, sender))
subject.attributes.append(self.create_string_attribute(Identifiers.Subject.SUBJECT_ID, ion_actor_id))
#Get the Org name associated with the endpoint process
endpoint_process = invocation.get_arg_value('process', None)
if endpoint_process is not None and hasattr(endpoint_process,'org_governance_name'):
org_governance_name = endpoint_process.org_governance_name
else:
org_governance_name = self.governance_controller.system_root_org_name
        #If this process is not associated with the root Org, then iterate over the roles associated with the user only for
        #the Org that this process is associated with; otherwise include all roles and create attributes for each
if org_governance_name == self.governance_controller.system_root_org_name:
#log.debug("Including roles for all Orgs")
            #If the process Org name is the same as the System Root Org, then include all of them to be safe
for org in actor_roles:
self.create_org_role_attribute(actor_roles[org],subject)
else:
if actor_roles.has_key(org_governance_name):
log.debug("Org Roles (%s): %s" , org_governance_name, ' '.join(actor_roles[org_governance_name]))
self.create_org_role_attribute(actor_roles[org_governance_name],subject)
#Handle the special case for the ION system actor
if actor_roles.has_key(self.governance_controller.system_root_org_name):
if ION_MANAGER in actor_roles[self.governance_controller.system_root_org_name]:
log.debug("Including ION_MANAGER role")
self.create_org_role_attribute([ION_MANAGER],subject)
request.subjects.append(subject)
resource = Resource()
resource.attributes.append(self.create_string_attribute(Identifiers.Resource.RESOURCE_ID, receiver))
resource.attributes.append(self.create_string_attribute(RECEIVER_TYPE, receiver_type))
request.resources.append(resource)
request.action = Action()
request.action.attributes.append(self.create_string_attribute(Identifiers.Action.ACTION_ID, op))
        #Check to see if there is an OperationVerb decorator specifying a Verb used with policy
if is_ion_object(message_format):
try:
msg_class = message_classes[message_format]
operation_verb = get_class_decorator_value(msg_class,'OperationVerb')
if operation_verb is not None:
request.action.attributes.append(self.create_string_attribute(ACTION_VERB, operation_verb))
except NotFound:
pass
#Create generic attributes for each of the primitive message parameter types to be available in XACML rules
parameter_dict = {'message': invocation.message, 'headers': invocation.headers, 'annotations': invocation.message_annotations }
if endpoint_process is not None:
parameter_dict['process'] = endpoint_process
request.action.attributes.append(self.create_dict_attribute(ACTION_PARAMETERS, parameter_dict))
return request
def check_agent_request_policies(self, invocation):
process = invocation.get_arg_value('process')
if not process:
raise NotFound('Cannot find process in message')
decision = self.check_resource_request_policies(invocation, process.resource_id)
log.debug("Resource policy Decision: %s", decision)
        # todo: check if it's OK to treat everything but Deny as Permit (e.g. NotApplicable)
# Return if agent service policies deny the operation
if decision == Decision.DENY:
return decision
# Else check any policies that might be associated with the resource.
decision = self._check_service_request_policies(invocation, 'agent')
return decision
def check_service_request_policies(self, invocation):
decision = self._check_service_request_policies(invocation, 'service')
return decision
def _check_service_request_policies(self, invocation, receiver_type):
receiver = invocation.get_message_receiver()
if not receiver:
raise NotFound('No receiver for this message')
requestCtx = self._create_request_from_message(invocation, receiver, receiver_type)
pdp = self.get_service_pdp(receiver)
if pdp is None:
return Decision.NOT_APPLICABLE
return self._evaluate_pdp(invocation, pdp, requestCtx)
def check_resource_request_policies(self, invocation, resource_id):
if not resource_id:
raise NotFound('The resource_id is not set')
requestCtx = self._create_request_from_message(invocation, resource_id, 'resource')
pdp = self.get_resource_pdp(resource_id)
if pdp is None:
return Decision.NOT_APPLICABLE
return self._evaluate_pdp(invocation, pdp, requestCtx)
def _evaluate_pdp(self, invocation, pdp, requestCtx):
try:
response = pdp.evaluate(requestCtx)
except Exception, e:
log.error("Error evaluating policies: %s" % e.message)
return Decision.NOT_APPLICABLE
if response is None:
log.debug('response from PDP contains nothing, so not authorized')
return Decision.DENY
if invocation.message_annotations.has_key(GovernanceDispatcher.POLICY__STATUS_REASON_ANNOTATION):
return Decision.DENY
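        # Return the first DENY encountered; if none of the results is a DENY,
        # fall through to the decision of the last result evaluated.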
for result in response.results:
if result.decision == Decision.DENY:
break
return result.decision
|
create_resource_policy_from_rules
|
hello_test.go
|
package hello_test
import (
"bytes"
"hello"
"testing"
)
func TestPrintsHelloMessageToWriter(t *testing.T)
|
{
fakeTerminal := &bytes.Buffer{}
p := hello.Printer{
Output: fakeTerminal,
}
p.Print()
want := "Hello, world\n"
got := fakeTerminal.String()
if want != got {
t.Errorf("want %q, got %q", want, got)
}
}
|
|
app.js
|
'use strict'
// modules
var express = require('express')
var json = require('express-json')
var bodyParser = require('body-parser')
var multer = require('multer')
// files
var conf = require('../config/gulp.config')
var routes = require('./routes')
var config = require('./routes/config')
var contents = require('./routes/contents')
var upload = require('./routes/upload')
var remove = require('./routes/remove')
var converter = require('./converter/converter.js')
var publish = require('./publish/publish.js')
// config
var serverPort = conf.port.server_dev
var fileUploadDone = false
// app
var app = express()
//.use(express.limit(100000000))
.use(require('express-promise')())
//.use(json())
//.use(bodyParser.urlencoded({limit: '50mb'}))
//.use(express.json())
//.use(express.multipart())
.use(bodyParser.json({limit: '50mb'}))
//.use(express.bodyParser())
// cross domain
.use(multer({
dest: './uploads/',
rename: function (fieldname, filename) {
return filename + Date.now()
},
onFileUploadStart: function (file) {
console.log(file.originalname + ' is starting ...')
},
onFileUploadComplete: function (file) {
console.log(file.fieldname + ' uploaded to ' + file.path)
fileUploadDone = true
}
}))
.use(function(req, res, next) {
res.header("Access-Control-Allow-Origin", "*")
res.header('Access-Control-Allow-Methods', 'PUT, GET, POST, DELETE, OPTIONS')
res.header("Access-Control-Allow-Headers", "X-Requested-With")
next()
})
.all('*', function(req, res, next) {
res.header("Access-Control-Allow-Origin", "*")
res.header('Access-Control-Allow-Methods', 'PUT, GET, POST, DELETE, OPTIONS')
res.header("Access-Control-Allow-Headers", "X-Requested-With")
next()
})
.get('/', routes.index)
.get('/config', config.getConfig)
.get('/contents', contents.getContents)
.post('/converter/progress', converter.getProgress)
.post('/contents', contents.postContents)
.post('/publish', publish.publish)
.post('/upload/image', upload.uploadImage)
.post('/upload/bgimage', upload.uploadBgImage)
.post('/upload/video', upload.uploadVideo)
.post('/upload/pdf', upload.uploadPdf)
.post('/remove/files', remove.removeFiles)
|
.post('/remove/bgimages', remove.removeBgImages)
.get('/test', function(req, res) {
//res.sendfile('./.tmp/index.html')
res.json({result: 'ok'})
})
.listen(serverPort)
|
.post('/remove/folder', remove.removeFolder)
|
demo.rs
|
extern crate failure;
extern crate log;
extern crate stderrlog;
extern crate termion;
extern crate tui;
#[allow(dead_code)]
mod util;
use std::io;
use termion::event::Key;
use termion::input::MouseTerminal;
use termion::raw::IntoRawMode;
use termion::screen::AlternateScreen;
use tui::backend::{Backend, TermionBackend};
use tui::layout::{Constraint, Direction, Layout, Rect};
use tui::style::{Color, Modifier, Style};
use tui::widgets::canvas::{Canvas, Line, Map, MapResolution};
use tui::widgets::{
Axis, BarChart, Block, Borders, Chart, Dataset, Gauge, List, Marker, Paragraph, Row,
SelectableList, Sparkline, Table, Tabs, Text, Widget,
};
use tui::{Frame, Terminal};
use util::event::{Event, Events};
use util::{RandomSignal, SinSignal, TabsState};
struct Server<'a> {
name: &'a str,
location: &'a str,
coords: (f64, f64),
status: &'a str,
}
struct App<'a> {
items: Vec<&'a str>,
events: Vec<(&'a str, &'a str)>,
selected: usize,
tabs: TabsState<'a>,
show_chart: bool,
progress: u16,
data: Vec<u64>,
data2: Vec<(f64, f64)>,
data3: Vec<(f64, f64)>,
data4: Vec<(&'a str, u64)>,
window: [f64; 2],
colors: [Color; 2],
color_index: usize,
servers: Vec<Server<'a>>,
}
fn main() -> Result<(), failure::Error> {
stderrlog::new()
.module(module_path!())
.verbosity(4)
.init()?;
let stdout = io::stdout().into_raw_mode()?;
let stdout = MouseTerminal::from(stdout);
let stdout = AlternateScreen::from(stdout);
let backend = TermionBackend::new(stdout);
let mut terminal = Terminal::new(backend)?;
terminal.hide_cursor()?;
let events = Events::new();
let mut rand_signal = RandomSignal::new(0, 100);
let mut sin_signal = SinSignal::new(0.2, 3.0, 18.0);
let mut sin_signal2 = SinSignal::new(0.1, 2.0, 10.0);
let mut app = App {
items: vec![
"Item1", "Item2", "Item3", "Item4", "Item5", "Item6", "Item7", "Item8", "Item9",
"Item10", "Item11", "Item12", "Item13", "Item14", "Item15", "Item16", "Item17",
"Item18", "Item19", "Item20", "Item21", "Item22", "Item23", "Item24",
],
events: vec![
("Event1", "INFO"),
("Event2", "INFO"),
("Event3", "CRITICAL"),
("Event4", "ERROR"),
("Event5", "INFO"),
("Event6", "INFO"),
("Event7", "WARNING"),
("Event8", "INFO"),
("Event9", "INFO"),
("Event10", "INFO"),
("Event11", "CRITICAL"),
("Event12", "INFO"),
("Event13", "INFO"),
("Event14", "INFO"),
("Event15", "INFO"),
("Event16", "INFO"),
("Event17", "ERROR"),
("Event18", "ERROR"),
("Event19", "INFO"),
("Event20", "INFO"),
("Event21", "WARNING"),
("Event22", "INFO"),
("Event23", "INFO"),
("Event24", "WARNING"),
("Event25", "INFO"),
("Event26", "INFO"),
],
selected: 0,
tabs: TabsState::new(vec!["Tab0", "Tab1"]),
show_chart: true,
progress: 0,
data: rand_signal.by_ref().take(300).collect(),
data2: sin_signal.by_ref().take(100).collect(),
data3: sin_signal2.by_ref().take(200).collect(),
data4: vec![
("B1", 9),
("B2", 12),
("B3", 5),
("B4", 8),
("B5", 2),
("B6", 4),
("B7", 5),
("B8", 9),
("B9", 14),
("B10", 15),
("B11", 1),
("B12", 0),
("B13", 4),
("B14", 6),
("B15", 4),
("B16", 6),
("B17", 4),
("B18", 7),
("B19", 13),
("B20", 8),
("B21", 11),
("B22", 9),
("B23", 3),
("B24", 5),
],
window: [0.0, 20.0],
colors: [Color::Magenta, Color::Red],
color_index: 0,
servers: vec![
Server {
name: "NorthAmerica-1",
location: "New York City",
coords: (40.71, -74.00),
status: "Up",
},
Server {
name: "Europe-1",
location: "Paris",
coords: (48.85, 2.35),
status: "Failure",
},
Server {
name: "SouthAmerica-1",
location: "São Paulo",
coords: (-23.54, -46.62),
status: "Up",
},
Server {
name: "Asia-1",
location: "Singapore",
coords: (1.35, 103.86),
status: "Up",
},
],
};
loop {
// Draw UI
terminal.draw(|mut f| {
let chunks = Layout::default()
.constraints([Constraint::Length(3), Constraint::Min(0)].as_ref())
.split(f.size());
Tabs::default()
.block(Block::default().borders(Borders::ALL).title("Tabs"))
.titles(&app.tabs.titles)
.style(Style::default().fg(Color::Green))
.highlight_style(Style::default().fg(Color::Yellow))
.select(app.tabs.index)
.render(&mut f, chunks[0]);
match app.tabs.index {
0 => draw_first_tab(&mut f, &app, chunks[1]),
1 => draw_second_tab(&mut f, &app, chunks[1]),
_ => {}
};
})?;
match events.next()? {
Event::Input(input) => match input {
Key::Char('q') => {
break;
}
Key::Up => {
if app.selected > 0 {
app.selected -= 1
};
}
Key::Down => {
if app.selected < app.items.len() - 1 {
app.selected += 1;
}
}
Key::Left => {
app.tabs.previous();
}
Key::Right => {
app.tabs.next();
}
Key::Char('t') => {
app.show_chart = !app.show_chart;
}
_ => {}
},
Event::Tick => {
app.progress += 5;
if app.progress > 100 {
app.progress = 0;
}
app.data.insert(0, rand_signal.next().unwrap());
app.data.pop();
for _ in 0..5 {
app.data2.remove(0);
app.data2.push(sin_signal.next().unwrap());
}
for _ in 0..10 {
app.data3.remove(0);
app.data3.push(sin_signal2.next().unwrap());
}
let i = app.data4.pop().unwrap();
app.data4.insert(0, i);
app.window[0] += 1.0;
app.window[1] += 1.0;
let i = app.events.pop().unwrap();
app.events.insert(0, i);
app.color_index += 1;
if app.color_index >= app.colors.len() {
app.color_index = 0;
}
}
}
}
Ok(())
}
fn draw_first_tab<B>(f: &mut Frame<B>, app: &App, area: Rect)
where
B: Backend,
{
let chunks = Layout::default()
.constraints(
[
Constraint::Length(7),
Constraint::Min(7),
Constraint::Length(7),
]
.as_ref(),
)
.split(area);
draw_gauges(f, app, chunks[0]);
draw_charts(f, app, chunks[1]);
draw_text(f, chunks[2]);
}
fn draw_gauges<B>(f: &mut Frame<B>, app: &App, area: Rect)
where
B: Backend,
{
let chunks = Layout::default()
.constraints([Constraint::Length(2), Constraint::Length(3)].as_ref())
.margin(1)
.split(area);
Block::default()
.borders(Borders::ALL)
.title("Graphs")
.render(f, area);
Gauge::default()
.block(Block::default().title("Gauge:"))
.style(
Style::default()
.fg(Color::Magenta)
.bg(Color::Black)
.modifier(Modifier::Italic),
)
.label(&format!("{} / 100", app.progress))
.percent(app.progress)
.render(f, chunks[0]);
Sparkline::default()
.block(Block::default().title("Sparkline:"))
.style(Style::default().fg(Color::Green))
.data(&app.data)
.render(f, chunks[1]);
}
fn draw_charts<B>(f: &mut Frame<B>, app: &App, area: Rect)
where
B: Backend,
{
let constraints = if app.show_chart {
vec![Constraint::Percentage(50), Constraint::Percentage(50)]
} else {
vec![Constraint::Percentage(100)]
};
let chunks = Layout::default()
.constraints(constraints)
.direction(Direction::Horizontal)
.split(area);
{
let chunks = Layout::default()
.constraints([Constraint::Percentage(50), Constraint::Percentage(50)].as_ref())
.split(chunks[0]);
{
let chunks = Layout::default()
.constraints([Constraint::Percentage(50), Constraint::Percentage(50)].as_ref())
.direction(Direction::Horizontal)
.split(chunks[0]);
SelectableList::default()
.block(Block::default().borders(Borders::ALL).title("List"))
.items(&app.items)
.select(Some(app.selected))
.highlight_style(Style::default().fg(Color::Yellow).modifier(Modifier::Bold))
.highlight_symbol(">")
.render(f, chunks[0]);
let info_style = Style::default().fg(Color::White);
let warning_style = Style::default().fg(Color::Yellow);
let error_style = Style::default().fg(Color::Magenta);
let critical_style = Style::default().fg(Color::Red);
let events = app.events.iter().map(|&(evt, level)| {
Text::styled(
format!("{}: {}", level, evt),
match level {
"ERROR" => error_style,
"CRITICAL" => critical_style,
"WARNING" => warning_style,
_ => info_style,
},
)
});
List::new(events)
.block(Block::default().borders(Borders::ALL).title("List"))
.render(f, chunks[1]);
}
BarChart::default()
.block(Block::default().borders(Borders::ALL).title("Bar chart"))
.data(&app.data4)
.bar_width(3)
.bar_gap(2)
.value_style(
Style::default()
.fg(Color::Black)
.bg(Color::Green)
.modifier(Modifier::Italic),
)
.label_style(Style::default().fg(Color::Yellow))
.style(Style::default().fg(Color::Green))
.render(f, chunks[1]);
}
if app.show_chart {
Chart::default()
.block(
Block::default()
.title("Chart")
.title_style(Style::default().fg(Color::Cyan).modifier(Modifier::Bold))
.borders(Borders::ALL),
)
.x_axis(
Axis::default()
.title("X Axis")
.style(Style::default().fg(Color::Gray))
.labels_style(Style::default().modifier(Modifier::Italic))
.bounds(app.window)
.labels(&[
&format!("{}", app.window[0]),
&format!("{}", (app.window[0] + app.window[1]) / 2.0),
&format!("{}", app.window[1]),
]),
)
.y_axis(
Axis::default()
.title("Y Axis")
.style(Style::default().fg(Color::Gray))
.labels_style(Style::default().modifier(Modifier::Italic))
.bounds([-20.0, 20.0])
.labels(&["-20", "0", "20"]),
)
.datasets(&[
Dataset::default()
.name("data2")
.marker(Marker::Dot)
.style(Style::default().fg(Color::Cyan))
.data(&app.data2),
Dataset::default()
.name("data3")
.marker(Marker::Braille)
.style(Style::default().fg(Color::Yellow))
.data(&app.data3),
])
.render(f, chunks[1]);
}
}
fn d
|
B>(f: &mut Frame<B>, area: Rect)
where
B: Backend,
{
let text = [
Text::raw("This is a paragraph with several lines. You can change style your text the way you want.\n\nFox example: "),
Text::styled("under", Style::default().fg(Color::Red)),
Text::raw(" "),
Text::styled("the", Style::default().fg(Color::Green)),
Text::raw(" "),
Text::styled("rainbow", Style::default().fg(Color::Blue)),
Text::raw(".\nOh and if you didn't "),
Text::styled("notice", Style::default().modifier(Modifier::Italic)),
Text::raw(" you can "),
Text::styled("automatically", Style::default().modifier(Modifier::Bold)),
Text::raw(" "),
Text::styled("wrap", Style::default().modifier(Modifier::Invert)),
Text::raw(" your "),
Text::styled("text", Style::default().modifier(Modifier::Underline)),
Text::raw(".\nOne more thing is that it should display unicode characters: 10€")
];
Paragraph::new(text.iter())
.block(
Block::default()
.borders(Borders::ALL)
.title("Footer")
.title_style(Style::default().fg(Color::Magenta).modifier(Modifier::Bold)),
)
.wrap(true)
.render(f, area);
}
fn draw_second_tab<B>(f: &mut Frame<B>, app: &App, area: Rect)
where
B: Backend,
{
let chunks = Layout::default()
.constraints([Constraint::Percentage(30), Constraint::Percentage(70)].as_ref())
.direction(Direction::Horizontal)
.split(area);
let up_style = Style::default().fg(Color::Green);
let failure_style = Style::default().fg(Color::Red);
let header = ["Server", "Location", "Status"];
let rows = app.servers.iter().map(|s| {
let style = if s.status == "Up" {
up_style
} else {
failure_style
};
Row::StyledData(vec![s.name, s.location, s.status].into_iter(), style)
});
Table::new(header.into_iter(), rows)
.block(Block::default().title("Servers").borders(Borders::ALL))
.header_style(Style::default().fg(Color::Yellow))
.widths(&[15, 15, 10])
.render(f, chunks[0]);
Canvas::default()
.block(Block::default().title("World").borders(Borders::ALL))
.paint(|ctx| {
ctx.draw(&Map {
color: Color::White,
resolution: MapResolution::High,
});
ctx.layer();
for (i, s1) in app.servers.iter().enumerate() {
for s2 in &app.servers[i + 1..] {
ctx.draw(&Line {
x1: s1.coords.1,
y1: s1.coords.0,
y2: s2.coords.0,
x2: s2.coords.1,
color: Color::Yellow,
});
}
}
for server in &app.servers {
let color = if server.status == "Up" {
Color::Green
} else {
Color::Red
};
ctx.print(server.coords.1, server.coords.0, "X", color);
}
})
.x_bounds([-180.0, 180.0])
.y_bounds([-90.0, 90.0])
.render(f, chunks[1]);
}
|
raw_text<
|
0001_initial.py
|
# Generated by Django 3.2.9 on 2022-02-03 06:56
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class
|
(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Details',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(max_length=100, null=True)),
('last_name', models.CharField(max_length=100, null=True)),
('qualities', models.TextField(null=True)),
('experiences', models.TextField(null=True)),
],
),
migrations.CreateModel(
name='Group',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=300, null=True, unique=True)),
('bio', models.TextField(null=True)),
('admin', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='GroupChat',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField()),
('time_sent', models.DateTimeField(auto_now_add=True, null=True)),
('group', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='councelapp.group')),
('reporter', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Counsellor',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('details', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='details', to='councelapp.details')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='profile', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Client',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('counsellor', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='counsellor', to='councelapp.counsellor')),
('group', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='councelapp.group')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='client_profile', to=settings.AUTH_USER_MODEL)),
],
),
]
|
Migration
|
editor.rs
|
use crate::{
clipboard::{get_clipboard_provider, ClipboardProvider},
document::{Mode, SCRATCH_BUFFER_NAME},
graphics::{CursorKind, Rect},
info::Info,
input::KeyEvent,
theme::{self, Theme},
tree::{self, Tree},
Document, DocumentId, View, ViewId,
};
use futures_util::future;
use futures_util::stream::select_all::SelectAll;
use tokio_stream::wrappers::UnboundedReceiverStream;
use std::{
borrow::Cow,
collections::{BTreeMap, HashMap},
io::stdin,
num::NonZeroUsize,
path::{Path, PathBuf},
pin::Pin,
sync::Arc,
};
use tokio::{
sync::mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender},
time::{sleep, Duration, Instant, Sleep},
};
use anyhow::{bail, Error};
pub use helix_core::diagnostic::Severity;
pub use helix_core::register::Registers;
use helix_core::{
auto_pairs::AutoPairs,
syntax::{self, AutoPairConfig},
Change,
};
use helix_core::{Position, Selection};
use helix_dap as dap;
use serde::{ser::SerializeMap, Deserialize, Deserializer, Serialize, Serializer};
use arc_swap::access::{DynAccess, DynGuard};
fn deserialize_duration_millis<'de, D>(deserializer: D) -> Result<Duration, D::Error>
where
D: serde::Deserializer<'de>,
{
let millis = u64::deserialize(deserializer)?;
Ok(Duration::from_millis(millis))
}
fn serialize_duration_millis<S>(duration: &Duration, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
serializer.serialize_u64(
duration
.as_millis()
.try_into()
.map_err(|_| serde::ser::Error::custom("duration value overflowed u64"))?,
)
}
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case", default, deny_unknown_fields)]
pub struct FilePickerConfig {
/// IgnoreOptions
/// Enables ignoring hidden files.
/// Whether to hide hidden files in file picker and global search results. Defaults to true.
pub hidden: bool,
/// Enables reading ignore files from parent directories. Defaults to true.
pub parents: bool,
/// Enables reading `.ignore` files.
/// Whether to hide files listed in .ignore in file picker and global search results. Defaults to true.
pub ignore: bool,
/// Enables reading `.gitignore` files.
/// Whether to hide files listed in .gitignore in file picker and global search results. Defaults to true.
pub git_ignore: bool,
/// Enables reading the global .gitignore, whose path is specified in git's config: the `core.excludesFile` option.
/// Whether to hide files listed in global .gitignore in file picker and global search results. Defaults to true.
pub git_global: bool,
/// Enables reading `.git/info/exclude` files.
/// Whether to hide files listed in .git/info/exclude in file picker and global search results. Defaults to true.
pub git_exclude: bool,
/// WalkBuilder options
/// Maximum Depth to recurse directories in file picker and global search. Defaults to `None`.
pub max_depth: Option<usize>,
}
impl Default for FilePickerConfig {
fn default() -> Self {
Self {
hidden: true,
parents: true,
ignore: true,
git_ignore: true,
git_global: true,
git_exclude: true,
max_depth: None,
}
}
}
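// A minimal sketch, not part of the original file: constructing a
// FilePickerConfig that overrides a couple of the defaults above. In practice
// these fields are populated from the user's config via serde, so this is
// purely illustrative.
#[allow(dead_code)]
fn file_picker_show_hidden_sketch() -> FilePickerConfig {
    FilePickerConfig {
        hidden: false,      // show dotfiles in the picker
        max_depth: Some(4), // stop recursing after four directory levels
        ..FilePickerConfig::default()
    }
}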
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case", default, deny_unknown_fields)]
pub struct Config {
/// Padding to keep between the edge of the screen and the cursor when scrolling. Defaults to 5.
pub scrolloff: usize,
/// Number of lines to scroll at once. Defaults to 3
pub scroll_lines: isize,
/// Mouse support. Defaults to true.
pub mouse: bool,
/// Shell to use for shell commands. Defaults to ["cmd", "/C"] on Windows and ["sh", "-c"] otherwise.
pub shell: Vec<String>,
/// Line number mode.
pub line_number: LineNumber,
/// Gutters. Default ["diagnostics", "line-numbers"]
pub gutters: Vec<GutterType>,
/// Middle click paste support. Defaults to true.
pub middle_click_paste: bool,
/// Automatic insertion of pairs to parentheses, brackets,
/// etc. Optionally, this can be a list of 2-tuples to specify a
/// global list of characters to pair. Defaults to true.
pub auto_pairs: AutoPairConfig,
/// Automatic completion: pop up the completion menu without an explicit user trigger. Defaults to true.
pub auto_completion: bool,
/// Time in milliseconds since last keypress before idle timers trigger.
/// Used for autocompletion, set to 0 for instant. Defaults to 400ms.
#[serde(
serialize_with = "serialize_duration_millis",
deserialize_with = "deserialize_duration_millis"
)]
pub idle_timeout: Duration,
pub completion_trigger_len: u8,
/// Whether to display infoboxes. Defaults to true.
pub auto_info: bool,
pub file_picker: FilePickerConfig,
/// Shape for cursor in each mode
pub cursor_shape: CursorShapeConfig,
/// Set to `true` to override automatic detection of terminal truecolor support in the event of a false negative. Defaults to `false`.
pub true_color: bool,
/// Search configuration.
#[serde(default)]
pub search: SearchConfig,
pub lsp: LspConfig,
/// Column numbers at which to draw the rulers. Default to `[]`, meaning no rulers.
pub rulers: Vec<u16>,
#[serde(default)]
pub whitespace: WhitespaceConfig,
}
#[derive(Debug, Default, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case", deny_unknown_fields)]
pub struct LspConfig {
pub display_messages: bool,
}
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case", default, deny_unknown_fields)]
pub struct SearchConfig {
/// Smart case: Case insensitive searching unless pattern contains upper case characters. Defaults to true.
pub smart_case: bool,
/// Whether the search should wrap after depleting the matches. Default to true.
pub wrap_around: bool,
}
// Cursor shape is read and used on every rendered frame and so needs
// to be fast. Therefore we avoid a hashmap and use an enum indexed array.
#[derive(Debug, Clone, PartialEq)]
pub struct CursorShapeConfig([CursorKind; 3]);
impl CursorShapeConfig {
pub fn from_mode(&self, mode: Mode) -> CursorKind {
self.get(mode as usize).copied().unwrap_or_default()
}
}
impl<'de> Deserialize<'de> for CursorShapeConfig {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
let m = HashMap::<Mode, CursorKind>::deserialize(deserializer)?;
let into_cursor = |mode: Mode| m.get(&mode).copied().unwrap_or_default();
Ok(CursorShapeConfig([
into_cursor(Mode::Normal),
into_cursor(Mode::Select),
into_cursor(Mode::Insert),
]))
}
}
impl Serialize for CursorShapeConfig {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
let mut map = serializer.serialize_map(Some(self.len()))?;
let modes = [Mode::Normal, Mode::Select, Mode::Insert];
for mode in modes {
map.serialize_entry(&mode, &self.from_mode(mode))?;
}
map.end()
}
}
impl std::ops::Deref for CursorShapeConfig {
type Target = [CursorKind; 3];
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl Default for CursorShapeConfig {
fn default() -> Self {
Self([CursorKind::Block; 3])
}
}
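// A minimal sketch, not part of the original file: as the comment above notes,
// the lookup must be fast, and `from_mode` is just an array index by the
// mode's position, with no hashing. With the default config every mode maps to
// a block cursor.
#[allow(dead_code)]
fn cursor_shape_lookup_sketch() {
    let config = CursorShapeConfig::default();
    assert!(matches!(config.from_mode(Mode::Normal), CursorKind::Block));
    assert!(matches!(config.from_mode(Mode::Insert), CursorKind::Block));
}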
#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub enum
|
{
/// Show absolute line number
Absolute,
/// If focused and in normal/select mode, show relative line number to the primary cursor.
/// If unfocused or in insert mode, show absolute line number.
Relative,
}
impl std::str::FromStr for LineNumber {
type Err = anyhow::Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s.to_lowercase().as_str() {
"absolute" | "abs" => Ok(Self::Absolute),
"relative" | "rel" => Ok(Self::Relative),
_ => anyhow::bail!("Line number can only be `absolute` or `relative`."),
}
}
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub enum GutterType {
/// Show diagnostics and other features like breakpoints
Diagnostics,
/// Show line numbers
LineNumbers,
}
impl std::str::FromStr for GutterType {
type Err = anyhow::Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s.to_lowercase().as_str() {
"diagnostics" => Ok(Self::Diagnostics),
"line-numbers" => Ok(Self::LineNumbers),
_ => anyhow::bail!("Gutter type can only be `diagnostics` or `line-numbers`."),
}
}
}
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(default)]
pub struct WhitespaceConfig {
pub render: WhitespaceRender,
pub characters: WhitespaceCharacters,
}
impl Default for WhitespaceConfig {
fn default() -> Self {
Self {
render: WhitespaceRender::Basic(WhitespaceRenderValue::None),
characters: WhitespaceCharacters::default(),
}
}
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(untagged, rename_all = "kebab-case")]
pub enum WhitespaceRender {
Basic(WhitespaceRenderValue),
Specific {
default: Option<WhitespaceRenderValue>,
space: Option<WhitespaceRenderValue>,
tab: Option<WhitespaceRenderValue>,
newline: Option<WhitespaceRenderValue>,
},
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub enum WhitespaceRenderValue {
None,
// TODO
// Selection,
All,
}
impl WhitespaceRender {
pub fn space(&self) -> WhitespaceRenderValue {
match *self {
Self::Basic(val) => val,
Self::Specific { default, space, .. } => {
space.or(default).unwrap_or(WhitespaceRenderValue::None)
}
}
}
pub fn tab(&self) -> WhitespaceRenderValue {
match *self {
Self::Basic(val) => val,
Self::Specific { default, tab, .. } => {
tab.or(default).unwrap_or(WhitespaceRenderValue::None)
}
}
}
pub fn newline(&self) -> WhitespaceRenderValue {
match *self {
Self::Basic(val) => val,
Self::Specific {
default, newline, ..
} => newline.or(default).unwrap_or(WhitespaceRenderValue::None),
}
}
}
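// Sketch only, not in the original source: with a `Specific` setting that only
// names `tab`, the other accessors fall back to `default`, and then to `None`
// when no default is given.
#[allow(dead_code)]
fn whitespace_render_fallback_sketch() {
    let render = WhitespaceRender::Specific {
        default: None,
        space: None,
        tab: Some(WhitespaceRenderValue::All),
        newline: None,
    };
    assert!(matches!(render.tab(), WhitespaceRenderValue::All));
    assert!(matches!(render.space(), WhitespaceRenderValue::None));
    assert!(matches!(render.newline(), WhitespaceRenderValue::None));
}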
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(default)]
pub struct WhitespaceCharacters {
pub space: char,
pub tab: char,
pub newline: char,
}
impl Default for WhitespaceCharacters {
fn default() -> Self {
Self {
space: '·', // U+00B7
tab: '→', // U+2192
newline: '⏎', // U+23CE
}
}
}
impl Default for Config {
fn default() -> Self {
Self {
scrolloff: 5,
scroll_lines: 3,
mouse: true,
shell: if cfg!(windows) {
vec!["cmd".to_owned(), "/C".to_owned()]
} else {
vec!["sh".to_owned(), "-c".to_owned()]
},
line_number: LineNumber::Absolute,
gutters: vec![GutterType::Diagnostics, GutterType::LineNumbers],
middle_click_paste: true,
auto_pairs: AutoPairConfig::default(),
auto_completion: true,
idle_timeout: Duration::from_millis(400),
completion_trigger_len: 2,
auto_info: true,
file_picker: FilePickerConfig::default(),
cursor_shape: CursorShapeConfig::default(),
true_color: false,
search: SearchConfig::default(),
lsp: LspConfig::default(),
rulers: Vec::new(),
whitespace: WhitespaceConfig::default(),
}
}
}
impl Default for SearchConfig {
fn default() -> Self {
Self {
wrap_around: true,
smart_case: true,
}
}
}
pub struct Motion(pub Box<dyn Fn(&mut Editor)>);
impl Motion {
pub fn run(&self, e: &mut Editor) {
(self.0)(e)
}
}
impl std::fmt::Debug for Motion {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_str("motion")
}
}
#[derive(Debug, Clone, Default)]
pub struct Breakpoint {
pub id: Option<usize>,
pub verified: bool,
pub message: Option<String>,
pub line: usize,
pub column: Option<usize>,
pub condition: Option<String>,
pub hit_condition: Option<String>,
pub log_message: Option<String>,
}
pub struct Editor {
pub tree: Tree,
pub next_document_id: DocumentId,
pub documents: BTreeMap<DocumentId, Document>,
pub count: Option<std::num::NonZeroUsize>,
pub selected_register: Option<char>,
pub registers: Registers,
pub macro_recording: Option<(char, Vec<KeyEvent>)>,
pub theme: Theme,
pub language_servers: helix_lsp::Registry,
pub debugger: Option<dap::Client>,
pub debugger_events: SelectAll<UnboundedReceiverStream<dap::Payload>>,
pub breakpoints: HashMap<PathBuf, Vec<Breakpoint>>,
pub clipboard_provider: Box<dyn ClipboardProvider>,
pub syn_loader: Arc<syntax::Loader>,
pub theme_loader: Arc<theme::Loader>,
pub status_msg: Option<(Cow<'static, str>, Severity)>,
pub autoinfo: Option<Info>,
pub config: Box<dyn DynAccess<Config>>,
pub auto_pairs: Option<AutoPairs>,
pub idle_timer: Pin<Box<Sleep>>,
pub last_motion: Option<Motion>,
pub pseudo_pending: Option<String>,
pub last_completion: Option<CompleteAction>,
pub exit_code: i32,
pub config_events: (UnboundedSender<ConfigEvent>, UnboundedReceiver<ConfigEvent>),
}
#[derive(Debug, Clone)]
pub enum ConfigEvent {
Refresh,
Update(Box<Config>),
}
#[derive(Debug, Clone)]
pub struct CompleteAction {
pub trigger_offset: usize,
pub changes: Vec<Change>,
}
#[derive(Debug, Copy, Clone)]
pub enum Action {
Load,
Replace,
HorizontalSplit,
VerticalSplit,
}
impl Editor {
pub fn new(
mut area: Rect,
theme_loader: Arc<theme::Loader>,
syn_loader: Arc<syntax::Loader>,
config: Box<dyn DynAccess<Config>>,
) -> Self {
let language_servers = helix_lsp::Registry::new();
let conf = config.load();
let auto_pairs = (&conf.auto_pairs).into();
// HAXX: offset the render area height by 1 to account for prompt/commandline
area.height -= 1;
Self {
tree: Tree::new(area),
next_document_id: DocumentId::default(),
documents: BTreeMap::new(),
count: None,
selected_register: None,
macro_recording: None,
theme: theme_loader.default(),
language_servers,
debugger: None,
debugger_events: SelectAll::new(),
breakpoints: HashMap::new(),
syn_loader,
theme_loader,
registers: Registers::default(),
clipboard_provider: get_clipboard_provider(),
status_msg: None,
autoinfo: None,
idle_timer: Box::pin(sleep(conf.idle_timeout)),
last_motion: None,
last_completion: None,
pseudo_pending: None,
config,
auto_pairs,
exit_code: 0,
config_events: unbounded_channel(),
}
}
pub fn config(&self) -> DynGuard<Config> {
self.config.load()
}
pub fn clear_idle_timer(&mut self) {
// equivalent to internal Instant::far_future() (30 years)
self.idle_timer
.as_mut()
.reset(Instant::now() + Duration::from_secs(86400 * 365 * 30));
}
pub fn reset_idle_timer(&mut self) {
let config = self.config();
self.idle_timer
.as_mut()
.reset(Instant::now() + config.idle_timeout);
}
pub fn clear_status(&mut self) {
self.status_msg = None;
}
#[inline]
pub fn set_status<T: Into<Cow<'static, str>>>(&mut self, status: T) {
self.status_msg = Some((status.into(), Severity::Info));
}
#[inline]
pub fn set_error<T: Into<Cow<'static, str>>>(&mut self, error: T) {
self.status_msg = Some((error.into(), Severity::Error));
}
pub fn set_theme(&mut self, theme: Theme) {
// `ui.selection` is the only scope required to be able to render a theme.
if theme.find_scope_index("ui.selection").is_none() {
self.set_error("Invalid theme: `ui.selection` required");
return;
}
let scopes = theme.scopes();
self.syn_loader.set_scopes(scopes.to_vec());
self.theme = theme;
self._refresh();
}
/// Refreshes the language server for a given document
pub fn refresh_language_server(&mut self, doc_id: DocumentId) -> Option<()> {
let doc = self.documents.get_mut(&doc_id)?;
Self::launch_language_server(&mut self.language_servers, doc)
}
/// Launch a language server for a given document
fn launch_language_server(ls: &mut helix_lsp::Registry, doc: &mut Document) -> Option<()> {
// if doc doesn't have a URL it's a scratch buffer, ignore it
let doc_url = doc.url()?;
// try to find a language server based on the language name
let language_server = doc.language.as_ref().and_then(|language| {
ls.get(language)
.map_err(|e| {
log::error!(
"Failed to initialize the LSP for `{}` {{ {} }}",
language.scope(),
e
)
})
.ok()
});
if let Some(language_server) = language_server {
// only spawn a new lang server if the servers aren't the same
if Some(language_server.id()) != doc.language_server().map(|server| server.id()) {
if let Some(language_server) = doc.language_server() {
tokio::spawn(language_server.text_document_did_close(doc.identifier()));
}
let language_id = doc.language_id().map(ToOwned::to_owned).unwrap_or_default();
// TODO: this now races with on_init code if the init happens too quickly
tokio::spawn(language_server.text_document_did_open(
doc_url,
doc.version(),
doc.text(),
language_id,
));
doc.set_language_server(Some(language_server));
}
}
Some(())
}
fn _refresh(&mut self) {
let config = self.config();
for (view, _) in self.tree.views_mut() {
let doc = &self.documents[&view.doc];
view.ensure_cursor_in_view(doc, config.scrolloff)
}
}
fn replace_document_in_view(&mut self, current_view: ViewId, doc_id: DocumentId) {
let view = self.tree.get_mut(current_view);
view.doc = doc_id;
view.offset = Position::default();
let doc = self.documents.get_mut(&doc_id).unwrap();
// initialize selection for view
doc.selections
.entry(view.id)
.or_insert_with(|| Selection::point(0));
// TODO: reuse align_view
let pos = doc
.selection(view.id)
.primary()
.cursor(doc.text().slice(..));
let line = doc.text().char_to_line(pos);
view.offset.row = line.saturating_sub(view.inner_area().height as usize / 2);
}
pub fn switch(&mut self, id: DocumentId, action: Action) {
use crate::tree::Layout;
if !self.documents.contains_key(&id) {
log::error!("cannot switch to document that does not exist (anymore)");
return;
}
match action {
Action::Replace => {
let (view, doc) = current_ref!(self);
// If the current view is an empty scratch buffer and is not displayed in any other views, delete it.
// Boolean value is determined before the call to `view_mut` because the operation requires a borrow
// of `self.tree`, which is mutably borrowed when `view_mut` is called.
let remove_empty_scratch = !doc.is_modified()
// If the buffer has no path and is not modified, it is an empty scratch buffer.
&& doc.path().is_none()
// If the buffer we are changing to is not this buffer
&& id != doc.id
// Ensure the buffer is not displayed in any other splits.
&& !self
.tree
.traverse()
.any(|(_, v)| v.doc == doc.id && v.id != view.id);
let (view, doc) = current!(self);
if remove_empty_scratch {
// Copy `doc.id` into a variable before calling `self.documents.remove`, which requires a mutable
// borrow, invalidating direct access to `doc.id`.
let id = doc.id;
self.documents.remove(&id);
} else {
let jump = (view.doc, doc.selection(view.id).clone());
view.jumps.push(jump);
// Set last accessed doc if it is a different document
if doc.id != id {
view.last_accessed_doc = Some(view.doc);
// Set last modified doc if modified and last modified doc is different
if std::mem::take(&mut doc.modified_since_accessed)
&& view.last_modified_docs[0] != Some(view.doc)
{
view.last_modified_docs = [Some(view.doc), view.last_modified_docs[0]];
}
}
}
let view_id = view.id;
self.replace_document_in_view(view_id, id);
return;
}
Action::Load => {
let view_id = view!(self).id;
let doc = self.documents.get_mut(&id).unwrap();
if doc.selections().is_empty() {
doc.selections.insert(view_id, Selection::point(0));
}
return;
}
Action::HorizontalSplit | Action::VerticalSplit => {
let view = View::new(id, self.config().gutters.clone());
let view_id = self.tree.split(
view,
match action {
Action::HorizontalSplit => Layout::Horizontal,
Action::VerticalSplit => Layout::Vertical,
_ => unreachable!(),
},
);
// initialize selection for view
let doc = self.documents.get_mut(&id).unwrap();
doc.selections.insert(view_id, Selection::point(0));
}
}
self._refresh();
}
/// Generate an id for a new document and register it.
fn new_document(&mut self, mut doc: Document) -> DocumentId {
let id = self.next_document_id;
// Safety: the counter starts at 1 and only increments, so it is never zero; reaching usize::MAX is practically impossible.
self.next_document_id =
DocumentId(unsafe { NonZeroUsize::new_unchecked(self.next_document_id.0.get() + 1) });
doc.id = id;
self.documents.insert(id, doc);
id
}
fn new_file_from_document(&mut self, action: Action, doc: Document) -> DocumentId {
let id = self.new_document(doc);
self.switch(id, action);
id
}
pub fn new_file(&mut self, action: Action) -> DocumentId {
self.new_file_from_document(action, Document::default())
}
pub fn new_file_from_stdin(&mut self, action: Action) -> Result<DocumentId, Error> {
let (rope, encoding) = crate::document::from_reader(&mut stdin(), None)?;
Ok(self.new_file_from_document(action, Document::from(rope, Some(encoding))))
}
pub fn open(&mut self, path: PathBuf, action: Action) -> Result<DocumentId, Error> {
let path = helix_core::path::get_canonicalized_path(&path)?;
let id = self.document_by_path(&path).map(|doc| doc.id);
let id = if let Some(id) = id {
id
} else {
let mut doc = Document::open(&path, None, Some(self.syn_loader.clone()))?;
let _ = Self::launch_language_server(&mut self.language_servers, &mut doc);
self.new_document(doc)
};
self.switch(id, action);
Ok(id)
}
pub fn close(&mut self, id: ViewId) {
let view = self.tree.get(self.tree.focus);
// remove selection
self.documents
.get_mut(&view.doc)
.unwrap()
.selections
.remove(&id);
self.tree.remove(id);
self._refresh();
}
pub fn close_document(&mut self, doc_id: DocumentId, force: bool) -> anyhow::Result<()> {
let doc = match self.documents.get(&doc_id) {
Some(doc) => doc,
None => bail!("document does not exist"),
};
if !force && doc.is_modified() {
bail!(
"buffer {:?} is modified",
doc.relative_path()
.map(|path| path.to_string_lossy().to_string())
.unwrap_or_else(|| SCRATCH_BUFFER_NAME.into())
);
}
if let Some(language_server) = doc.language_server() {
tokio::spawn(language_server.text_document_did_close(doc.identifier()));
}
let views_to_close = self
.tree
.views()
.filter_map(|(view, _focus)| {
if view.doc == doc_id {
Some(view.id)
} else {
None
}
})
.collect::<Vec<_>>();
for view_id in views_to_close {
self.close(view_id);
}
self.documents.remove(&doc_id);
// If the document we removed was visible in all views, we will have no more views. We don't
// want to close the editor just for a simple buffer close, so we need to create a new view
// containing either an existing document, or a brand new document.
if self.tree.views().next().is_none() {
let doc_id = self
.documents
.iter()
.map(|(&doc_id, _)| doc_id)
.next()
.unwrap_or_else(|| self.new_document(Document::default()));
let view = View::new(doc_id, self.config().gutters.clone());
let view_id = self.tree.insert(view);
let doc = self.documents.get_mut(&doc_id).unwrap();
doc.selections.insert(view_id, Selection::point(0));
}
self._refresh();
Ok(())
}
pub fn resize(&mut self, area: Rect) {
if self.tree.resize(area) {
self._refresh();
};
}
pub fn focus_next(&mut self) {
self.tree.focus_next();
}
pub fn focus_right(&mut self) {
self.tree.focus_direction(tree::Direction::Right);
}
pub fn focus_left(&mut self) {
self.tree.focus_direction(tree::Direction::Left);
}
pub fn focus_up(&mut self) {
self.tree.focus_direction(tree::Direction::Up);
}
pub fn focus_down(&mut self) {
self.tree.focus_direction(tree::Direction::Down);
}
pub fn should_close(&self) -> bool {
self.tree.is_empty()
}
pub fn ensure_cursor_in_view(&mut self, id: ViewId) {
let config = self.config();
let view = self.tree.get_mut(id);
let doc = &self.documents[&view.doc];
view.ensure_cursor_in_view(doc, config.scrolloff)
}
#[inline]
pub fn document(&self, id: DocumentId) -> Option<&Document> {
self.documents.get(&id)
}
#[inline]
pub fn document_mut(&mut self, id: DocumentId) -> Option<&mut Document> {
self.documents.get_mut(&id)
}
#[inline]
pub fn documents(&self) -> impl Iterator<Item = &Document> {
self.documents.values()
}
#[inline]
pub fn documents_mut(&mut self) -> impl Iterator<Item = &mut Document> {
self.documents.values_mut()
}
pub fn document_by_path<P: AsRef<Path>>(&self, path: P) -> Option<&Document> {
self.documents()
.find(|doc| doc.path().map(|p| p == path.as_ref()).unwrap_or(false))
}
pub fn document_by_path_mut<P: AsRef<Path>>(&mut self, path: P) -> Option<&mut Document> {
self.documents_mut()
.find(|doc| doc.path().map(|p| p == path.as_ref()).unwrap_or(false))
}
pub fn cursor(&self) -> (Option<Position>, CursorKind) {
let config = self.config();
let (view, doc) = current_ref!(self);
let cursor = doc
.selection(view.id)
.primary()
.cursor(doc.text().slice(..));
if let Some(mut pos) = view.screen_coords_at_pos(doc, doc.text().slice(..), cursor) {
let inner = view.inner_area();
pos.col += inner.x as usize;
pos.row += inner.y as usize;
let cursorkind = config.cursor_shape.from_mode(doc.mode());
(Some(pos), cursorkind)
} else {
(None, CursorKind::default())
}
}
/// Closes language servers with a timeout. The default timeout is 500 ms; use the
/// `timeout` parameter to override it.
pub async fn close_language_servers(
&self,
timeout: Option<u64>,
) -> Result<(), tokio::time::error::Elapsed> {
tokio::time::timeout(
Duration::from_millis(timeout.unwrap_or(500)),
future::join_all(
self.language_servers
.iter_clients()
.map(|client| client.force_shutdown()),
),
)
.await
.map(|_| ())
}
}
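// A minimal usage sketch, not part of the original file: shutting down
// language servers on exit with a caller-chosen timeout instead of the 500 ms
// default. Assumes an already-constructed `Editor`.
#[allow(dead_code)]
async fn shutdown_language_servers_sketch(editor: &Editor) {
    if editor.close_language_servers(Some(3000)).await.is_err() {
        log::error!("language servers did not shut down before the timeout");
    }
}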
|
LineNumber
|
benchmark_spec.py
|
# Copyright 2019 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Container for all data required for a benchmark to run."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import copy
import datetime
import importlib
import logging
import os
import pickle
import threading
import uuid
from perfkitbenchmarker import benchmark_status
from perfkitbenchmarker import capacity_reservation
from perfkitbenchmarker import cloud_tpu
from perfkitbenchmarker import container_service
from perfkitbenchmarker import context
from perfkitbenchmarker import disk
from perfkitbenchmarker import dpb_service
from perfkitbenchmarker import edw_service
from perfkitbenchmarker import errors
from perfkitbenchmarker import flags
from perfkitbenchmarker import managed_relational_db
from perfkitbenchmarker import nfs_service
from perfkitbenchmarker import os_types
from perfkitbenchmarker import provider_info
from perfkitbenchmarker import providers
from perfkitbenchmarker import smb_service
from perfkitbenchmarker import spark_service
from perfkitbenchmarker import stages
from perfkitbenchmarker import static_virtual_machine as static_vm
from perfkitbenchmarker import virtual_machine
from perfkitbenchmarker import vm_util
import six
from six.moves import range
import six.moves._thread
import six.moves.copyreg
def PickleLock(lock):
return UnPickleLock, (lock.locked(),)
def UnPickleLock(locked, *args):
lock = threading.Lock()
if locked:
if not lock.acquire(False):
raise pickle.UnpicklingError('Cannot acquire lock')
return lock
six.moves.copyreg.pickle(six.moves._thread.LockType, PickleLock)
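# A small illustrative sketch, not part of the original module: after the
# copyreg registration above, a lock can round-trip through pickle, carrying
# only its locked/unlocked state.
def _LockPickleRoundTripExample():
  lock = threading.Lock()
  lock.acquire()
  restored = pickle.loads(pickle.dumps(lock, 2))
  assert restored.locked()
  restored.release()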
SUPPORTED = 'strict'
NOT_EXCLUDED = 'permissive'
SKIP_CHECK = 'none'
# GCP labels only allow hyphens (-), underscores (_), lowercase characters,
# numbers, and international characters.
# Metadata allows all characters and numbers.
METADATA_TIME_FORMAT = '%Y%m%dt%H%M%Sz'
FLAGS = flags.FLAGS
flags.DEFINE_enum('cloud', providers.GCP, providers.VALID_CLOUDS,
'Name of the cloud to use.')
flags.DEFINE_string('scratch_dir', None,
'Base name for all scratch disk directories in the VM. '
'Upon creation, these directories will have numbers '
'appended to them (for example /scratch0, /scratch1, etc).')
flags.DEFINE_string('startup_script', None,
'Script to run right after vm boot.')
flags.DEFINE_string('postrun_script', None,
'Script to run right after run stage.')
flags.DEFINE_integer('create_and_boot_post_task_delay', None,
'Delay in seconds to delay in between boot tasks.')
# pyformat: disable
flags.DEFINE_enum('benchmark_compatibility_checking', SUPPORTED,
[SUPPORTED, NOT_EXCLUDED, SKIP_CHECK],
'Method used to check compatibility between the benchmark '
'and the cloud. ' + SUPPORTED + ' runs the benchmark only'
' if the cloud provider has declared it supported. ' +
NOT_EXCLUDED + ' runs the benchmark unless it has been'
' declared not supported by the cloud provider. ' + SKIP_CHECK
+ ' does not do the compatibility'
' check.')
# pyformat: enable
class BenchmarkSpec(object):
"""Contains the various data required to make a benchmark run."""
total_benchmarks = 0
def __init__(self, benchmark_module, benchmark_config, benchmark_uid):
"""Initialize a BenchmarkSpec object.
Args:
benchmark_module: The benchmark module object.
benchmark_config: BenchmarkConfigSpec. The configuration for the
benchmark.
benchmark_uid: An identifier unique to this run of the benchmark even
if the same benchmark is run multiple times with different configs.
"""
self.config = benchmark_config
self.name = benchmark_module.BENCHMARK_NAME
self.uid = benchmark_uid
self.status = benchmark_status.SKIPPED
self.failed_substatus = None
self.status_detail = None
BenchmarkSpec.total_benchmarks += 1
self.sequence_number = BenchmarkSpec.total_benchmarks
self.vms = []
self.networks = {}
self.firewalls = {}
self.networks_lock = threading.Lock()
self.firewalls_lock = threading.Lock()
self.vm_groups = {}
self.container_specs = benchmark_config.container_specs or {}
self.container_registry = None
self.deleted = False
self.uuid = '%s-%s' % (FLAGS.run_uri, uuid.uuid4())
self.always_call_cleanup = False
self.spark_service = None
self.dpb_service = None
self.container_cluster = None
self.managed_relational_db = None
self.tpus = []
self.tpu_groups = {}
self.edw_service = None
self.nfs_service = None
self.smb_service = None
self.app_groups = {}
self._zone_index = 0
self.capacity_reservations = []
# Modules can't be pickled, but functions can, so we store the functions
# necessary to run the benchmark.
self.BenchmarkPrepare = benchmark_module.Prepare
self.BenchmarkRun = benchmark_module.Run
self.BenchmarkCleanup = benchmark_module.Cleanup
# Set the current thread's BenchmarkSpec object to this one.
context.SetThreadBenchmarkSpec(self)
def __repr__(self):
return '%s(%r)' % (self.__class__, self.__dict__)
def __str__(self):
return(
'Benchmark name: {0}\nFlags: {1}'
.format(self.name, self.config.flags))
@contextlib.contextmanager
def RedirectGlobalFlags(self):
"""Redirects flag reads and writes to the benchmark-specific flags object.
Within the enclosed code block, reads and writes to the flags.FLAGS object
are redirected to a copy that has been merged with config-provided flag
overrides specific to this benchmark run.
"""
with self.config.RedirectFlags(FLAGS):
yield
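  def _RunWithRedirectedFlags(self, stage_fn):
    """Illustrative sketch only; not part of the original class.

    Shows the typical use of RedirectGlobalFlags: wrap a stage callable so that
    flags.FLAGS reads inside it see this benchmark's config-provided overrides.
    """
    with self.RedirectGlobalFlags():
      return stage_fn()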
def ConstructContainerCluster(self):
"""Create the container cluster."""
if self.config.container_cluster is None:
return
cloud = self.config.container_cluster.cloud
cluster_type = self.config.container_cluster.type
providers.LoadProvider(cloud)
container_cluster_class = container_service.GetContainerClusterClass(
cloud, cluster_type)
self.container_cluster = container_cluster_class(
self.config.container_cluster)
def ConstructContainerRegistry(self):
"""Create the container registry."""
if self.config.container_registry is None:
return
cloud = self.config.container_registry.cloud
providers.LoadProvider(cloud)
container_registry_class = container_service.GetContainerRegistryClass(
cloud)
self.container_registry = container_registry_class(
self.config.container_registry)
def ConstructDpbService(self):
"""Create the dpb_service object and create groups for its vms."""
if self.config.dpb_service is None:
return
providers.LoadProvider(self.config.dpb_service.worker_group.cloud)
dpb_service_class = dpb_service.GetDpbServiceClass(
self.config.dpb_service.service_type)
self.dpb_service = dpb_service_class(self.config.dpb_service)
def ConstructManagedRelationalDb(self):
"""Create the managed relational db and create groups for its vms."""
if self.config.managed_relational_db is None:
return
cloud = self.config.managed_relational_db.cloud
providers.LoadProvider(cloud)
managed_relational_db_class = (
managed_relational_db.GetManagedRelationalDbClass(cloud))
self.managed_relational_db = managed_relational_db_class(
self.config.managed_relational_db)
def ConstructTpuGroup(self, group_spec):
"""Constructs the BenchmarkSpec's cloud TPU objects."""
if group_spec is None:
return
cloud = group_spec.cloud
providers.LoadProvider(cloud)
tpu_class = cloud_tpu.GetTpuClass(cloud)
return tpu_class(group_spec)
def ConstructTpu(self):
"""Constructs the BenchmarkSpec's cloud TPU objects."""
tpu_group_specs = self.config.tpu_groups
for group_name, group_spec in sorted(six.iteritems(tpu_group_specs)):
tpu = self.ConstructTpuGroup(group_spec)
self.tpu_groups[group_name] = tpu
self.tpus.append(tpu)
def ConstructEdwService(self):
"""Create the edw_service object."""
if self.config.edw_service is None:
return
# Load necessary modules from the provider to account for dependencies
providers.LoadProvider(
edw_service.TYPE_2_PROVIDER.get(self.config.edw_service.type))
# Load the module for the edw service based on type
edw_service_module = importlib.import_module(edw_service.TYPE_2_MODULE.get(
self.config.edw_service.type))
edw_service_class = getattr(edw_service_module,
self.config.edw_service.type[0].upper() +
self.config.edw_service.type[1:])
# Check if a new instance needs to be created or restored from snapshot
self.edw_service = edw_service_class(self.config.edw_service)
def ConstructNfsService(self):
"""Construct the NFS service object.
Creates an NFS Service only if an NFS disk is found in the disk_specs.
"""
if self.nfs_service:
logging.info('NFS service already created: %s', self.nfs_service)
return
for group_spec in self.config.vm_groups.values():
if not group_spec.disk_spec or not group_spec.vm_count:
continue
disk_spec = group_spec.disk_spec
if disk_spec.disk_type != disk.NFS:
continue
if disk_spec.nfs_ip_address:
self.nfs_service = nfs_service.StaticNfsService(disk_spec)
else:
cloud = group_spec.cloud
providers.LoadProvider(cloud)
nfs_class = nfs_service.GetNfsServiceClass(cloud)
self.nfs_service = nfs_class(disk_spec, group_spec.vm_spec.zone)
logging.debug('NFS service %s', self.nfs_service)
break
def ConstructSmbService(self):
"""Construct the SMB service object.
Creates an SMB Service only if an SMB disk is found in the disk_specs.
"""
if self.smb_service:
logging.info('SMB service already created: %s', self.smb_service)
return
for group_spec in self.config.vm_groups.values():
if not group_spec.disk_spec or not group_spec.vm_count:
continue
disk_spec = group_spec.disk_spec
if disk_spec.disk_type != disk.SMB:
continue
cloud = group_spec.cloud
providers.LoadProvider(cloud)
smb_class = smb_service.GetSmbServiceClass(cloud)
self.smb_service = smb_class(disk_spec, group_spec.vm_spec.zone)
logging.debug('SMB service %s', self.smb_service)
break
def ConstructVirtualMachineGroup(self, group_name, group_spec):
"""Construct the virtual machine(s) needed for a group."""
vms = []
vm_count = group_spec.vm_count
disk_count = group_spec.disk_count
# First create the Static VM objects.
if group_spec.static_vms:
specs = [
spec for spec in group_spec.static_vms
if (FLAGS.static_vm_tags is None or spec.tag in FLAGS.static_vm_tags)
][:vm_count]
for vm_spec in specs:
static_vm_class = static_vm.GetStaticVmClass(vm_spec.os_type)
vms.append(static_vm_class(vm_spec))
os_type = group_spec.os_type
cloud = group_spec.cloud
# This throws an exception if the benchmark is not
# supported.
self._CheckBenchmarkSupport(cloud)
# Then create the remaining VM objects using VM and disk specs.
if group_spec.disk_spec:
disk_spec = group_spec.disk_spec
# disk_spec.disk_type may contain legacy values that were
# copied from FLAGS.scratch_disk_type into
# FLAGS.data_disk_type at the beginning of the run. We
# translate them here, rather than earlier, because here is
# where we know what cloud we're using and therefore we're
# able to pick the right translation table.
disk_spec.disk_type = disk.WarnAndTranslateDiskTypes(
disk_spec.disk_type, cloud)
else:
disk_spec = None
for _ in range(vm_count - len(vms)):
# Assign a zone to each VM sequentially from the --zones flag.
if FLAGS.zones or FLAGS.extra_zones or FLAGS.zone:
zone_list = FLAGS.zones + FLAGS.extra_zones + FLAGS.zone
group_spec.vm_spec.zone = zone_list[self._zone_index]
self._zone_index = (self._zone_index + 1
if self._zone_index < len(zone_list) - 1 else 0)
vm = self._CreateVirtualMachine(group_spec.vm_spec, os_type, cloud)
if disk_spec and not vm.is_static:
if disk_spec.disk_type == disk.LOCAL and disk_count is None:
disk_count = vm.max_local_disks
vm.disk_specs = [copy.copy(disk_spec) for _ in range(disk_count)]
# In the event that we need to create multiple disks from the same
# DiskSpec, we need to ensure that they have different mount points.
if (disk_count > 1 and disk_spec.mount_point):
for i, spec in enumerate(vm.disk_specs):
spec.mount_point += str(i)
vms.append(vm)
return vms
def
|
(self):
"""Construct capacity reservations for each VM group."""
if not FLAGS.use_capacity_reservations:
return
for vm_group in six.itervalues(self.vm_groups):
cloud = vm_group[0].CLOUD
providers.LoadProvider(cloud)
capacity_reservation_class = capacity_reservation.GetResourceClass(
cloud)
self.capacity_reservations.append(
capacity_reservation_class(vm_group))
def _CheckBenchmarkSupport(self, cloud):
"""Throw an exception if the benchmark isn't supported."""
if FLAGS.benchmark_compatibility_checking == SKIP_CHECK:
return
provider_info_class = provider_info.GetProviderInfoClass(cloud)
benchmark_ok = provider_info_class.IsBenchmarkSupported(self.name)
if FLAGS.benchmark_compatibility_checking == NOT_EXCLUDED:
if benchmark_ok is None:
benchmark_ok = True
if not benchmark_ok:
raise ValueError('Provider {0} does not support {1}. Use '
'--benchmark_compatibility_checking=none '
'to override this check.'.format(
provider_info_class.CLOUD, self.name))
def _ConstructJujuController(self, group_spec):
"""Construct a VirtualMachine object for a Juju controller."""
juju_spec = copy.copy(group_spec)
juju_spec.vm_count = 1
jujuvms = self.ConstructVirtualMachineGroup('juju', juju_spec)
if len(jujuvms):
jujuvm = jujuvms.pop()
jujuvm.is_controller = True
return jujuvm
return None
def ConstructVirtualMachines(self):
"""Constructs the BenchmarkSpec's VirtualMachine objects."""
vm_group_specs = self.config.vm_groups
clouds = {}
for group_name, group_spec in sorted(six.iteritems(vm_group_specs)):
vms = self.ConstructVirtualMachineGroup(group_name, group_spec)
if group_spec.os_type == os_types.JUJU:
# The Juju VM needs to be created first, so that subsequent units can
# be properly added under its control.
if group_spec.cloud in clouds:
jujuvm = clouds[group_spec.cloud]
else:
jujuvm = self._ConstructJujuController(group_spec)
clouds[group_spec.cloud] = jujuvm
for vm in vms:
vm.controller = clouds[group_spec.cloud]
vm.vm_group = group_name
jujuvm.units.extend(vms)
if jujuvm and jujuvm not in self.vms:
self.vms.extend([jujuvm])
self.vm_groups['%s_juju_controller' % group_spec.cloud] = [jujuvm]
self.vm_groups[group_name] = vms
self.vms.extend(vms)
# If we have a spark service, it needs to access the master_group and
# the worker group.
if (self.config.spark_service and
self.config.spark_service.service_type == spark_service.PKB_MANAGED):
for group_name in 'master_group', 'worker_group':
self.spark_service.vms[group_name] = self.vm_groups[group_name]
def ConstructSparkService(self):
"""Create the spark_service object and create groups for its vms."""
if self.config.spark_service is None:
return
spark_spec = self.config.spark_service
# Worker group is required, master group is optional
cloud = spark_spec.worker_group.cloud
if spark_spec.master_group:
cloud = spark_spec.master_group.cloud
providers.LoadProvider(cloud)
service_type = spark_spec.service_type
spark_service_class = spark_service.GetSparkServiceClass(
cloud, service_type)
self.spark_service = spark_service_class(spark_spec)
# If this is Pkb managed, the benchmark spec needs to adopt vms.
if service_type == spark_service.PKB_MANAGED:
for name, spec in [('master_group', spark_spec.master_group),
('worker_group', spark_spec.worker_group)]:
if name in self.config.vm_groups:
raise Exception('Cannot have a vm group {0} with a {1} spark '
'service'.format(name, spark_service.PKB_MANAGED))
self.config.vm_groups[name] = spec
def Prepare(self):
targets = [(vm.PrepareBackgroundWorkload, (), {}) for vm in self.vms]
vm_util.RunParallelThreads(targets, len(targets))
def Provision(self):
"""Prepares the VMs and networks necessary for the benchmark to run."""
# Create capacity reservations if the cloud supports it. Note that the
# capacity reservation class may update the VMs themselves. This is true
# on AWS, because the VM needs to be aware of the capacity reservation id
# before its Create() method is called. Furthermore, if the user does not
# specify an AWS zone, but a region instead, the AwsCapacityReservation
# class will make a reservation in a zone that has sufficient capacity.
# In this case the VM's zone attribute, and the VMs network instance
# need to be updated as well.
if self.capacity_reservations:
vm_util.RunThreaded(lambda res: res.Create(), self.capacity_reservations)
# Sort networks into a guaranteed order of creation based on dict key.
# There is a finite limit on the number of threads that are created to
# provision networks. Until support is added to provision resources in an
# order based on dependencies, this key ordering can be used to avoid
# deadlock by placing dependent networks later and their dependencies
# earlier. As an example, AWS stores both per-region and per-zone objects
# in this dict, and each per-zone object depends on a corresponding
# per-region object, so the per-region objects are given keys that come
# first when sorted.
networks = [self.networks[key]
for key in sorted(six.iterkeys(self.networks))]
vm_util.RunThreaded(lambda net: net.Create(), networks)
if self.container_registry:
self.container_registry.Create()
for container_spec in six.itervalues(self.container_specs):
if container_spec.static_image:
continue
container_spec.image = self.container_registry.GetOrBuild(
container_spec.image)
if self.container_cluster:
self.container_cluster.Create()
# do after network setup but before VM created
if self.nfs_service:
self.nfs_service.Create()
if self.smb_service:
self.smb_service.Create()
if self.vms:
# We separate creating and booting the VMs from preparing them so that we
# don't slow down the creation of all the VMs by running commands on the
# VMs that have already booted.
vm_util.RunThreaded(
self.CreateAndBootVm,
self.vms,
post_task_delay=FLAGS.create_and_boot_post_task_delay)
vm_util.RunThreaded(self.PrepareVmAfterBoot, self.vms)
sshable_vms = [
vm for vm in self.vms if vm.OS_TYPE not in os_types.WINDOWS_OS_TYPES
]
sshable_vm_groups = {}
for group_name, group_vms in six.iteritems(self.vm_groups):
sshable_vm_groups[group_name] = [
vm for vm in group_vms
if vm.OS_TYPE not in os_types.WINDOWS_OS_TYPES
]
vm_util.GenerateSSHConfig(sshable_vms, sshable_vm_groups)
if self.spark_service:
self.spark_service.Create()
if self.dpb_service:
self.dpb_service.Create()
if self.managed_relational_db:
self.managed_relational_db.client_vm = self.vms[0]
self.managed_relational_db.Create()
if self.tpus:
vm_util.RunThreaded(lambda tpu: tpu.Create(), self.tpus)
if self.edw_service:
if not self.edw_service.user_managed:
# The benchmark creates the Redshift cluster's subnet group in the
# already provisioned virtual private cloud (vpc).
for network in networks:
if network.__class__.__name__ == 'AwsNetwork':
self.config.edw_service.subnet_id = network.subnet.id
self.edw_service.Create()
def Delete(self):
if self.deleted:
return
if self.container_registry:
self.container_registry.Delete()
if self.spark_service:
self.spark_service.Delete()
if self.dpb_service:
self.dpb_service.Delete()
if self.managed_relational_db:
self.managed_relational_db.Delete()
if self.tpus:
vm_util.RunThreaded(lambda tpu: tpu.Delete(), self.tpus)
if self.edw_service:
self.edw_service.Delete()
if self.nfs_service:
self.nfs_service.Delete()
if self.smb_service:
self.smb_service.Delete()
# Note: It is ok to delete capacity reservations before deleting the VMs;
# doing so actually saves money (mere seconds of usage).
if self.capacity_reservations:
try:
vm_util.RunThreaded(lambda reservation: reservation.Delete(),
self.capacity_reservations)
except Exception: # pylint: disable=broad-except
logging.exception('Got an exception deleting CapacityReservations. '
'Attempting to continue tearing down.')
if self.vms:
try:
vm_util.RunThreaded(self.DeleteVm, self.vms)
except Exception:
logging.exception('Got an exception deleting VMs. '
'Attempting to continue tearing down.')
for firewall in six.itervalues(self.firewalls):
try:
firewall.DisallowAllPorts()
except Exception:
logging.exception('Got an exception disabling firewalls. '
'Attempting to continue tearing down.')
if self.container_cluster:
self.container_cluster.DeleteServices()
self.container_cluster.DeleteContainers()
self.container_cluster.Delete()
for net in six.itervalues(self.networks):
try:
net.Delete()
except Exception:
logging.exception('Got an exception deleting networks. '
'Attempting to continue tearing down.')
self.deleted = True
def GetSamples(self):
"""Returns samples created from benchmark resources."""
samples = []
if self.container_cluster:
samples.extend(self.container_cluster.GetSamples())
if self.container_registry:
samples.extend(self.container_registry.GetSamples())
return samples
def StartBackgroundWorkload(self):
targets = [(vm.StartBackgroundWorkload, (), {}) for vm in self.vms]
vm_util.RunParallelThreads(targets, len(targets))
def StopBackgroundWorkload(self):
targets = [(vm.StopBackgroundWorkload, (), {}) for vm in self.vms]
vm_util.RunParallelThreads(targets, len(targets))
def _GetResourceDict(self, time_format, timeout_minutes=None):
"""Gets a list of tags to be used to tag resources."""
now_utc = datetime.datetime.utcnow()
if not timeout_minutes:
timeout_minutes = FLAGS.timeout_minutes
timeout_utc = (
now_utc +
datetime.timedelta(minutes=timeout_minutes))
tags = {
'timeout_utc': timeout_utc.strftime(time_format),
'create_time_utc': now_utc.strftime(time_format),
'benchmark': self.name,
'perfkit_uuid': self.uuid,
'owner': FLAGS.owner
}
return tags
def GetResourceTags(self, timeout_minutes=None):
"""Gets a list of tags to be used to tag resources."""
return self._GetResourceDict(METADATA_TIME_FORMAT, timeout_minutes)
def _CreateVirtualMachine(self, vm_spec, os_type, cloud):
"""Create a vm in zone.
Args:
vm_spec: A virtual_machine.BaseVmSpec object.
os_type: The type of operating system for the VM. See the flag of the
same name for more information.
cloud: The cloud for the VM. See the flag of the same name for more
information.
Returns:
A virtual_machine.BaseVirtualMachine object.
"""
vm = static_vm.StaticVirtualMachine.GetStaticVirtualMachine()
if vm:
return vm
vm_class = virtual_machine.GetVmClass(cloud, os_type)
if vm_class is None:
raise errors.Error(
'VMs of type %s" are not currently supported on cloud "%s".' %
(os_type, cloud))
return vm_class(vm_spec)
def CreateAndBootVm(self, vm):
"""Creates a single VM and waits for boot to complete.
Args:
vm: The BaseVirtualMachine object representing the VM.
"""
vm.Create()
logging.info('VM: %s', vm.ip_address)
logging.info('Waiting for boot completion.')
vm.AllowRemoteAccessPorts()
vm.WaitForBootCompletion()
def PrepareVmAfterBoot(self, vm):
"""Prepares a VM after it has booted.
This function will prepare a scratch disk if required.
Args:
vm: The BaseVirtualMachine object representing the VM.
Raises:
Exception: If --vm_metadata is malformed.
"""
vm_metadata = {
'benchmark':
self.name,
'perfkit_uuid':
self.uuid,
'benchmark_uid':
self.uid,
'create_time_utc':
datetime.datetime.utcfromtimestamp(vm.create_start_time),
'owner':
FLAGS.owner
}
for item in FLAGS.vm_metadata:
if ':' not in item:
raise Exception('"%s" not in expected key:value format' % item)
key, value = item.split(':', 1)
vm_metadata[key] = value
vm.AddMetadata(**vm_metadata)
vm.OnStartup()
if any((spec.disk_type == disk.LOCAL for spec in vm.disk_specs)):
vm.SetupLocalDisks()
for disk_spec in vm.disk_specs:
if disk_spec.disk_type == disk.RAM:
vm.CreateRamDisk(disk_spec)
else:
vm.CreateScratchDisk(disk_spec)
# TODO(user): Simplify disk logic.
if disk_spec.num_striped_disks > 1:
        # scratch disks have already been created and striped together.
break
# This must come after Scratch Disk creation to support the
# Containerized VM case
vm.PrepareVMEnvironment()
def DeleteVm(self, vm):
"""Deletes a single vm and scratch disk if required.
Args:
vm: The BaseVirtualMachine object representing the VM.
"""
if vm.is_static and vm.install_packages:
vm.PackageCleanup()
vm.Delete()
vm.DeleteScratchDisks()
@staticmethod
def _GetPickleFilename(uid):
"""Returns the filename for the pickled BenchmarkSpec."""
return os.path.join(vm_util.GetTempDir(), uid)
def Pickle(self):
"""Pickles the spec so that it can be unpickled on a subsequent run."""
with open(self._GetPickleFilename(self.uid), 'wb') as pickle_file:
pickle.dump(self, pickle_file, 2)
@classmethod
def GetBenchmarkSpec(cls, benchmark_module, config, uid):
"""Unpickles or creates a BenchmarkSpec and returns it.
Args:
benchmark_module: The benchmark module object.
config: BenchmarkConfigSpec. The configuration for the benchmark.
uid: An identifier unique to this run of the benchmark even if the same
benchmark is run multiple times with different configs.
Returns:
A BenchmarkSpec object.
"""
if stages.PROVISION in FLAGS.run_stage:
return cls(benchmark_module, config, uid)
try:
with open(cls._GetPickleFilename(uid), 'rb') as pickle_file:
spec = pickle.load(pickle_file)
except Exception as e: # pylint: disable=broad-except
logging.error('Unable to unpickle spec file for benchmark %s.',
benchmark_module.BENCHMARK_NAME)
raise e
# Always let the spec be deleted after being unpickled so that
# it's possible to run cleanup even if cleanup has already run.
spec.deleted = False
spec.status = benchmark_status.SKIPPED
context.SetThreadBenchmarkSpec(spec)
return spec
|
ConstructCapacityReservations
|
context.go
|
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package api
import (
stderrs "errors"
"time"
"golang.org/x/net/context"
"k8s.io/client-go/1.5/pkg/auth/user"
"k8s.io/client-go/1.5/pkg/types"
)
// Context carries values across API boundaries.
// This context matches the context.Context interface
// (https://blog.golang.org/context), for the purposes
// of passing the api.Context through to the storage tier.
// TODO: Determine the extent that this abstraction+interface
// is used by the api, and whether we can remove.
type Context interface {
// Value returns the value associated with key or nil if none.
Value(key interface{}) interface{}
// Deadline returns the time when this Context will be canceled, if any.
Deadline() (deadline time.Time, ok bool)
// Done returns a channel that is closed when this Context is canceled
// or times out.
Done() <-chan struct{}
// Err indicates why this context was canceled, after the Done channel
// is closed.
Err() error
}
// The key type is unexported to prevent collisions
type key int
const (
// namespaceKey is the context key for the request namespace.
namespaceKey key = iota
// userKey is the context key for the request user.
userKey
// uidKey is the context key for the uid to assign to an object on create.
uidKey
)
// NewContext instantiates a base context object for request flows.
func NewContext() Context {
return context.TODO()
}
// NewDefaultContext instantiates a base context object for request flows in the default namespace
func NewDefaultContext() Context {
return WithNamespace(NewContext(), NamespaceDefault)
}
// WithValue returns a copy of parent in which the value associated with key is val.
func WithValue(parent Context, key interface{}, val interface{}) Context {
internalCtx, ok := parent.(context.Context)
if !ok
|
return context.WithValue(internalCtx, key, val)
}
// WithNamespace returns a copy of parent in which the namespace value is set
func WithNamespace(parent Context, namespace string) Context {
return WithValue(parent, namespaceKey, namespace)
}
// NamespaceFrom returns the value of the namespace key on the ctx
func NamespaceFrom(ctx Context) (string, bool) {
namespace, ok := ctx.Value(namespaceKey).(string)
return namespace, ok
}
// NamespaceValue returns the value of the namespace key on the ctx, or the empty string if none
func NamespaceValue(ctx Context) string {
namespace, _ := NamespaceFrom(ctx)
return namespace
}
// ValidNamespace returns false if the namespace on the context differs from the resource. If the resource has no namespace, it is set to the value in the context.
func ValidNamespace(ctx Context, resource *ObjectMeta) bool {
ns, ok := NamespaceFrom(ctx)
if len(resource.Namespace) == 0 {
resource.Namespace = ns
}
return ns == resource.Namespace && ok
}
// WithNamespaceDefaultIfNone returns a context whose namespace is the default if and only if the parent context has no namespace value
func WithNamespaceDefaultIfNone(parent Context) Context {
namespace, ok := NamespaceFrom(parent)
if !ok || len(namespace) == 0 {
return WithNamespace(parent, NamespaceDefault)
}
return parent
}
// WithUser returns a copy of parent in which the user value is set
func WithUser(parent Context, user user.Info) Context {
return WithValue(parent, userKey, user)
}
// UserFrom returns the value of the user key on the ctx
func UserFrom(ctx Context) (user.Info, bool) {
user, ok := ctx.Value(userKey).(user.Info)
return user, ok
}
// WithUID returns a copy of parent in which the uid value is set
func WithUID(parent Context, uid types.UID) Context {
return WithValue(parent, uidKey, uid)
}
// UIDFrom returns the value of the uid key on the ctx
func UIDFrom(ctx Context) (types.UID, bool) {
uid, ok := ctx.Value(uidKey).(types.UID)
return uid, ok
}
|
{
panic(stderrs.New("Invalid context type"))
}
|
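A minimal usage sketch of the context helpers above; the `api` import path is hypothetical, and NamespaceDefault is assumed to be defined elsewhere in the same package:

package main

import (
	"fmt"

	"k8s.io/client-go/1.5/pkg/types"
	api "k8s.io/kubernetes/pkg/api" // hypothetical import path for the package shown above
)

func main() {
	// Start from the default-namespace context, then layer request-scoped values.
	ctx := api.NewDefaultContext()
	ctx = api.WithNamespace(ctx, "staging")   // override the request namespace
	ctx = api.WithUID(ctx, types.UID("1234")) // pre-assign an object UID

	if ns, ok := api.NamespaceFrom(ctx); ok {
		fmt.Println("namespace:", ns) // "staging"
	}
	if uid, ok := api.UIDFrom(ctx); ok {
		fmt.Println("uid:", uid) // "1234"
	}
}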
main.go
|
package main
import (
"flag"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"sync"
"time"
)
var verbose = flag.Bool("v", false, "show verbose progress messages")
var sema = make(chan struct{}, 20)
func main() {
start := time.Now()
	// determine the initial directories
flag.Parse()
roots := flag.Args()
if len(roots) == 0 {
roots = []string{"."}
}
// traverse the file tree
fileSizes := make(chan int64)
var n sync.WaitGroup
for _, root := range roots {
n.Add(1)
go walkDir(root, &n, fileSizes)
}
go func() {
n.Wait()
close(fileSizes)
}()
// print the results periodically
var tick <-chan time.Time
if *verbose {
tick = time.Tick(500 * time.Millisecond)
}
var nfiles, nbytes int64
loop:
for {
select {
case size, ok := <-fileSizes:
if !ok {
break loop // fileSizes was closed
}
nfiles++
nbytes += size
case <-tick:
printDiskUsage(nfiles, nbytes)
}
}
printDiskUsage(nfiles, nbytes)
elapsed := time.Since(start)
fmt.Printf("Elapsed time: %v\n", elapsed)
}
func printDiskUsage(nfiles, nbytes int64) {
fmt.Printf("%d files %.1f GB\n", nfiles, float64(nbytes)/1e9)
}
func
|
(dir string, n *sync.WaitGroup, fileSizes chan<- int64) {
defer n.Done()
for _, entry := range dirents(dir) {
if entry.IsDir() {
n.Add(1)
subdir := filepath.Join(dir, entry.Name())
walkDir(subdir, n, fileSizes)
} else {
fileSizes <- entry.Size()
}
}
}
func dirents(dir string) []os.FileInfo {
sema <- struct{}{} // acquire token
defer func() { <-sema }() // release token
entries, err := ioutil.ReadDir(dir)
if err != nil {
fmt.Fprintf(os.Stderr, "du1: %v\n", err)
return nil
}
return entries
}
|
walkDir
|
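The buffered channel sema above is a counting semaphore that caps concurrent ReadDir calls at 20; the same idiom bounds any kind of concurrent work. A self-contained sketch of the pattern (the limits and job count are illustrative):

package main

import (
	"fmt"
	"sync"
)

func main() {
	sema := make(chan struct{}, 3) // at most 3 jobs run at once
	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			sema <- struct{}{}        // acquire token
			defer func() { <-sema }() // release token
			fmt.Println("working on job", id)
		}(i)
	}
	wg.Wait()
}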
updatePassword.js
|
const initialState = {
formState: {
smsCode: '',
googleCode: '',
emailCode: '',
},
requestGetCodeLoading: false,
requestGetCodeResponse: null,
}
export default function updatePassword(state = initialState, action) {
let nextState
const { type, payload } = action
switch (type) {
case 'updatePassword/update_form':
nextState = {
...state,
formState: payload,
}
break
case 'updatePassword/update_auth_code_type':
nextState = {
...state,
authCodeType: payload,
|
break
case 'notify/clear_reducer':
nextState = initialState
break
default:
nextState = state
break
}
return nextState
}
|
}
|
line.go
|
package formatter
import (
"fmt"
"github.com/gol4ng/logger"
)
// Line formatter will transform log Entry into string
type Line struct {
format string
}
// Format will return Entry as string
// typically used for stdout output
func (l *Line) Format(entry logger.Entry) string {
return fmt.Sprintf(l.format, entry.Message, entry.Level, entry.Context)
}
// NewLine will create a new Line with format (fmt)
func NewLine(format string) *Line
|
{
return &Line{format}
}
|
|
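A brief usage sketch for the Line formatter above; the Message, Level and Context fields of logger.Entry are taken from the Format call shown, and the formatter import path is hypothetical:

package main

import (
	"fmt"

	formatter "example.com/yourmodule/formatter" // hypothetical import path for the package shown above
	"github.com/gol4ng/logger"
)

func main() {
	// Placeholders map to Message, Level and Context, in that order.
	line := formatter.NewLine("%s | level=%v | ctx=%v")
	entry := logger.Entry{Message: "user logged in"} // Level and Context keep their zero values here
	fmt.Println(line.Format(entry))
}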
tracks.go
|
package cmd
import (
"fmt"
"log"
"github.com/codegangsta/cli"
"github.com/exercism/cli/api"
"github.com/exercism/cli/config"
"github.com/exercism/cli/user"
)
// Tracks lists available tracks.
func Tracks(ctx *cli.Context) {
c, err := config.Read(ctx.GlobalString("config"))
if err != nil {
|
}
tracks, err := api.Tracks(fmt.Sprintf("%s/tracks", c.XAPI))
if err != nil {
log.Fatal(err)
}
curr := user.NewCurriculum(tracks)
fmt.Println("\nActive language tracks:")
curr.Report(user.TrackActive)
fmt.Println("\nInactive language tracks:")
curr.Report(user.TrackInactive)
// TODO: implement `list` command to list problems in a track
msg := `
Related commands:
exercism fetch (see 'exercism help fetch')
`
fmt.Println(msg)
}
|
log.Fatal(err)
|
doc.go
|
// Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
// Package dataproc is an auto-generated package for the
// Cloud Dataproc API.
//
// Manages Hadoop-based clusters and jobs on Google Cloud Platform.
//
// Use of Context
//
// The ctx passed to NewClient is used for authentication requests and
// for creating the underlying connection, but is not used for subsequent calls.
// Individual methods on the client use the ctx given to them.
//
// To close the open connection, use the Close() method.
//
// For information about setting deadlines, reusing contexts, and more
// please visit pkg.go.dev/cloud.google.com/go.
package dataproc // import "cloud.google.com/go/dataproc/apiv1beta2"
import (
"context"
"os"
"runtime"
"strconv"
"strings"
"unicode"
"google.golang.org/api/option"
"google.golang.org/grpc/metadata"
)
// For more information on implementing a client constructor hook, see
// https://github.com/googleapis/google-cloud-go/wiki/Customizing-constructors.
type clientHookParams struct{}
type clientHook func(context.Context, clientHookParams) ([]option.ClientOption, error)
const versionClient = "20210311"
func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
out, _ := metadata.FromOutgoingContext(ctx)
out = out.Copy()
for _, md := range mds {
for k, v := range md {
out[k] = append(out[k], v...)
}
}
return metadata.NewOutgoingContext(ctx, out)
}
func checkDisableDeadlines() (bool, error) {
raw, ok := os.LookupEnv("GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE")
if !ok {
return false, nil
}
b, err := strconv.ParseBool(raw)
return b, err
}
// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
func DefaultAuthScopes() []string {
return []string{
"https://www.googleapis.com/auth/cloud-platform",
}
}
// versionGo returns the Go runtime version. The returned string
// has no whitespace, suitable for reporting in header.
|
s := runtime.Version()
if strings.HasPrefix(s, develPrefix) {
s = s[len(develPrefix):]
if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 {
s = s[:p]
}
return s
}
notSemverRune := func(r rune) bool {
return !strings.ContainsRune("0123456789.", r)
}
if strings.HasPrefix(s, "go1") {
s = s[2:]
var prerelease string
if p := strings.IndexFunc(s, notSemverRune); p >= 0 {
s, prerelease = s[:p], s[p:]
}
if strings.HasSuffix(s, ".") {
s += "0"
} else if strings.Count(s, ".") < 2 {
s += ".0"
}
if prerelease != "" {
s += "-" + prerelease
}
return s
}
return "UNKNOWN"
}
|
func versionGo() string {
const develPrefix = "devel +"
|
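To make the normalisation in versionGo above concrete, a small illustrative program; the input/output pairs in the comments are derived from the parsing rules shown, not from running any particular toolchain:

package main

import (
	"fmt"
	"runtime"
)

func main() {
	// versionGo (shown above) rewrites runtime.Version() roughly as follows:
	//   "go1.16.3"          -> "1.16.3"
	//   "go1.16"            -> "1.16.0"        (padded to three components)
	//   "go1.17beta1"       -> "1.17.0-beta1"  (prerelease split off and reattached)
	//   "devel +abc123 ..." -> "abc123"        (devel prefix stripped, cut at first space)
	//   anything else       -> "UNKNOWN"
	fmt.Println("this toolchain reports:", runtime.Version())
}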
rates_test.go
|
package dinero
import (
"os"
"reflect"
"testing"
. "github.com/onsi/gomega"
)
// TestAllRates will test updating our local store of forex rates from the OXR API.
func TestAllRates(t *testing.T) {
// Register the test.
RegisterTestingT(t)
// Init dinero client.
client := NewClient(os.Getenv("OPEN_EXCHANGE_APP_ID"))
// Set a base currency to work with.
client.Rates.SetBaseCurrency("AUD")
// Get latest forex rates.
response, err := client.Rates.All()
if err != nil {
t.Fatalf("Unexpected error running client.Rates.All(): %s", err.Error())
}
if response.Base != "AUD" {
t.Fatalf("Unexpected base oxr rate: %s. Expecting `AUD`.", err.Error())
}
if response.UpdatedAt.IsZero() {
t.Fatalf("Unexpected response timestamp: %s.", err.Error())
}
|
}
}
// TestSingleRate will test pulling a single rate.
func TestSingleRate(t *testing.T) {
// Register the test.
RegisterTestingT(t)
// Init dinero client.
client := NewClient(os.Getenv("OPEN_EXCHANGE_APP_ID"))
// Set a base currency to work with.
client.Rates.SetBaseCurrency("AUD")
// Get latest forex rates for NZD (using AUD as a base).
response, err := client.Rates.Single("NZD")
if err != nil {
t.Fatalf("Unexpected error running client.Rates.Single('NZD'): %s", err.Error())
}
// Did we get a *float64 back?
if reflect.TypeOf(response).String() != "*float64" {
t.Fatalf("Unexpected rate datatype, expected float64 got %T", response)
}
}
|
if response.Rates == nil {
t.Fatalf("Unexpected length of rates: %s.", err.Error())
|