file_name (stringlengths 3-137) | prefix (stringlengths 0-918k) | suffix (stringlengths 0-962k) | middle (stringlengths 0-812k)
---|---|---|---
templates.go | package fluentd
// fluentConf: source -> fan to pipelines -> pipeline -> output [store]
var templateRegistry = []string{
inputSourceContainerTemplate,
inputSourceJournalTemplate,
inputSourceHostAuditTemplate,
inputSourceK8sAuditTemplate,
inputSourceOpenShiftAuditTemplate,
inputSourceOVNAuditTemplate,
fluentConfTemplate,
pipelineToOutputCopyTemplate,
sourceToPipelineCopyTemplate,
inputSelectorToPipelineTemplate,
inputSelectorBlockTemplate,
outputLabelConfCloudwatch,
outputLabelConfTemplate,
outputLabelConfNocopyTemplate,
outputLabelConfNoretryTemplate,
outputLabelConfJsonParseNoretryTemplate,
storeElasticsearchTemplate,
forwardTemplate,
storeSyslogTemplateOld,
storeSyslogTemplate,
storeKafkaTemplate,
}
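// Hypothetical usage sketch (funcMap, conf and w are assumptions, not part of this file):
// the registry entries are typically parsed into a single template set and the root
// "fluentConf" template is executed against the collector configuration, e.g.
//
//   t := template.New("fluent.conf").Funcs(funcMap)
//   for _, s := range templateRegistry {
//       t = template.Must(t.Parse(s))
//   }
//   err := t.ExecuteTemplate(w, "fluentConf", conf)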
const fluentConfTemplate = `{{- define "fluentConf" -}}
## CLO GENERATED CONFIGURATION ###
# This file is a copy of the fluentd configuration entrypoint
# which should normally be supplied in a configmap.
<system>
log_level "#{ENV['LOG_LEVEL'] || 'warn'}"
</system>
# In each section below, pre- and post- includes don't include anything initially;
# they exist to enable future additions to openshift conf as needed.
## sources
## ordered so that syslog always runs last...
<source>
@type prometheus
bind "#{ENV['POD_IP']}"
<ssl>
enable true
certificate_path "#{ENV['METRICS_CERT'] || '/etc/fluent/metrics/tls.crt'}"
private_key_path "#{ENV['METRICS_KEY'] || '/etc/fluent/metrics/tls.key'}"
</ssl>
</source>
<source>
@type prometheus_monitor
<labels>
hostname ${hostname}
</labels>
</source>
# excluding prometheus_tail_monitor
# since it leaks namespace/pod info
# via file paths
# This is considered experimental by the repo
<source>
@type prometheus_output_monitor
<labels>
hostname ${hostname}
</labels>
</source>
{{- range .SourceInputLabels }}
{{ . }}
{{- end}}
<label @MEASURE>
<filter **>
@type record_transformer
enable_ruby
<record>
msg_size ${record.to_s.length}
</record>
</filter>
<filter **>
@type prometheus
<metric>
name cluster_logging_collector_input_record_total
type counter
desc The total number of incoming records
<labels>
tag ${tag}
hostname ${hostname}
</labels>
</metric>
</filter>
<filter **>
@type prometheus
<metric>
name cluster_logging_collector_input_record_bytes
type counter
desc The total bytes of incoming records
key msg_size
<labels>
tag ${tag}
hostname ${hostname}
</labels>
</metric>
</filter>
<filter **>
@type record_transformer
remove_keys msg_size
</filter>
<match journal>
@type relabel
@label @INGRESS
</match>
<match *audit.log>
@type relabel
@label @INGRESS
</match>
<match kubernetes.**>
@type relabel
@label @CONCAT
</match>
</label>
<label @CONCAT>
<filter kubernetes.**>
@type concat
key log
partial_key logtag
partial_value P
separator ''
</filter>
<match kubernetes.**>
@type relabel
@label @INGRESS
</match>
</label>
#syslog input config here
<label @INGRESS>
## filters
<filter journal>
@type grep
<exclude>
key PRIORITY
pattern ^7$
</exclude>
</filter>
<filter ovn-audit.log**>
@type record_modifier
<record>
@timestamp ${DateTime.parse(record['message'].split('|')[0]).rfc3339(6)}
level ${record['message'].split('|')[3].downcase}
</record>
</filter>
<match journal>
@type rewrite_tag_filter
# skip to @INGRESS label section
@label @INGRESS
# see if this is a kibana container for special log handling
# looks like this:
# k8s_kibana.a67f366_logging-kibana-1-d90e3_logging_26c51a61-2835-11e6-ad29-fa163e4944d5_f0db49a2
# we filter these logs through the kibana_transform.conf filter
<rule>
key CONTAINER_NAME
pattern ^k8s_kibana\.
tag kubernetes.journal.container.kibana
</rule>
<rule>
key CONTAINER_NAME
pattern ^k8s_[^_]+_logging-eventrouter-[^_]+_
tag kubernetes.journal.container._default_.kubernetes-event
</rule>
# mark logs from default namespace for processing as k8s logs but stored as system logs
<rule>
key CONTAINER_NAME
pattern ^k8s_[^_]+_[^_]+_default_
tag kubernetes.journal.container._default_
</rule>
# mark logs from kube-* namespaces for processing as k8s logs but stored as system logs
<rule>
key CONTAINER_NAME
pattern ^k8s_[^_]+_[^_]+_kube-(.+)_
tag kubernetes.journal.container._kube-$1_
</rule>
# mark logs from openshift-* namespaces for processing as k8s logs but stored as system logs
<rule>
key CONTAINER_NAME
pattern ^k8s_[^_]+_[^_]+_openshift-(.+)_
tag kubernetes.journal.container._openshift-$1_
</rule>
# mark logs from openshift namespace for processing as k8s logs but stored as system logs
<rule>
key CONTAINER_NAME
pattern ^k8s_[^_]+_[^_]+_openshift_
tag kubernetes.journal.container._openshift_
</rule>
# mark fluentd container logs
<rule>
key CONTAINER_NAME
pattern ^k8s_.*fluentd
tag kubernetes.journal.container.fluentd
</rule>
# this is a kubernetes container
<rule>
key CONTAINER_NAME
pattern ^k8s_
tag kubernetes.journal.container
</rule>
# not kubernetes - assume a system log or system container log
<rule>
key _TRANSPORT
pattern .+
tag journal.system
</rule>
</match>
<filter kubernetes.**>
@type kubernetes_metadata
kubernetes_url 'https://kubernetes.default.svc'
cache_size '1000'
watch 'false'
use_journal 'nil'
ssl_partial_chain 'true'
</filter>
<filter kubernetes.journal.**>
@type parse_json_field
merge_json_log 'false'
preserve_json_log 'true'
json_fields 'log,MESSAGE'
</filter>
<filter kubernetes.var.log.containers.**>
@type parse_json_field
merge_json_log 'false'
preserve_json_log 'true'
json_fields 'log,MESSAGE'
</filter>
<filter kubernetes.var.log.containers.eventrouter-** kubernetes.var.log.containers.cluster-logging-eventrouter-**>
@type parse_json_field
merge_json_log true
preserve_json_log true
json_fields 'log,MESSAGE'
</filter>
<filter **kibana**>
@type record_transformer
enable_ruby
<record>
log ${record['err'] || record['msg'] || record['MESSAGE'] || record['log']}
</record>
remove_keys req,res,msg,name,level,v,pid,err
</filter>
<filter k8s-audit.log**>
@type record_modifier
<record>
k8s_audit_level ${record['level']}
level info
</record>
</filter>
<filter openshift-audit.log**>
@type record_modifier
<record>
openshift_audit_level ${record['level']}
level info
</record>
</filter>
<filter **>
@type viaq_data_model
elasticsearch_index_prefix_field 'viaq_index_name'
default_keep_fields CEE,time,@timestamp,aushape,ci_job,collectd,docker,fedora-ci,file,foreman,geoip,hostname,ipaddr4,ipaddr6,kubernetes,level,message,namespace_name,namespace_uuid,offset,openstack,ovirt,pid,pipeline_metadata,rsyslog,service,systemd,tags,testcase,tlog,viaq_msg_id
extra_keep_fields ''
keep_empty_fields 'message'
use_undefined false
undefined_name 'undefined'
rename_time true
rename_time_if_missing false
src_time_name 'time'
dest_time_name '@timestamp'
pipeline_type 'collector'
undefined_to_string 'false'
undefined_dot_replace_char 'UNUSED'
undefined_max_num_fields '-1'
process_kubernetes_events 'false'
<formatter>
tag "system.var.log**"
type sys_var_log
remove_keys host,pid,ident
</formatter>
<formatter>
tag "journal.system**"
type sys_journal
remove_keys log,stream,MESSAGE,_SOURCE_REALTIME_TIMESTAMP,__REALTIME_TIMESTAMP,CONTAINER_ID,CONTAINER_ID_FULL,CONTAINER_NAME,PRIORITY,_BOOT_ID,_CAP_EFFECTIVE,_CMDLINE,_COMM,_EXE,_GID,_HOSTNAME,_MACHINE_ID,_PID,_SELINUX_CONTEXT,_SYSTEMD_CGROUP,_SYSTEMD_SLICE,_SYSTEMD_UNIT,_TRANSPORT,_UID,_AUDIT_LOGINUID,_AUDIT_SESSION,_SYSTEMD_OWNER_UID,_SYSTEMD_SESSION,_SYSTEMD_USER_UNIT,CODE_FILE,CODE_FUNCTION,CODE_LINE,ERRNO,MESSAGE_ID,RESULT,UNIT,_KERNEL_DEVICE,_KERNEL_SUBSYSTEM,_UDEV_SYSNAME,_UDEV_DEVNODE,_UDEV_DEVLINK,SYSLOG_FACILITY,SYSLOG_IDENTIFIER,SYSLOG_PID
</formatter>
<formatter>
tag "kubernetes.journal.container**"
type k8s_journal
remove_keys 'log,stream,MESSAGE,_SOURCE_REALTIME_TIMESTAMP,__REALTIME_TIMESTAMP,CONTAINER_ID,CONTAINER_ID_FULL,CONTAINER_NAME,PRIORITY,_BOOT_ID,_CAP_EFFECTIVE,_CMDLINE,_COMM,_EXE,_GID,_HOSTNAME,_MACHINE_ID,_PID,_SELINUX_CONTEXT,_SYSTEMD_CGROUP,_SYSTEMD_SLICE,_SYSTEMD_UNIT,_TRANSPORT,_UID,_AUDIT_LOGINUID,_AUDIT_SESSION,_SYSTEMD_OWNER_UID,_SYSTEMD_SESSION,_SYSTEMD_USER_UNIT,CODE_FILE,CODE_FUNCTION,CODE_LINE,ERRNO,MESSAGE_ID,RESULT,UNIT,_KERNEL_DEVICE,_KERNEL_SUBSYSTEM,_UDEV_SYSNAME,_UDEV_DEVNODE,_UDEV_DEVLINK,SYSLOG_FACILITY,SYSLOG_IDENTIFIER,SYSLOG_PID'
</formatter>
<formatter>
tag "kubernetes.var.log.containers.eventrouter-** kubernetes.var.log.containers.cluster-logging-eventrouter-** k8s-audit.log** openshift-audit.log** ovn-audit.log**"
type k8s_json_file
remove_keys log,stream,CONTAINER_ID_FULL,CONTAINER_NAME
process_kubernetes_events 'true'
</formatter>
<formatter>
tag "kubernetes.var.log.containers**"
type k8s_json_file
remove_keys log,stream,CONTAINER_ID_FULL,CONTAINER_NAME
</formatter>
<elasticsearch_index_name>
enabled 'true'
tag "journal.system** system.var.log** **_default_** **_kube-*_** **_openshift-*_** **_openshift_**"
name_type static
static_index_name infra-write
</elasticsearch_index_name>
<elasticsearch_index_name>
enabled 'true'
tag "linux-audit.log** k8s-audit.log** openshift-audit.log** ovn-audit.log**"
name_type static
static_index_name audit-write
</elasticsearch_index_name>
<elasticsearch_index_name>
enabled 'true'
tag "**"
name_type static
static_index_name app-write
</elasticsearch_index_name>
</filter>
<filter **>
@type elasticsearch_genid_ext
hash_id_key viaq_msg_id
alt_key kubernetes.event.metadata.uid
alt_tags 'kubernetes.var.log.containers.logging-eventrouter-*.** kubernetes.var.log.containers.eventrouter-*.** kubernetes.var.log.containers.cluster-logging-eventrouter-*.** kubernetes.journal.container._default_.kubernetes-event'
</filter>
# Relabel specific source tags to specific intermediary labels for copy processing
# Earlier matchers remove logs so they don't fall through to later ones.
# A log source matcher may be null if no pipeline wants that type of log.
<match **_default_** **_kube-*_** **_openshift-*_** **_openshift_** journal.** system.var.log**>
{{- if .CollectInfraLogs }}
@type relabel
@label @_INFRASTRUCTURE
{{- else }}
@type null
{{- end}}
</match>
<match kubernetes.**>
{{- if .CollectAppLogs}}
@type relabel
@label @_APPLICATION
{{- else}}
@type null
{{- end}}
</match>
<match linux-audit.log** k8s-audit.log** openshift-audit.log** ovn-audit.log**>
{{- if .CollectAuditLogs }}
@type relabel
@label @_AUDIT
{{- else }}
@type null
{{- end}}
</match>
<match **>
@type stdout
</match>
</label>
# Relabel specific sources (e.g. logs.apps) to multiple pipelines
{{- range .SourceToPipelineLabels }}
{{ . }}
{{- end}}
{{ if .PipelinesToOutputLabels }}
# Relabel specific pipelines to multiple, outputs (e.g. ES, kafka stores)
{{- end}}
{{- range .PipelinesToOutputLabels }}
{{ . }}
{{- end}}
# Ship logs to specific outputs
{{- range .OutputLabels }}
{{ . }}
{{- end}}
{{ if .IncludeLegacySecureForward }}
<label @_LEGACY_SECUREFORWARD>
<match **>
@type copy
#include legacy secure-forward.conf
@include /etc/fluent/configs.d/secure-forward/secure-forward.conf
</match>
</label>
{{- end}}
{{ if .IncludeLegacySyslog }}
<label @_LEGACY_SYSLOG>
<match **>
@type copy
#include legacy Syslog
@include /etc/fluent/configs.d/syslog/syslog.conf
</match>
</label>
{{- end}}
{{- end}}`
const inputSourceJournalTemplate = `{{- define "inputSourceJournalTemplate" -}}
#journal logs to gather node
<source>
@type systemd
@id systemd-input
@label @MEASURE
path '/var/log/journal'
<storage>
@type local
persistent true
# NOTE: if this does not end in .json, fluentd will think it
# is the name of a directory - see fluentd storage_local.rb
path '/var/lib/fluentd/pos/journal_pos.json'
</storage>
matches "#{ENV['JOURNAL_FILTERS_JSON'] || '[]'}"
tag journal
read_from_head "#{if (val = ENV.fetch('JOURNAL_READ_FROM_HEAD','')) && (val.length > 0); val; else 'false'; end}"
</source>
{{- end}}`
const inputSourceContainerTemplate = `{{- define "inputSourceContainerTemplate" -}}
# container logs
<source>
@type tail
@id container-input
path "/var/log/containers/*.log"
exclude_path ["/var/log/containers/{{.CollectorPodNamePrefix}}-*_{{.LoggingNamespace}}_*.log", "/var/log/containers/{{.LogStorePodNamePrefix}}-*_{{.LoggingNamespace}}_*.log", "/var/log/containers/{{.VisualizationPodNamePrefix}}-*_{{.LoggingNamespace}}_*.log"]
pos_file "/var/lib/fluentd/pos/es-containers.log.pos"
refresh_interval 5
rotate_wait 5
tag kubernetes.*
read_from_head "true"
@label @MEASURE
<parse>
@type multi_format
<pattern>
format json
time_format '%Y-%m-%dT%H:%M:%S.%N%Z'
keep_time_key true
</pattern>
<pattern>
format regexp
expression /^(?<time>[^\s]+) (?<stream>stdout|stderr)( (?<logtag>.))? (?<log>.*)$/
time_format '%Y-%m-%dT%H:%M:%S.%N%:z'
keep_time_key true
</pattern>
</parse>
</source>
{{- end}}`
const inputSourceHostAuditTemplate = `{{- define "inputSourceHostAuditTemplate" -}}
# linux audit logs
<source>
@type tail
@id audit-input
@label @MEASURE
path "/var/log/audit/audit.log"
pos_file "/var/lib/fluentd/pos/audit.log.pos"
tag linux-audit.log
<parse>
@type viaq_host_audit
</parse>
</source>
{{- end}}`
const inputSourceK8sAuditTemplate = `{{- define "inputSourceK8sAuditTemplate" -}}
# k8s audit logs
<source>
@type tail
@id k8s-audit-input
@label @MEASURE
path "/var/log/kube-apiserver/audit.log"
pos_file "/var/lib/fluentd/pos/kube-apiserver.audit.log.pos"
tag k8s-audit.log
<parse>
@type json
time_key requestReceivedTimestamp
# In case folks want to parse based on the requestReceivedTimestamp key
keep_time_key true
time_format %Y-%m-%dT%H:%M:%S.%N%z
</parse>
</source>
{{- end}}`
const inputSourceOpenShiftAuditTemplate = `{{- define "inputSourceOpenShiftAuditTemplate" }}
# Openshift audit logs
<source>
@type tail
@id openshift-audit-input
@label @MEASURE
path /var/log/oauth-apiserver/audit.log,/var/log/openshift-apiserver/audit.log
pos_file /var/lib/fluentd/pos/oauth-apiserver.audit.log
tag openshift-audit.log
<parse>
@type json
time_key requestReceivedTimestamp
# In case folks want to parse based on the requestReceivedTimestamp key
keep_time_key true
time_format %Y-%m-%dT%H:%M:%S.%N%z
</parse>
</source>
{{- end}}`
const inputSourceOVNAuditTemplate = `{{- define "inputSourceOVNAuditTemplate" }}
# Open Virtual Network (OVN) audit logs
<source>
@type tail
@id ovn-audit-input
@label @MEASURE
path "/var/log/ovn/acl-audit-log.log"
pos_file "/var/lib/fluentd/pos/acl-audit-log.pos"
tag ovn-audit.log
refresh_interval 5
rotate_wait 5
read_from_head true
<parse>
@type none
</parse>
</source>
{{- end}}`
const sourceToPipelineCopyTemplate = `{{- define "sourceToPipelineCopyTemplate" -}}
<label {{sourceTypelabelName .Source}}>
<match **>
@type copy
{{- range $index, $pipelineLabel := .PipelineNames }}
<store>
@type relabel
@label {{labelName $pipelineLabel}}
</store>
{{- end }}
</match>
</label>
{{- end}}`
const inputSelectorToPipelineTemplate = `{{- define "inputSelectorToPipelineTemplate" -}}
<label {{sourceTypelabelName .Source}}>
<match **>
@type label_router
{{- range .InputSelectors }}
{{ . }}
{{- end}}
{{- if .PipelineNames }}
<route>
@label {{sourceTypelabelName .Source}}_ALL
<match>
</match>
</route>
{{- end }}
</match>
</label>
{{ if .PipelineNames -}}
<label {{sourceTypelabelName .Source}}_ALL>
<match **>
@type copy
{{- range $index, $pipelineLabel := .PipelineNames }}
<store>
@type relabel
@label {{labelName $pipelineLabel}}
</store>
{{- end }}
</match>
</label>
{{- end }}
{{- end}}`
const inputSelectorBlockTemplate = `{{- define "inputSelectorBlockTemplate" -}}
<route>
@label {{labelName .Pipeline}}
<match>
{{- if .Namespaces }}
namespaces {{ .Namespaces }}
{{- end}}
{{- if .Labels }}
labels {{ .Labels }}
{{- end}}
</match>
</route>
{{- end}}`
const pipelineToOutputCopyTemplate = `{{- define "pipelineToOutputCopyTemplate" -}}
<label {{labelName .Name}}>
{{ if .PipelineLabels -}}
<filter **>
@type record_transformer
<record>
openshift { "labels": {{.PipelineLabels}} }
</record>
</filter>
{{ end -}}
{{ if (eq .Parse "json") -}}
<filter **>
@type parser
key_name message
reserve_data yes
hash_value_field structured
<parse>
@type json
json_parser oj
</parse>
</filter>
{{ end -}}
<match **>
@type copy
{{- range $index, $target := .Outputs }}
<store>
@type relabel
@label {{labelName $target}}
</store>
{{- end }}
</match>
</label>
{{- end}}`
const outputLabelConfCloudwatch = `{{- define "outputLabelConfCloudwatch" -}}
<label {{.LabelName}}>
<filter kubernetes.**>
@type record_transformer
enable_ruby true
<record>
cw_group_name {{.LogGroupPrefix }}{{.LogGroupName }}
cw_stream_name ${tag}
</record>
</filter>
<filter journal **_default_** **_kube-*_** **_openshift-*_** **_openshift_**>
@type record_transformer
enable_ruby true
<record>
cw_group_name {{.LogGroupPrefix }}infrastructure
cw_stream_name ${record['hostname']}.${tag}
</record>
</filter>
<filter *audit.log>
@type record_transformer
enable_ruby true
<record>
cw_group_name {{.LogGroupPrefix }}audit
cw_stream_name ${record['hostname']}.${tag}
</record>
</filter>
<match **>
@type cloudwatch_logs
auto_create_stream true
region {{ .Target.Cloudwatch.Region }}
log_group_name_key cw_group_name
log_stream_name_key cw_stream_name
remove_log_stream_name_key true
remove_log_group_name_key true
auto_create_stream true
concurrency 2
{{- with $path := .SecretPath "aws_access_key_id"}}
aws_key_id "#{open('{{ $path }}','r') do |f|f.read end}"
{{- end}}
{{- with $path := .SecretPath "aws_secret_access_key"}}
aws_sec_key "#{open('{{ $path }}','r') do |f|f.read end}"
{{- end}}
include_time_key true
log_rejected_request true
</match>
</label>
{{- end}}`
const outputLabelConfTemplate = `{{- define "outputLabelConf" -}}
<label {{.LabelName}}>
{{- if (.NeedChangeElasticsearchStructuredType)}}
<filter **>
@type record_modifier
<record>
typeFromKey ${record.dig({{.GetKeyVal .Target.OutputTypeSpec.Elasticsearch.StructuredTypeKey}})}
hasStructuredTypeName "{{.Target.OutputTypeSpec.Elasticsearch.StructuredTypeName}}"
viaq_index_name ${ if !record['structured'].nil? && record['structured'] != {}; if !record['typeFromKey'].nil?; "app-"+record['typeFromKey']+"-write"; elsif record['hasStructuredTypeName'] != ""; "app-"+record['hasStructuredTypeName']+"-write"; else record['viaq_index_name']; end; else record['viaq_index_name']; end;}
</record>
remove_keys typeFromKey, hasStructuredTypeName
</filter>
{{- else}}
<filter **>
@type record_modifier
remove_keys structured
</filter>
{{- end}}
{{- if .IsElasticSearchOutput}}
#flatten labels to prevent field explosion in ES
<filter ** >
@type record_transformer
enable_ruby true
<record>
kubernetes ${!record['kubernetes'].nil? ? record['kubernetes'].merge({"flat_labels": (record['kubernetes']['labels']||{}).map{|k,v| "#{k}=#{v}"}}) : {} }
</record>
remove_keys $.kubernetes.labels
</filter>
{{- end}}
<match {{.RetryTag}}>
@type copy
{{ include .StoreTemplate . "prefix_as_retry" | indent 4}}
</match>
<match **>
@type copy
{{ include .StoreTemplate . "include_retry_tag"| indent 4}}
</match>
</label>
{{- end}}`
const outputLabelConfNocopyTemplate = `{{- define "outputLabelConfNoCopy" -}}
<label {{.LabelName}}>
<match **>
{{include .StoreTemplate . "" | indent 4}}
</match>
</label>
{{- end}}`
const outputLabelConfNoretryTemplate = `{{- define "outputLabelConfNoRetry" -}}
<label {{.LabelName}}>
<match **>
@type copy
{{include .StoreTemplate . "" | indent 4}}
</match>
</label>
{{- end}}`
const outputLabelConfJsonParseNoretryTemplate = `{{- define "outputLabelConfJsonParseNoRetry" -}}
<label {{.LabelName}}>
<filter **>
@type parse_json_field
json_fields message
merge_json_log false
replace_json_log true
</filter>
{{ if .Target.Syslog.AddLogSource }}
<filter **>
@type record_modifier
<record>
kubernetes_info ${if record.has_key?('kubernetes'); record['kubernetes']; else {}; end}
namespace_info ${if record['kubernetes_info'] != nil && record['kubernetes_info'] != {}; "namespace_name=" + record['kubernetes_info']['namespace_name']; else nil; end}
pod_info ${if record['kubernetes_info'] != nil && record['kubernetes_info'] != {}; "pod_name=" + record['kubernetes_info']['pod_name']; else nil; end}
container_info ${if record['kubernetes_info'] != nil && record['kubernetes_info'] != {}; "container_name=" + record['kubernetes_info']['container_name']; else nil; end}
msg_key ${if record.has_key?('message') && record['message'] != nil; record['message']; else nil; end}
msg_info ${if record['msg_key'] != nil && record['msg_key'].is_a?(Hash); require 'json'; "message="+record['message'].to_json; elsif record['msg_key'] != nil; "message="+record['message']; else nil; end}
message ${if record['msg_key'] != nil && record['kubernetes_info'] != nil && record['kubernetes_info'] != {}; record['namespace_info'] + ", " + record['container_info'] + ", " + record['pod_info'] + ", " + record['msg_info']; else record['message']; end}
</record>
remove_keys kubernetes_info, namespace_info, pod_info, container_info, msg_key, msg_info
</filter>
{{end -}}
<match **>
@type copy
{{include .StoreTemplate . "" | indent 4}}
</match>
</label>
{{- end}}`
const forwardTemplate = `{{- define "forward" -}}
# https://docs.fluentd.org/v1.0/articles/in_forward
@type forward
heartbeat_type none
keepalive true
{{- with $sharedKey := .GetSecret "shared_key" }}
<security>
self_hostname "#{ENV['NODE_NAME']}"
shared_key "{{$sharedKey}}"
</security>
{{- end}}
{{- if .IsTLS }}
transport tls
tls_verify_hostname false
tls_version 'TLSv1_2'
{{- if not .Secret}}
tls_insecure_mode true
{{- end}}
{{- with $path := .SecretPathIfFound "tls.key"}}
tls_client_private_key_path "{{$path}}"
{{- end}}
{{- with $path := .SecretPathIfFound "tls.crt"}}
tls_client_cert_path "{{$path}}"
{{- end}}
{{- with $path := .SecretPathIfFound "ca-bundle.crt"}}
tls_cert_path "{{$path}}"
{{- end}}
{{ with $path := .SecretPathIfFound "passphrase" -}}
tls_client_private_key_passphrase "#{File.exists?('{{ $path }}') ? open('{{ $path }}','r') do |f|f.read end : ''}"
{{ end -}}
{{- end}}
<buffer>
@type file
path '{{.BufferPath}}'
queued_chunks_limit_size "#{ENV['BUFFER_QUEUE_LIMIT'] || '1024' }"
{{- if .TotalLimitSize }}
total_limit_size {{.TotalLimitSize}}
{{- else }}
total_limit_size "#{ENV['TOTAL_LIMIT_SIZE'] || 8589934592 }" #8G
{{- end }}
{{- if .ChunkLimitSize }}
chunk_limit_size {{.ChunkLimitSize}}
{{- else }}
chunk_limit_size "#{ENV['BUFFER_SIZE_LIMIT'] || '1m'}"
{{- end }}
flush_mode {{.FlushMode}}
flush_interval {{.FlushInterval}}
flush_at_shutdown true
flush_thread_count {{.FlushThreadCount}}
retry_type {{.RetryType}}
retry_wait {{.RetryWait}}
retry_max_interval {{.RetryMaxInterval}}
{{.RetryTimeout}}
# the systemd journald 0.0.8 input plugin will just throw away records if the buffer
# queue limit is hit - 'block' will halt further reads and keep retrying to flush the
# buffer to the remote - default is 'block' because in_tail handles that case
overflow_action {{.OverflowAction}}
</buffer>
<server>
host {{.Host}}
port {{.Port}}
</server>
{{- end}}`
const storeElasticsearchTemplate = `{{ define "storeElasticsearch" -}}
<store>
@type elasticsearch
@id {{.StoreID }}
host {{.Host}}
port {{.Port}}
verify_es_version_at_startup false
{{- if .Target.Secret }}
{{ if and (.SecretPathIfFound "username") (.SecretPathIfFound "password") -}}
{{ if and (.SecretPathIfFound "tls.key") (.SecretPathIfFound "tls.crt") -}}
scheme https
ssl_version TLSv1_2
client_key '{{ .SecretPath "tls.key"}}'
client_cert '{{ .SecretPath "tls.crt"}}'
ca_file '{{ .SecretPath "ca-bundle.crt"}}'
{{ else -}}
scheme http
{{ end -}}
{{ with $path := .SecretPath "username" -}}
user "#{File.exists?('{{ $path }}') ? open('{{ $path }}','r') do |f|f.read end : ''}"
{{ end -}}
{{ with $path := .SecretPath "password" -}}
password "#{File.exists?('{{ $path }}') ? open('{{ $path }}','r') do |f|f.read end : ''}"
{{ end -}}
{{ else -}}
scheme https
ssl_version TLSv1_2
client_key '{{ .SecretPath "tls.key"}}'
client_cert '{{ .SecretPath "tls.crt"}}'
ca_file '{{ .SecretPath "ca-bundle.crt"}}'
{{ end -}}
{{- else}}
scheme http
{{- end }}
target_index_key viaq_index_name
id_key viaq_msg_id
remove_keys viaq_index_name
type_name _doc
{{- if .Hints.Has "include_retry_tag" }}
retry_tag {{.RetryTag}}
{{- end }}
http_backend typhoeus
write_operation create
reload_connections 'true'
# https://github.com/uken/fluent-plugin-elasticsearch#reload-after
reload_after '200'
# https://github.com/uken/fluent-plugin-elasticsearch#sniffer-class-name
sniffer_class_name 'Fluent::Plugin::ElasticsearchSimpleSniffer'
reload_on_failure false
# 2 ^ 31
request_timeout 2147483648
<buffer>
@type file
path '{{.BufferPath}}'
flush_mode {{.FlushMode}}
flush_interval {{.FlushInterval}}
flush_thread_count {{.FlushThreadCount}}
flush_at_shutdown true
retry_type {{.RetryType}}
retry_wait {{.RetryWait}}
retry_max_interval {{.RetryMaxInterval}}
{{.RetryTimeout}}
queued_chunks_limit_size "#{ENV['BUFFER_QUEUE_LIMIT'] || '32' }"
{{- if .TotalLimitSize }}
total_limit_size {{.TotalLimitSize}}
{{- else }}
total_limit_size "#{ENV['TOTAL_LIMIT_SIZE'] || 8589934592 }" #8G
{{- end}}
{{- if .ChunkLimitSize }}
chunk_limit_size {{.ChunkLimitSize}}
{{- else }}
chunk_limit_size "#{ENV['BUFFER_SIZE_LIMIT'] || '8m'}"
{{- end }}
overflow_action {{.OverflowAction}}
</buffer>
</store>
{{- end}}`
const storeSyslogTemplateOld = `{{- define "storeSyslogOld" -}}
<store>
@type {{.SyslogPlugin}}
@id {{.StoreID}}
remote_syslog {{.Host}}
port {{.Port}}
hostname ${hostname}
facility user
severity debug
</store>
{{- end}}`
// hostname ${hostname}
const storeSyslogTemplate = `{{- define "storeSyslog" -}}
<store>
@type remote_syslog
@id {{.StoreID}}
host {{.Host}}
port {{.Port}}
rfc {{.Rfc}}
facility {{.Facility}}
severity {{.Severity}}
{{if .Target.Syslog.AppName -}}
appname {{.AppName}}
{{end -}}
{{if .Target.Syslog.MsgID -}}
msgid {{.MsgID}} | {{end -}}
{{if .Target.Syslog.ProcID -}}
procid {{.ProcID}}
{{end -}}
{{if .Target.Syslog.Tag -}}
program {{.Tag}}
{{end -}}
protocol {{.Protocol}}
packet_size 4096
hostname "#{ENV['NODE_NAME']}"
{{ if .Target.Secret -}}
tls true
ca_file '{{ .SecretPath "ca-bundle.crt"}}'
verify_mode true
{{ end -}}
{{ if (eq .Protocol "tcp") -}}
timeout 60
timeout_exception true
keep_alive true
keep_alive_idle 75
keep_alive_cnt 9
keep_alive_intvl 7200
{{ end -}}
{{if .PayloadKey -}}
<format>
@type single_value
message_key {{.PayloadKey}}
</format>
{{end -}}
<buffer {{.ChunkKeys}}>
@type file
path '{{.BufferPath}}'
flush_mode {{.FlushMode}}
flush_interval {{.FlushInterval}}
flush_thread_count {{.FlushThreadCount}}
flush_at_shutdown true
retry_type {{.RetryType}}
retry_wait {{.RetryWait}}
retry_max_interval {{.RetryMaxInterval}}
{{.RetryTimeout}}
queued_chunks_limit_size "#{ENV['BUFFER_QUEUE_LIMIT'] || '32' }"
{{- if .TotalLimitSize }}
total_limit_size {{.TotalLimitSize}}
{{- else }}
total_limit_size "#{ENV['TOTAL_LIMIT_SIZE'] || 8589934592 }" #8G
{{- end }}
{{- if .ChunkLimitSize }}
chunk_limit_size {{.ChunkLimitSize}}
{{- else }}
chunk_limit_size "#{ENV['BUFFER_SIZE_LIMIT'] || '8m'}"
{{- end }}
overflow_action {{.OverflowAction}}
</buffer>
</store>
{{- end}}`
const storeKafkaTemplate = `{{- define "storeKafka" -}}
@type kafka2
brokers {{.Brokers}}
default_topic {{.Topic}}
use_event_time true
{{ if .Target.Secret -}}
{{ if and (.SecretPathIfFound "username") (.SecretPathIfFound "password") -}}
{{ with $path := .SecretPath "username" -}}
sasl_plain_username "#{File.exists?('{{ $path }}') ? open('{{ $path }}','r') do |f|f.read end : ''}"
{{ end -}}
{{ with $path := .SecretPath "password" -}}
sasl_plain_password "#{File.exists?('{{ $path }}') ? open('{{ $path }}','r') do |f|f.read end : ''}"
{{ end -}}
{{ end -}}
{{ if not (.SecretPathIfFound "sasl_over_ssl") -}}
sasl_over_ssl false
{{ else }}
sasl_over_ssl true
{{ end -}}
{{ if and (.SecretPathIfFound "tls.crt") (.SecretPathIfFound "tls.key") -}}
{{ $tlsCert := .SecretPath "tls.crt" }}
{{ $tlsKey := .SecretPath "tls.key" }}
ssl_client_cert "#{File.exist?('{{ $tlsCert }}') ? '{{ $tlsCert }}' : nil}"
ssl_client_cert_key "#{File.exist?('{{ $tlsKey }}') ? '{{ $tlsKey }}' : nil}"
{{ end -}}
{{ if .SecretPathIfFound "ca-bundle.crt" -}}
ssl_ca_cert '{{ .SecretPath "ca-bundle.crt"}}'
{{ end -}}
{{ end -}}
<format>
@type json
</format>
<buffer {{.Topic}}>
@type file
path '{{.BufferPath}}'
flush_mode {{.FlushMode}}
flush_interval {{.FlushInterval}}
flush_thread_count {{.FlushThreadCount}}
flush_at_shutdown true
retry_type {{.RetryType}}
retry_wait {{.RetryWait}}
retry_max_interval {{.RetryMaxInterval}}
{{.RetryTimeout}}
queued_chunks_limit_size "#{ENV['BUFFER_QUEUE_LIMIT'] || '32' }"
{{- if .TotalLimitSize }}
total_limit_size {{.TotalLimitSize}}
{{- else }}
total_limit_size "#{ENV['TOTAL_LIMIT_SIZE'] || 8589934592 }" #8G
{{- end }}
{{- if .ChunkLimitSize }}
chunk_limit_size {{.ChunkLimitSize}}
{{- else }}
chunk_limit_size "#{ENV['BUFFER_SIZE_LIMIT'] || '8m'}"
{{- end }}
overflow_action {{.OverflowAction}}
</buffer>
{{- end}}
`
_public_ip_prefixes_operations.py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class PublicIPPrefixesOperations(object):
"""PublicIPPrefixesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_07_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
public_ip_prefix_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-07-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'publicIpPrefixName': self._serialize.url("public_ip_prefix_name", public_ip_prefix_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIpPrefixName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
public_ip_prefix_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified public IP prefix.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param public_ip_prefix_name: The name of the PublicIpPrefix.
:type public_ip_prefix_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
public_ip_prefix_name=public_ip_prefix_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIpPrefixName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
public_ip_prefix_name, # type: str
expand=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "models.PublicIPPrefix"
"""Gets the specified public IP prefix in a specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param public_ip_prefix_name: The name of the Public IP Prefix.
:type public_ip_prefix_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PublicIPPrefix, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_07_01.models.PublicIPPrefix
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.PublicIPPrefix"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-07-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'publicIpPrefixName': self._serialize.url("public_ip_prefix_name", public_ip_prefix_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PublicIPPrefix', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIpPrefixName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
public_ip_prefix_name, # type: str
parameters, # type: "models.PublicIPPrefix"
**kwargs # type: Any
):
# type: (...) -> "models.PublicIPPrefix"
cls = kwargs.pop('cls', None) # type: ClsType["models.PublicIPPrefix"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'publicIpPrefixName': self._serialize.url("public_ip_prefix_name", public_ip_prefix_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'PublicIPPrefix')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('PublicIPPrefix', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('PublicIPPrefix', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIpPrefixName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
public_ip_prefix_name, # type: str
parameters, # type: "models.PublicIPPrefix"
**kwargs # type: Any
):
# type: (...) -> LROPoller["models.PublicIPPrefix"]
"""Creates or updates a static or dynamic public IP prefix.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param public_ip_prefix_name: The name of the public IP prefix.
:type public_ip_prefix_name: str
:param parameters: Parameters supplied to the create or update public IP prefix operation.
:type parameters: ~azure.mgmt.network.v2018_07_01.models.PublicIPPrefix
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either PublicIPPrefix or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2018_07_01.models.PublicIPPrefix]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.PublicIPPrefix"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
public_ip_prefix_name=public_ip_prefix_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('PublicIPPrefix', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIpPrefixName}'} # type: ignore
def _update_tags_initial(
self,
resource_group_name, # type: str
public_ip_prefix_name, # type: str
parameters, # type: "models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> "models.PublicIPPrefix"
cls = kwargs.pop('cls', None) # type: ClsType["models.PublicIPPrefix"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_tags_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'publicIpPrefixName': self._serialize.url("public_ip_prefix_name", public_ip_prefix_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PublicIPPrefix', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIpPrefixName}'} # type: ignore
def begin_update_tags(
self,
resource_group_name, # type: str
public_ip_prefix_name, # type: str
parameters, # type: "models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> LROPoller["models.PublicIPPrefix"]
"""Updates public IP prefix tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param public_ip_prefix_name: The name of the public IP prefix.
:type public_ip_prefix_name: str
:param parameters: Parameters supplied to update public IP prefix tags.
:type parameters: ~azure.mgmt.network.v2018_07_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either PublicIPPrefix or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2018_07_01.models.PublicIPPrefix]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.PublicIPPrefix"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_tags_initial(
resource_group_name=resource_group_name,
public_ip_prefix_name=public_ip_prefix_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('PublicIPPrefix', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIpPrefixName}'} # type: ignore
def list_all(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["models.PublicIPPrefixListResult"]
"""Gets all the public IP prefixes in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PublicIPPrefixListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2018_07_01.models.PublicIPPrefixListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.PublicIPPrefixListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-07-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('PublicIPPrefixListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/publicIPPrefixes'} # type: ignore
def list(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["models.PublicIPPrefixListResult"]
"""Gets all public IP prefixes in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PublicIPPrefixListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2018_07_01.models.PublicIPPrefixListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.PublicIPPrefixListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-07-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('PublicIPPrefixListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes'} # type: ignore
TmplSig.py | from time import time, sleep
from typing import List, Tuple, Dict, Any, Optional, Union
from base64 import b64decode
import base64
import random
import hashlib
import uuid
import sys
import json
import uvarint
import pprint
from local_blob import LocalBlob
from algosdk.v2client.algod import AlgodClient
from algosdk.kmd import KMDClient
from algosdk import account, mnemonic
from algosdk.encoding import decode_address
from algosdk.future import transaction
from pyteal import compileTeal, Mode, Expr
from pyteal import *
from algosdk.logic import get_application_address
from algosdk.future.transaction import LogicSigAccount
class TmplSig:
"""KeySig class reads in a json map containing assembly details of a template smart signature and allows you to populate it with the variables
In this case we are only interested in a single variable, the key which is a byte string to make the address unique.
In this demo we're using random strings but in practice you can choose something meaningful to your application
"""
def __init__(self, name):
# Read the source map
# with open("{}.json".format(name)) as f:
# self.map = json.loads(f.read())
self.map = {"name":"lsig.teal","version":6,"source":"","bytecode":"BiABAYEASIAASDEQgQYSRDEZIhJEMRiBABJEMSCAABJEMQGBABJEMQkyAxJEMRUyAxJEIg==",
"template_labels":{
"TMPL_ADDR_IDX":{"source_line":3,"position":5,"bytes":False},
"TMPL_EMITTER_ID":{"source_line":5,"position":8,"bytes":True},
"TMPL_APP_ID":{"source_line":16,"position":24,"bytes":False},
"TMPL_APP_ADDRESS":{"source_line":20,"position":30,"bytes":True}
},
"label_map":{},"line_map":[0,1,4,6,7,9,10,12,14,15,16,18,19,20,21,23,25,26,27,29,31,32,33,35,37,38,39,41,43,44,45,47,49,50,51]
}
self.src = base64.b64decode(self.map["bytecode"])
self.sorted = dict(
sorted(
self.map["template_labels"].items(),
key=lambda item: item[1]["position"],
)
)
def populate(self, values: Dict[str, Union[str, int]]) -> LogicSigAccount:
"""populate uses the map to fill in the variable of the bytecode and returns a logic sig with the populated bytecode"""
# Get the template source
contract = list(base64.b64decode(self.map["bytecode"]))
shift = 0
for k, v in self.sorted.items():
if k in values:
pos = v["position"] + shift
if v["bytes"]:
val = bytes.fromhex(values[k])
lbyte = uvarint.encode(len(val))
# -1 to account for the existing 00 byte for length
shift += (len(lbyte) - 1) + len(val)
# +1 to overwrite the existing 00 byte for length
contract[pos : pos + 1] = lbyte + val
else:
val = uvarint.encode(values[k])
# -1 to account for existing 00 byte
shift += len(val) - 1
# +1 to overwrite existing 00 byte
contract[pos : pos + 1] = val
# Create a new LogicSigAccount given the populated bytecode,
#pprint.pprint({"values": values, "contract": bytes(contract).hex()})
return LogicSigAccount(bytes(contract))
def get_bytecode_chunk(self, idx: int) -> Bytes:
start = 0
if idx > 0:
start = list(self.sorted.values())[idx - 1]["position"] + 1
stop = len(self.src)
if idx < len(self.sorted):
stop = list(self.sorted.values())[idx]["position"]
chunk = self.src[start:stop]
return Bytes(chunk)
def get_bytecode_raw(self, idx: int):
start = 0
if idx > 0:
start = list(self.sorted.values())[idx - 1]["position"] + 1
stop = len(self.src)
if idx < len(self.sorted):
stop = list(self.sorted.values())[idx]["position"]
chunk = self.src[start:stop]
return chunk
def get_sig_tmpl(self):
def sig_tmpl():
admin_app_id = ScratchVar()
admin_address= ScratchVar()
return Seq(
# Just adding this as a tmpl var to make the address unique and deterministic
# We don't actually care what the value is, pop it
Pop(Tmpl.Int("TMPL_ADDR_IDX")),
Pop(Tmpl.Bytes("TMPL_EMITTER_ID")),
Assert(Txn.type_enum() == TxnType.ApplicationCall),
Assert(Txn.on_completion() == OnComplete.OptIn),
Assert(Txn.application_id() == Tmpl.Int("TMPL_APP_ID")),
Assert(Txn.rekey_to() == Tmpl.Bytes("TMPL_APP_ADDRESS")),
Assert(Txn.fee() == Int(0)),
Assert(Txn.close_remainder_to() == Global.zero_address()),
Assert(Txn.asset_close_to() == Global.zero_address()),
Approve()
)
return compileTeal(sig_tmpl(), mode=Mode.Signature, version=6, assembleConstants=True)
if __name__ == '__main__':
core = TmplSig("sig")
# client = AlgodClient("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "http://localhost:4001")
# pprint.pprint(client.compile( core.get_sig_tmpl()))
with open("sig.tmpl.teal", "w") as f:
f.write(core.get_sig_tmpl())
| TmplSig |
test_blockchain.py | # -*- coding: utf-8 -*-
import asyncio
import pytest
import logging
from bitshares.aio.blockchain import Blockchain
log = logging.getLogger("grapheneapi")
log.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
ch = logging.StreamHandler()
ch.setFormatter(formatter)
log.addHandler(ch)
@pytest.fixture(scope="module")
async def chain(bitshares):
return await Blockchain(mode="head", blockchain_instance=bitshares)
@pytest.mark.asyncio
async def | (chain):
await chain.info()
@pytest.mark.asyncio
async def test_chainParameters(chain):
await chain.chainParameters()
@pytest.mark.asyncio
async def test_get_network(chain):
chain.get_network()
@pytest.mark.asyncio
async def test_get_chain_properties(chain):
await chain.get_chain_properties()
@pytest.mark.asyncio
async def test_config(chain):
await chain.config()
@pytest.mark.asyncio
async def test_get_current_block_num(chain):
await chain.get_current_block_num()
@pytest.mark.asyncio
async def test_get_current_block(chain):
await chain.get_current_block()
@pytest.mark.asyncio
async def test_get_block_interval(chain):
await chain.get_block_interval()
@pytest.mark.asyncio
async def test_block_time(chain):
await chain.block_time(1)
@pytest.mark.asyncio
async def test_block_timestamp(chain):
await chain.block_timestamp(1)
@pytest.mark.asyncio
async def test_blocks(chain):
async for block in chain.blocks(start=1, stop=5):
assert "transactions" in block
@pytest.mark.skip(reason="for internal use, depends on setting self.block_interval")
@pytest.mark.asyncio
async def test_wait_for_and_get_block(chain):
pass
@pytest.mark.asyncio
async def test_ops(chain):
async for op in chain.ops(start=1, stop=5):
assert "op" in op
@pytest.mark.asyncio
async def test_stream(chain):
async for op in chain.stream(start=1, stop=5):
assert "type" in op
@pytest.mark.asyncio
async def test_awaitTxConfirmation(bitshares, chain, default_account):
trx = await bitshares.transfer(
"init1", 1, "TEST", memo="awaitTxConfirmation", account=default_account
)
await chain.awaitTxConfirmation(trx)
@pytest.mark.asyncio
async def test_get_all_accounts(chain):
with pytest.raises(RuntimeError):
async for account in chain.get_all_accounts():
assert account
@pytest.mark.asyncio
async def test_participation_rate(chain):
rate = await chain.participation_rate
assert rate > 0
| test_info |
pipelines.py | from sqlalchemy.orm import sessionmaker
from models import Forecasts, db_connect, create_forecast_table
import logging
class PollenScraperPipeline(object):
def __init__(self):
engine = db_connect()
create_forecast_table(engine)
self.Session = sessionmaker(bind=engine)
def process_item(self, item, spider):
| session = self.Session()
forecast = Forecasts(**item)
try:
session.add(forecast)
session.commit()
except:
session.rollback()
raise
finally:
session.close()
return item |
|
indexController.js | function IndexController($scope, $http, $modal) {
$scope.newMeeting = function() {
$modal.open({
templateUrl: 'newMeetingModal.html',
backdrop: true,
size: 'lg',
controller: function($scope, $modalInstance) {
$scope.loading = false;
$scope.error = null;
$scope.name = '';
$scope.timePerTopic = 4;
$scope.timePerTopicAfterVote = 2;
$scope.votesPerPerson = 3;
$scope.submit = function() {
// TODO: Validate numbers
if ($scope.name === '') {
$scope.error = 'Name must be given!';
return;
}
$scope.error = '';
$scope.loading = true;
var postData = {
name: $scope.name,
timePerTopic: $scope.timePerTopic,
timePerTopicAfterVote: $scope.timePerTopicAfterVote,
votesPerPerson: $scope.votesPerPerson
};
$http.post('/meeting', postData).success(function(meeting) {
$scope.loading = false;
window.location.href = '/meeting/' + meeting.id;
}).error(function(error) {
$scope.loading = false;
$scope.error = error;
});
};
$scope.cancel = function () {
$modalInstance.dismiss(false);
};
} |
macchiatoControllers.controller('IndexController', IndexController); | });
};
} |
tag.test.js | const Hexo = require('hexo'),
moment = require('moment'),
_ = require('lodash'),
Promise = require('bluebird'),
tag = require('../lib/tag')
const instanciateHexo = function(tag) {
const hexo = new Hexo(__dirname, { silent: true })
hexo.config.sitemap = {
path: 'sitemap.xml'
}
if (tag !== undefined) {
hexo.config.sitemap.tag = tag
}
hexo.config.permalink = ':title'
hexo.init()
return Promise.resolve(hexo)
}
const insertPosts = function(hexo) {
const Post = hexo.model('Post')
const mockedPosts = [
{ source: 'foo', slug: 'foo', path: 'foo', updated: moment.utc([2015, 0, 1, 8]).toDate() },
{ source: 'bar', slug: 'bar', path: 'bar', updated: moment.utc([2015, 0, 2, 14]).toDate() },
{ source: 'baz', slug: 'baz', path: 'baz', updated: moment.utc([2015, 0, 3, 16]).toDate() }
]
return [hexo, Post.insert(mockedPosts)]
}
const setPostTag = function(hexo, posts) {
const post = posts[1]
return [hexo, post.setTags(['Tag1'])]
}
const getHexoLocalsAndConfig = function(hexo) {
return Promise.resolve([hexo.locals.toObject(), hexo.config])
}
describe('SEO-friendly sitemap generator', function() {
const applyTag = function(args) {
return tag.apply(null, args)
}
it('should not generate sitemap tag file if no tags are mentioned in posts', function() {
const checkAssertions = function(result) {
expect(result).toBeUndefined()
}
return instanciateHexo()
.then(getHexoLocalsAndConfig)
.then(applyTag)
.call('get')
.then(checkAssertions)
})
it('should generate sitemap tag data', function() {
const checkAssertions = function(result) {
expect(typeof result).toBe('object')
expect(moment(result.lastModification).isSame(moment.utc([2015, 0, 2, 14]))).toBeTruthy()
expect(Array.isArray(result.data.items)).toBe(true)
expect(result.data.items).toHaveLength(1)
expect(_.some(result.data.items, { name: 'Tag1' })).toBeTruthy()
}
return instanciateHexo()
.then(insertPosts)
.spread(setPostTag)
.spread(getHexoLocalsAndConfig)
.then(applyTag)
.call('get')
.then(checkAssertions)
})
it('should not generate sitemap tag file if config.sitemap.tag set to false', function() {
const checkAssertions = function(result) {
expect(result).toBeUndefined()
}
return instanciateHexo(false) | .then(insertPosts)
.spread(setPostTag)
.spread(getHexoLocalsAndConfig)
.then(applyTag)
.call('get')
.then(checkAssertions)
})
}) | |
peprint.py | import inspect
import math
import re
from functools import singledispatch, partial
from itertools import chain, cycle
from .api import (
always_break,
annotate,
concat,
contextual,
flat_choice,
fill,
group,
nest,
NIL,
LINE,
SOFTLINE,
HARDLINE
)
from .doc import (
Annotated,
Doc
)
from .layout import layout_smart
from .syntax import Token
from .utils import identity, intersperse
UNSET_SENTINEL = object()
COMMA = annotate(Token.PUNCTUATION, ',')
COLON = annotate(Token.PUNCTUATION, ':')
ELLIPSIS = annotate(Token.PUNCTUATION, '...')
LPAREN = annotate(Token.PUNCTUATION, '(')
RPAREN = annotate(Token.PUNCTUATION, ')')
LBRACKET = annotate(Token.PUNCTUATION, '[')
RBRACKET = annotate(Token.PUNCTUATION, ']')
LBRACE = annotate(Token.PUNCTUATION, '{')
RBRACE = annotate(Token.PUNCTUATION, '}')
NEG_OP = annotate(Token.OPERATOR, '-')
MUL_OP = annotate(Token.OPERATOR, '*')
ADD_OP = annotate(Token.OPERATOR, '+')
ASSIGN_OP = annotate(Token.OPERATOR, '=')
WHITESPACE_PATTERN_TEXT = re.compile(r'(\s+)')
WHITESPACE_PATTERN_BYTES = re.compile(rb'(\s+)')
NONWORD_PATTERN_TEXT = re.compile(r'(\W+)')
NONWORD_PATTERN_BYTES = re.compile(rb'(\W+)')
# For dict keys
"""
(
'aaaaaaaaaa'
'aaaaaa'
)
"""
MULTILINE_STATEGY_PARENS = 'MULTILINE_STATEGY_PARENS'
# For dict values
"""
'aaaaaaaaaa'
'aaaaa'
"""
MULTILINE_STATEGY_INDENTED = 'MULTILINE_STATEGY_INDENTED'
# For sequence elements
"""
'aaaaaaaaa'
'aaaaaa'
"""
MULTILINE_STATEGY_HANG = 'MULTILINE_STATEGY_HANG'
# For top level strs
"""
'aaaaaaaaa'
'aaaaaa'
"""
MULTILINE_STATEGY_PLAIN = 'MULTILINE_STATEGY_PLAIN'
IMPLICIT_MODULES = {
'__main__',
'builtins',
}
class CommentAnnotation:
def __init__(self, value):
assert isinstance(value, str)
self.value = value
def __repr__(self):
        return f'CommentAnnotation({repr(self.value)})'
class _CommentedValue:
def __init__(self, value, comment):
self.value = value
self.comment = comment
class _TrailingCommentedValue:
def __init__(self, value, comment):
self.value = value
self.comment = comment
def annotate_comment(comment, doc):
"""Annotate ``doc`` with ``comment`` text.
Peprint will make sure the parent (or top-level) handler
will render the comment in a proper way. E.g. if ``doc``
represents an element in a list, then the ``list`` pretty
printer will handle where to place the comment.
Differs from ``comment`` and ``trailing_comment`` by
operating only on Docs, not normal values.
"""
return annotate(CommentAnnotation(comment), doc)
def comment(comment_str, value):
"""Annotates a value with a comment str.
Allows you to insert comments into Peprint output
by annotating them on the values directly, instead
of first having to render them into a Doc and then
annotating the Doc with ``annotate_comment``.
Generally, you want to use this to annotate arguments
to ``prettycall``.
"""
return _CommentedValue(value, comment_str)
def trailing_comment(comment_str, value):
"""Annotates a value with a comment str, so that
the comment will be rendered "trailing", e.g. in place
of the last element in a list, set or tuple, or after
the last argument in a function.
This will force the rendering of `value` to be broken
    to multiple lines as Python does not have inline comments.
"""
return _TrailingCommentedValue(value, comment_str)
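# A minimal sketch (hypothetical, not part of this module's tests) of how the
# two helpers above combine with ``prettycall``; ``ctx`` is assumed to be a
# PrettyContext supplied by the caller:
#
#     prettycall(
#         ctx,
#         sorted,
#         comment('input arrives unsorted', [3, 1, 2]),
#         key=trailing_comment('illustrative sort key', len),
#     )
#
# The commented list renders with its comment beside (or above) it, and the
# trailing comment forces the call to break onto multiple lines.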
def unwrap_comments(value):
comment = None
trailing_comment = None
while isinstance(value, (_CommentedValue, _TrailingCommentedValue)):
if isinstance(value, _CommentedValue):
comment = value.comment
value = value.value
elif isinstance(value, _TrailingCommentedValue):
trailing_comment = value.comment
value = value.value
return (value, comment, trailing_comment)
def is_commented(value):
return (
isinstance(value, Annotated) and
isinstance(value.annotation, CommentAnnotation)
)
def builtin_identifier(s):
return annotate(Token.NAME_BUILTIN, s)
def identifier(s):
return annotate(Token.NAME_FUNCTION, s)
def general_identifier(s):
if callable(s):
module, qualname = s.__module__, s.__qualname__
if module in IMPLICIT_MODULES:
if module == 'builtins':
return builtin_identifier(qualname)
return identifier(qualname)
return identifier(f'{module}.{qualname}')
return identifier(s)
def classattr(cls, attrname):
return concat([
general_identifier(cls),
identifier(f'.{attrname}')
])
class PrettyContext:
__slots__ = (
'indent',
'depth_left',
'visited',
'multiline_strategy',
'user_ctx'
)
def __init__(
self,
indent,
depth_left,
visited=None,
multiline_strategy=MULTILINE_STATEGY_PLAIN,
user_ctx=None,
):
self.indent = indent
self.depth_left = depth_left
self.multiline_strategy = multiline_strategy
if visited is None:
visited = set()
self.visited = visited
if user_ctx is None:
user_ctx = {}
self.user_ctx = user_ctx
def _replace(self, **kwargs):
passed_keys = set(kwargs.keys())
fieldnames = type(self).__slots__
assert passed_keys.issubset(set(fieldnames))
return PrettyContext(
**{
k: (
kwargs[k]
if k in passed_keys
else getattr(self, k)
)
for k in fieldnames
}
)
def use_multiline_strategy(self, strategy):
return self._replace(multiline_strategy=strategy)
def set(self, key, value):
return self._replace(user_ctx={
**self.user_ctx,
key: value,
})
def get(self, key, default=None):
return self.user_ctx.get(key, default)
def nested_call(self):
return self._replace(depth_left=self.depth_left - 1)
def start_visit(self, value):
self.visited.add(id(value))
def end_visit(self, value):
self.visited.remove(id(value))
def is_visited(self, value):
return id(value) in self.visited
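# Sketch of how PrettyContext threads state through nested calls (the key name
# is illustrative only):
#
#     ctx2 = ctx.set('myapp.verbose', True)
#     ctx2.get('myapp.verbose')        # -> True
#     ctx2.nested_call().depth_left    # -> ctx2.depth_left - 1
#
# ``set`` builds a new user_ctx dict, so the original ``ctx`` is left unchanged.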
def _run_pretty(pretty_fn, value, ctx, trailing_comment=None):
if ctx.is_visited(value):
return _pretty_recursion(value)
ctx.start_visit(value)
if trailing_comment:
doc = pretty_fn(value, ctx, trailing_comment=trailing_comment)
else:
doc = pretty_fn(value, ctx)
if not (
isinstance(doc, str) or
isinstance(doc, Doc)
):
fnname = f'{pretty_fn.__module__}.{pretty_fn.__qualname__}'
raise ValueError(
'Functions decorated with register_pretty must return '
f'an instance of str or Doc. {fnname} returned '
f'{repr(doc)} instead.'
)
ctx.end_visit(value)
return doc
_PREDICATE_REGISTRY = []
def _repr_pretty(value, ctx):
for predicate, fn in _PREDICATE_REGISTRY:
if predicate(value):
return fn(value, ctx)
return repr(value)
pretty_dispatch = singledispatch(partial(_run_pretty, _repr_pretty))
def pretty_python_value(value, ctx):
comment = None
trailing_comment = None
value, comment, trailing_comment = unwrap_comments(value)
if trailing_comment:
doc = pretty_dispatch(
value,
ctx,
trailing_comment=trailing_comment
)
else:
doc = pretty_dispatch(
value,
ctx
)
if comment:
return annotate_comment(
comment,
doc
)
return doc
def register_pretty(type=None, predicate=None):
"""Returns a decorator that registers the decorated function
as the pretty printer for instances of ``type``.
"""
if type is None and predicate is None:
raise ValueError(
"You must provide either the 'type' or 'predicate' argument."
)
if type is not None and predicate is not None:
raise ValueError(
"You must provide either the 'type' or 'predicate' argument,"
"but not both"
)
if predicate is not None:
if not callable(predicate):
raise ValueError(
f"Expected a callable for 'predicate', got {repr(predicate)}"
)
def decorator(fn):
sig = inspect.signature(fn)
value = None
ctx = None
try:
sig.bind(value, ctx)
except TypeError:
fnname = f'{fn.__module__}.{fn.__qualname__}'
raise ValueError(
"Functions decorated with register_pretty must accept "
"exactly two positional parameters: 'value' and 'ctx'. "
f"The function signature for {fnname} was not compatible."
)
if type:
pretty_dispatch.register(type, partial(_run_pretty, fn))
else:
assert callable(predicate)
_PREDICATE_REGISTRY.append((predicate, fn))
return fn
return decorator
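# Registration sketch; ``MyPoint`` is a hypothetical user-defined class:
#
#     @register_pretty(MyPoint)
#     def pretty_mypoint(value, ctx):
#         return prettycall(ctx, MyPoint, x=value.x, y=value.y)
#
# The decorated function must accept exactly (value, ctx) and return a str or
# Doc, as checked above; a predicate-based registration works the same way but
# is only consulted as a fallback in _repr_pretty.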
def bracket(ctx, left, child, right):
return concat([
left,
nest(ctx.indent, concat([SOFTLINE, child])),
SOFTLINE,
right
])
def commentdoc(text):
"""Returns a Doc representing a comment `text`. `text` is
treated as words, and any whitespace may be used to break
the comment to multiple lines."""
if not text:
raise ValueError(
f'Expected non-empty comment str, got {repr(text)}'
)
commentlines = []
for line in text.splitlines():
alternating_words_ws = list(filter(None, WHITESPACE_PATTERN_TEXT.split(line)))
starts_with_whitespace = bool(
WHITESPACE_PATTERN_TEXT.match(alternating_words_ws[0])
)
if starts_with_whitespace:
prefix = alternating_words_ws[0]
alternating_words_ws = alternating_words_ws[1:]
else:
prefix = NIL
if len(alternating_words_ws) % 2 == 0:
# The last part must be whitespace.
alternating_words_ws = alternating_words_ws[:-1]
for idx, tup in enumerate(zip(alternating_words_ws, cycle([False, True]))):
part, is_ws = tup
if is_ws:
alternating_words_ws[idx] = flat_choice(
when_flat=part,
when_broken=always_break(
concat([
HARDLINE,
'# ',
])
)
)
commentlines.append(
concat([
'# ',
prefix,
fill(alternating_words_ws)
])
)
outer = identity
if len(commentlines) > 1:
outer = always_break
return annotate(
Token.COMMENT_SINGLE,
outer(concat(intersperse(HARDLINE, commentlines)))
)
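# Illustration (hedged, not asserted by tests here): commentdoc('fix me later')
# yields a Doc that renders as "# fix me later" when it fits on one line, while
# a longer comment is broken at whitespace with every continuation line
# prefixed by "# ", as built above.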
def sequence_of_docs(ctx, left, docs, right, dangle=False):
docs = list(docs)
# Performance optimization:
# in case of really long sequences,
# the layout algorithm can be quite slow.
# No branching here is needed if the sequence
# is long enough that even with the shortest
# element output, it does not fit the ribbon width.
minimum_output_len = (
2 + # Assume left and right are one character each
len(', ') * (len(docs) - 1) +
len(docs) # each element must take at least one character
)
MAX_PRACTICAL_RIBBON_WIDTH = 150
will_break = minimum_output_len > MAX_PRACTICAL_RIBBON_WIDTH
has_comment = any(is_commented(doc) for doc in docs)
parts = []
for idx, doc in enumerate(docs):
last = idx == len(docs) - 1
if is_commented(doc):
comment_str = doc.annotation.value
# Try to fit the comment at the end of the same line.
flat_version = concat([
doc,
COMMA if not last else NIL,
' ',
commentdoc(comment_str),
HARDLINE if not last else NIL
])
# If the value is broken to multiple lines, add
# comment on the line above.
broken_version = concat([
commentdoc(comment_str),
HARDLINE,
doc,
COMMA if not last else NIL,
HARDLINE if not last else NIL
])
parts.append(
group(
flat_choice(
when_flat=flat_version,
when_broken=broken_version,
)
)
)
else:
parts.append(doc)
if not last:
parts.append(
concat([COMMA, LINE])
)
if dangle:
parts.append(COMMA)
outer = (
always_break
if will_break or has_comment
else group
)
return outer(bracket(ctx, left, concat(parts), right))
def prettycall(ctx, fn, *args, **kwargs):
"""Returns a Doc that represents a function call to :keyword:`fn` with
the remaining positional and keyword arguments.
Given an arbitrary context ``ctx``,::
prettycall(ctx, sorted, [7, 4, 5], reverse=True)
Will result in output::
sorted([7, 4, 5], reverse=True)
The layout algorithm will automatically break the call to multiple
lines if needed::
sorted(
[7, 4, 5],
reverse=True
)
``prettycall`` automatically handles syntax highlighting.
:param ctx: a context value
:type ctx: peprint.peprint.PrettyContext
:param fn: a callable
:param args: positional arguments to render to the call
:param kwargs: keyword arguments to render to the call
:returns: :class:`~peprint.doc.Doc`
"""
fndoc = general_identifier(fn)
if ctx.depth_left <= 0:
return concat([fndoc, LPAREN, ELLIPSIS, RPAREN])
if not kwargs and len(args) == 1:
sole_arg = args[0]
unwrapped_sole_arg, _comment, _trailing_comment = unwrap_comments(args[0])
if type(unwrapped_sole_arg) in (list, dict, tuple):
return build_fncall(
ctx,
fndoc,
argdocs=[pretty_python_value(sole_arg, ctx)],
hug_sole_arg=True,
)
nested_ctx = (
ctx
.nested_call()
.use_multiline_strategy(MULTILINE_STATEGY_HANG)
)
return build_fncall(
ctx,
fndoc,
argdocs=(
pretty_python_value(arg, nested_ctx)
for arg in args
),
kwargdocs=(
(kwarg, pretty_python_value(v, nested_ctx))
for kwarg, v in kwargs.items()
),
)
def build_fncall(
ctx,
fndoc,
argdocs=(),
kwargdocs=(),
hug_sole_arg=False,
trailing_comment=None,
):
"""Builds a doc that looks like a function call,
from docs that represent the function, arguments
and keyword arguments.
If ``hug_sole_arg`` is True, and the represented
functional call is done with a single non-keyword
argument, the function call parentheses will hug
the sole argument doc without newlines and indentation
in break mode. This makes a difference in calls
like this::
> hug_sole_arg = False
frozenset(
[
1,
2,
3,
4,
5
]
)
> hug_sole_arg = True
frozenset([
1,
2,
3,
4,
5,
])
If ``trailing_comment`` is provided, the text is
rendered as a comment after the last argument and
before the closing parenthesis. This will force
the function call to be broken to multiple lines.
"""
if callable(fndoc):
fndoc = general_identifier(fndoc)
has_comment = bool(trailing_comment)
argdocs = list(argdocs)
kwargdocs = list(kwargdocs)
kwargdocs = [
# Propagate any comments to the kwarg doc.
(
annotate_comment(
doc.annotation.value,
concat([binding, ASSIGN_OP, doc.doc])
)
if is_commented(doc)
else concat([binding, ASSIGN_OP, doc])
)
for binding, doc in kwargdocs
]
if not (argdocs or kwargdocs):
return concat([
fndoc,
LPAREN,
RPAREN,
])
if (
hug_sole_arg and
not kwargdocs and
len(argdocs) == 1 and
not is_commented(argdocs[0])
):
return group(
concat([
fndoc,
LPAREN,
argdocs[0],
RPAREN
])
)
allarg_docs = [*argdocs, *kwargdocs]
if trailing_comment:
allarg_docs.append(commentdoc(trailing_comment))
parts = []
for idx, doc in enumerate(allarg_docs):
last = idx == len(allarg_docs) - 1
if is_commented(doc):
has_comment = True
comment_str = doc.annotation.value
doc = doc.doc
else:
comment_str = None
part = concat([doc, NIL if last else COMMA])
if comment_str:
part = group(
flat_choice(
when_flat=concat([
part,
' ',
commentdoc(comment_str)
]),
when_broken=concat([
commentdoc(comment_str),
HARDLINE,
part,
]),
)
)
if not last:
part = concat([part, HARDLINE if has_comment else LINE])
parts.append(part)
outer = (
always_break
if has_comment
else group
)
return outer(
concat([
fndoc,
LPAREN,
nest(
ctx.indent,
concat([
SOFTLINE,
concat(parts),
])
),
SOFTLINE,
RPAREN
])
) | @register_pretty(tuple)
@register_pretty(list)
@register_pretty(set)
def pretty_bracketable_iterable(value, ctx, trailing_comment=None):
dangle = False
if isinstance(value, list):
left, right = LBRACKET, RBRACKET
elif isinstance(value, tuple):
left, right = LPAREN, RPAREN
if len(value) == 1:
dangle = True
elif isinstance(value, set):
left, right = LBRACE, RBRACE
if not value:
if isinstance(value, (list, tuple)):
return concat([left, right])
else:
assert isinstance(value, set)
return prettycall(ctx, set)
if ctx.depth_left == 0:
return concat([left, ELLIPSIS, right])
if len(value) == 1:
sole_value = list(value)[0]
els = [
pretty_python_value(
sole_value,
ctx=(
ctx
.nested_call()
.use_multiline_strategy(MULTILINE_STATEGY_PLAIN)
)
)
]
else:
els = (
pretty_python_value(
el,
ctx=(
ctx
.nested_call()
.use_multiline_strategy(MULTILINE_STATEGY_HANG)
)
)
for el in value
)
if trailing_comment:
els = chain(els, [commentdoc(trailing_comment)])
dangle = False
return sequence_of_docs(ctx, left, els, right, dangle=dangle)
@register_pretty(frozenset)
def pretty_frozenset(value, ctx):
if value:
return prettycall(ctx, frozenset, list(value))
return prettycall(ctx, frozenset)
class _AlwaysSortable(object):
__slots__ = ('value', )
def __init__(self, value):
self.value = value
def sortable_value(self):
return (str(type(self)), id(self))
def __lt__(self, other):
try:
return self.value < other.value
except TypeError:
return self.sortable_value() < other.sortable_value()
@register_pretty(dict)
def pretty_dict(d, ctx):
if ctx.depth_left == 0:
return concat([LBRACE, ELLIPSIS, RBRACE])
has_comment = False
pairs = []
for k in sorted(d.keys(), key=_AlwaysSortable):
v = d[k]
if isinstance(k, (str, bytes)):
kdoc = pretty_str(
k,
# not a nested call on purpose
ctx=ctx.use_multiline_strategy(MULTILINE_STATEGY_PARENS),
)
else:
kdoc = pretty_python_value(
k,
ctx=ctx.nested_call()
)
vdoc = pretty_python_value(
v,
ctx=(
ctx
.nested_call()
.use_multiline_strategy(MULTILINE_STATEGY_INDENTED)
),
)
kcomment = None
if is_commented(kdoc):
has_comment = True
kcomment = kdoc.annotation.value
kdoc = kdoc.doc
vcomment = None
if is_commented(vdoc):
has_comment = True
vcomment = vdoc.annotation.value
vdoc = vdoc.doc
pairs.append((k, v, kdoc, vdoc, kcomment, vcomment))
parts = []
for idx, tup in enumerate(pairs):
last = idx == len(pairs) - 1
k, v, kdoc, vdoc, kcomment, vcomment = tup
if not (kcomment or vcomment):
parts.append(
concat([
kdoc,
concat([COLON, ' ']),
vdoc,
NIL if last else COMMA,
NIL if last else LINE,
]),
)
continue
if kcomment:
kcommented = concat([
commentdoc(kcomment),
HARDLINE,
kdoc,
])
else:
kcommented = kdoc
if vcomment:
vcommented = group(
flat_choice(
# Add comment at the end of the line
when_flat=concat([
vdoc,
NIL if last else COMMA,
' ',
commentdoc(vcomment),
NIL if last else HARDLINE,
]),
# Put comment above the value
# on its own line
when_broken=concat([
nest(
ctx.indent,
concat([
HARDLINE,
commentdoc(vcomment),
HARDLINE,
# Rerender vdoc with plain multiline strategy,
# since we already have an indentation.
pretty_python_value(
v,
ctx=(
ctx
.nested_call()
.use_multiline_strategy(MULTILINE_STATEGY_PLAIN)
),
),
COMMA if not last else NIL,
HARDLINE if not last else NIL
])
),
])
)
)
else:
vcommented = concat([
vdoc,
COMMA if not last else NIL,
LINE if not last else NIL
])
parts.append(
concat([
kcommented,
concat([COLON, ' ']),
vcommented
])
)
doc = bracket(
ctx,
LBRACE,
concat(parts),
RBRACE,
)
if len(pairs) > 2 or has_comment:
doc = always_break(doc)
else:
doc = group(doc)
return doc
INF_FLOAT = float('inf')
NEG_INF_FLOAT = float('-inf')
@register_pretty(float)
def pretty_float(value, ctx):
if ctx.depth_left == 0:
return prettycall(ctx, float, ...)
if value == INF_FLOAT:
return prettycall(ctx, float, 'inf')
elif value == NEG_INF_FLOAT:
return prettycall(ctx, float, '-inf')
elif math.isnan(value):
return prettycall(ctx, float, 'nan')
return annotate(Token.NUMBER_FLOAT, repr(value))
@register_pretty(int)
def pretty_int(value, ctx):
if ctx.depth_left == 0:
return prettycall(ctx, int, ...)
return annotate(Token.NUMBER_INT, repr(value))
@register_pretty(type(...))
def pretty_ellipsis(value, ctx):
return ELLIPSIS
@register_pretty(bool)
@register_pretty(type(None))
def pretty_singletons(value, ctx):
return annotate(Token.KEYWORD_CONSTANT, repr(value))
SINGLE_QUOTE_TEXT = "'"
SINGLE_QUOTE_BYTES = b"'"
DOUBLE_QUOTE_TEXT = '"'
DOUBLE_QUOTE_BYTES = b'"'
def determine_quote_strategy(s):
if isinstance(s, str):
single_quote = SINGLE_QUOTE_TEXT
double_quote = DOUBLE_QUOTE_TEXT
else:
single_quote = SINGLE_QUOTE_BYTES
double_quote = DOUBLE_QUOTE_BYTES
contains_single = single_quote in s
contains_double = double_quote in s
if not contains_single:
return SINGLE_QUOTE_TEXT
if not contains_double:
return DOUBLE_QUOTE_TEXT
assert contains_single and contains_double
single_count = s.count(single_quote)
double_count = s.count(double_quote)
if single_count <= double_count:
return SINGLE_QUOTE_TEXT
return DOUBLE_QUOTE_TEXT
def escape_str_for_quote(use_quote, s):
escaped_with_quotes = repr(s)
repr_used_quote = escaped_with_quotes[-1]
# string may have a prefix
first_quote_at_index = escaped_with_quotes.find(repr_used_quote)
repr_escaped = escaped_with_quotes[first_quote_at_index + 1:-1]
if repr_used_quote == use_quote:
# repr produced the quotes we wanted -
# escaping is correct.
return repr_escaped
# repr produced different quotes, which escapes
# alternate quotes.
if use_quote == SINGLE_QUOTE_TEXT:
# repr used double quotes
return (
repr_escaped
.replace('\\"', DOUBLE_QUOTE_TEXT)
.replace(SINGLE_QUOTE_TEXT, "\\'")
)
else:
# repr used single quotes
return (
repr_escaped
.replace("\\'", SINGLE_QUOTE_TEXT)
.replace(DOUBLE_QUOTE_TEXT, '\\"')
)
STR_LITERAL_ESCAPES = re.compile(
r'''((?:\\[\\abfnrtv"'])|'''
r'(?:\\N\{.*?\})|'
r'(?:\\u[a-fA-F0-9]{4})|'
r'(?:\\U[a-fA-F0-9]{8})|'
r'(?:\\x[a-fA-F0-9]{2})|'
r'(?:\\[0-7]{1,3}))'
)
def highlight_escapes(s):
if not s:
return NIL
matches = STR_LITERAL_ESCAPES.split(s)
starts_with_match = bool(STR_LITERAL_ESCAPES.match(matches[0]))
docs = []
for part, is_escaped in zip(
matches,
cycle([starts_with_match, not starts_with_match])
):
if not part:
continue
docs.append(
annotate(
(
Token.STRING_ESCAPE
if is_escaped
else Token.LITERAL_STRING
),
part
)
)
return concat(docs)
def pretty_single_line_str(s, indent, use_quote=None):
prefix = (
annotate(Token.STRING_AFFIX, 'b')
if isinstance(s, bytes)
else ''
)
if use_quote is None:
use_quote = determine_quote_strategy(s)
escaped = escape_str_for_quote(use_quote, s)
escapes_highlighted = highlight_escapes(escaped)
return concat([
prefix,
annotate(
Token.LITERAL_STRING,
concat([
use_quote,
escapes_highlighted,
use_quote
])
)
])
def split_at(idx, sequence):
return (sequence[:idx], sequence[idx:])
def escaped_len(s, use_quote):
return len(escape_str_for_quote(use_quote, s))
def str_to_lines(max_len, use_quote, s):
if isinstance(s, str):
whitespace_pattern = WHITESPACE_PATTERN_TEXT
nonword_pattern = NONWORD_PATTERN_TEXT
empty = ''
else:
assert isinstance(s, bytes)
whitespace_pattern = WHITESPACE_PATTERN_BYTES
nonword_pattern = NONWORD_PATTERN_BYTES
empty = b''
alternating_words_ws = whitespace_pattern.split(s)
if len(alternating_words_ws) <= 1:
# no whitespace: try splitting with nonword pattern.
alternating_words_ws = nonword_pattern.split(s)
starts_with_whitespace = bool(nonword_pattern.match(alternating_words_ws[0]))
else:
starts_with_whitespace = bool(whitespace_pattern.match(alternating_words_ws[0]))
# List[Tuple[str, bool]]
# The boolean associated with each part indicates if it is a
    # whitespace/non-word part or not.
tagged_alternating = list(
zip(
alternating_words_ws,
cycle([starts_with_whitespace, not starts_with_whitespace])
)
)
remaining_stack = list(reversed(tagged_alternating))
curr_line_parts = []
curr_line_len = 0
while remaining_stack:
curr, is_whitespace = remaining_stack.pop()
curr_line_parts.append(curr)
curr_line_len += escaped_len(curr, use_quote)
if curr_line_len == max_len:
if not is_whitespace and len(curr_line_parts) > 2:
curr_line_parts.pop()
yield empty.join(curr_line_parts)
curr_line_parts = []
curr_line_len = 0
remaining_stack.append((curr, is_whitespace))
else:
yield empty.join(curr_line_parts)
curr_line_parts = []
curr_line_len = 0
continue
elif curr_line_len > max_len:
if not is_whitespace and len(curr_line_parts) > 1:
curr_line_parts.pop()
yield empty.join(curr_line_parts)
remaining_stack.append((curr, is_whitespace))
curr_line_parts = []
curr_line_len = 0
continue
curr_line_parts.pop()
remaining_len = max_len - (curr_line_len - escaped_len(curr, use_quote))
this_line_part, next_line_part = split_at(max(remaining_len, 0), curr)
curr_line_parts.append(this_line_part)
yield empty.join(curr_line_parts)
curr_line_parts = []
curr_line_len = 0
if next_line_part:
remaining_stack.append((next_line_part, is_whitespace))
if curr_line_parts:
yield empty.join(curr_line_parts)
@register_pretty(str)
@register_pretty(bytes)
def pretty_str(s, ctx):
# Subclasses of str/bytes
# will be printed as StrSubclass('the actual string')
constructor = type(s)
is_native_type = constructor in (str, bytes)
if ctx.depth_left == 0:
if isinstance(s, str):
return prettycall(ctx, constructor, ...)
else:
assert isinstance(s, bytes)
return prettycall(ctx, constructor, ...)
multiline_strategy = ctx.multiline_strategy
peprint_indent = ctx.indent
def evaluator(indent, column, page_width, ribbon_width):
nonlocal multiline_strategy
columns_left_in_line = page_width - column
columns_left_in_ribbon = indent + ribbon_width - column
available_width = min(columns_left_in_line, columns_left_in_ribbon)
singleline_str_chars = len(s) + len('""')
flat_version = pretty_single_line_str(s, peprint_indent)
if singleline_str_chars <= available_width:
if is_native_type:
return flat_version
return build_fncall(ctx, constructor, argdocs=[flat_version])
# multiline string
each_line_starts_on_col = indent + peprint_indent
each_line_ends_on_col = min(page_width, each_line_starts_on_col + ribbon_width)
each_line_max_str_len = each_line_ends_on_col - each_line_starts_on_col - 2
use_quote = determine_quote_strategy(s)
lines = str_to_lines(
max_len=each_line_max_str_len,
use_quote=use_quote,
s=s,
)
parts = intersperse(
HARDLINE,
(
pretty_single_line_str(
line,
indent=peprint_indent,
use_quote=use_quote,
)
for line in lines
)
)
if not is_native_type:
multiline_strategy = MULTILINE_STATEGY_PLAIN
if multiline_strategy == MULTILINE_STATEGY_PLAIN:
res = always_break(concat(parts))
if is_native_type:
return res
return build_fncall(ctx, constructor, argdocs=[res])
elif multiline_strategy == MULTILINE_STATEGY_HANG:
return always_break(
nest(
peprint_indent,
concat(parts)
)
)
else:
if multiline_strategy == MULTILINE_STATEGY_PARENS:
left_paren, right_paren = LPAREN, RPAREN
else:
assert multiline_strategy == MULTILINE_STATEGY_INDENTED
left_paren, right_paren = '', ''
return always_break(
concat([
left_paren,
nest(
peprint_indent,
concat([
HARDLINE,
*parts,
])
),
(
HARDLINE
if multiline_strategy == MULTILINE_STATEGY_PARENS
else NIL
),
right_paren
])
)
return contextual(evaluator)
def _pretty_recursion(value):
return f'<Recursion on {type(value).__name__} with id={id(value)}>'
def python_to_sdocs(value, indent, width, depth, ribbon_width=71):
if depth is None:
depth = float('inf')
doc = pretty_python_value(
value,
ctx=PrettyContext(indent=indent, depth_left=depth, visited=set())
)
if is_commented(doc):
doc = group(
flat_choice(
when_flat=concat([
doc,
' ',
commentdoc(doc.annotation.value),
]),
when_broken=concat([
commentdoc(doc.annotation.value),
HARDLINE,
doc
])
)
)
ribbon_frac = min(1.0, ribbon_width / width)
return layout_smart(doc, width=width, ribbon_frac=ribbon_frac) | |
security.go | // Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build windows
package svc
import (
"syscall"
"unsafe"
"github.com/ifishnet/winsvc/winapi"
)
// TODO(brainman): move some of that code to syscall/security.go
// getInfo retrieves a specified type of information about an access token.
func getInfo(t syscall.Token, class uint32, initSize int) (unsafe.Pointer, error) {
b := make([]byte, initSize)
var n uint32
e := syscall.GetTokenInformation(t, class, &b[0], uint32(len(b)), &n)
if e != nil {
if e != syscall.ERROR_INSUFFICIENT_BUFFER {
return nil, e
}
// make receive buffers of requested size and try again
b = make([]byte, n)
e = syscall.GetTokenInformation(t, class, &b[0], uint32(len(b)), &n)
if e != nil {
return nil, e
}
}
return unsafe.Pointer(&b[0]), nil
}
// getTokenGroups retrieves group information from access token t.
func getTokenGroups(t syscall.Token) (*winapi.Tokengroups, error) {
i, e := getInfo(t, syscall.TokenGroups, 50)
if e != nil {
return nil, e
}
return (*winapi.Tokengroups)(i), nil
}
func allocSid(subAuth0 uint32) (*syscall.SID, error) {
var sid *syscall.SID
err := winapi.AllocateAndInitializeSid(&winapi.SECURITY_NT_AUTHORITY,
1, subAuth0, 0, 0, 0, 0, 0, 0, 0, &sid)
if err != nil {
return nil, err
}
return sid, nil
}
// IsAnInteractiveSession determines if calling process is running interactively.
// It queries the process token for membership in the Interactive group.
// http://stackoverflow.com/questions/2668851/how-do-i-detect-that-my-application-is-running-as-service-or-in-an-interactive-s
func IsAnInteractiveSession() (bool, error) |
// IsAnIinteractiveSession is a misspelled version of IsAnInteractiveSession.
// Do not use. It is kept here so we do not break existing code.
func IsAnIinteractiveSession() (bool, error) {
return IsAnInteractiveSession()
}
| {
interSid, err := allocSid(winapi.SECURITY_INTERACTIVE_RID)
if err != nil {
return false, err
}
defer winapi.FreeSid(interSid)
serviceSid, err := allocSid(winapi.SECURITY_SERVICE_RID)
if err != nil {
return false, err
}
defer winapi.FreeSid(serviceSid)
t, err := syscall.OpenCurrentProcessToken()
if err != nil {
return false, err
}
defer t.Close()
gs, err := getTokenGroups(t)
if err != nil {
return false, err
}
p := unsafe.Pointer(&gs.Groups[0])
groups := (*[256]syscall.SIDAndAttributes)(p)[:gs.GroupCount]
for _, g := range groups {
if winapi.EqualSid(g.Sid, interSid) {
return true, nil
}
if winapi.EqualSid(g.Sid, serviceSid) {
return false, nil
}
}
return false, nil
} |
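// Usage sketch (assuming this package is imported as svc):
//
//	interactive, err := svc.IsAnInteractiveSession()
//	if err != nil {
//		// handle the error
//	}
//	if !interactive {
//		// likely running as a Windows service
//	}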
test_redundant_router_network_rules.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from nose.plugins.attrib import attr
from marvin.integration.lib.base import *
from marvin.integration.lib.utils import *
from marvin.integration.lib.common import *
#Import Local Modules
from marvin.cloudstackTestCase import cloudstackTestCase
from marvin.cloudstackAPI import *
class Services:
"""Test Services for customer defects
"""
def __init__(self):
self.services = {
"account": {
"email": "[email protected]",
"firstname": "Test",
"lastname": "User",
"username": "test",
# Random characters are appended for unique
# username
"password": "password",
},
"service_offering": {
"name": "Tiny Instance",
"displaytext": "Tiny Instance",
"cpunumber": 1,
"cpuspeed": 100,
"memory": 128,
},
"disk_offering": {
"displaytext": "Small",
"name": "Small",
"disksize": 1
},
"virtual_machine": {
"displayname": "Test VM",
"username": "root",
"password": "password",
"ssh_port": 22,
"hypervisor": 'XenServer',
"privateport": 22,
"publicport": 22,
"protocol": 'TCP',
},
"static_nat": {
"startport": 22,
"endport": 22,
"protocol": "TCP"
},
"network_offering": {
"name": 'Network offering-RVR services',
"displaytext": 'Network off-RVR services',
"guestiptype": 'Isolated',
"supportedservices": 'Vpn,Dhcp,Dns,SourceNat,PortForwarding,Firewall,Lb,UserData,StaticNat',
"traffictype": 'GUEST',
"availability": 'Optional',
"serviceProviderList": {
"Vpn": 'VirtualRouter',
"Dhcp": 'VirtualRouter',
"Dns": 'VirtualRouter',
"SourceNat": 'VirtualRouter',
"PortForwarding": 'VirtualRouter',
"Firewall": 'VirtualRouter',
"Lb": 'VirtualRouter',
"UserData": 'VirtualRouter',
"StaticNat": 'VirtualRouter',
},
"serviceCapabilityList": {
"SourceNat": {
"SupportedSourceNatTypes": "peraccount",
"RedundantRouter": "true",
},
"lb": {
"SupportedLbIsolation": "dedicated"
},
},
},
"host": {
"username": "root",
"password": "password",
"publicport": 22,
},
"network": {
"name": "Test Network",
"displaytext": "Test Network",
},
"lbrule": {
"name": "SSH",
"alg": "roundrobin",
# Algorithm used for load balancing
"privateport": 22,
"publicport": 22,
"openfirewall": True,
},
"natrule": {
"privateport": 22,
"publicport": 22,
"protocol": "TCP"
},
"natrule_221": {
"privateport": 22,
"publicport": 221,
"protocol": "TCP"
},
"fw_rule": {
"startport": 1,
"endport": 6000,
"cidr": '55.55.0.0/11',
# Any network (For creating FW rule)
"protocol": 'TCP',
},
"ostype": 'CentOS 5.3 (64-bit)',
"sleep": 60,
}
class TestRedundantRouterRulesLifeCycle(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.api_client = super(
TestRedundantRouterRulesLifeCycle,
cls
).getClsTestClient().getApiClient()
cls.services = Services().services
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client, cls.services)
cls.zone = get_zone(cls.api_client, cls.services)
cls.template = get_template(
cls.api_client,
cls.zone.id,
cls.services["ostype"]
)
cls.services["virtual_machine"]["zoneid"] = cls.zone.id
cls.services["virtual_machine"]["template"] = cls.template.id
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.services["service_offering"]
)
cls.network_offering = NetworkOffering.create(
cls.api_client,
cls.services["network_offering"],
conservemode=True
)
# Enable Network offering
cls.network_offering.update(cls.api_client, state='Enabled')
cls._cleanup = [
cls.service_offering,
cls.network_offering,
]
return
@classmethod
def tearDownClass(cls):
try:
#Cleanup resources used
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.account = Account.create(
self.apiclient,
self.services["account"],
admin=True,
domainid=self.domain.id
)
self.cleanup = []
self.cleanup.insert(0, self.account)
return
def tearDown(self):
|
@attr(tags=["advanced", "advancedns", "ssh"])
def test_networkRules_afterRebootRouters(self):
"""Test network rules after master & backup routers rebooted
"""
# Steps to validate
# 1. listNetworks should show the created network in allocated state
# 2. listRouters returns no running routers
# 3. VMs should be deployed and in Running state
# 4. should list MASTER and BACKUP routers
# 5. listPublicIpAddresses for networkid should show acquired IP addr
# 6. listStaticNats for the network associated
# 7. listFirewallRules should show allowed ports open
# 8. ssh to succeed to the guestVM
# 9. listPublicIpAddresses for networkid should show acquired IP addr
# 10. listPortForwardRules to show open ports 221, 222
# 11. ssh should succeed for both ports
# 12. listPublicIpAddresses for networkid should show acquired IP addr
# 13 and 14. listLoadBalancerRules should show associated VMs for
# public IP
# 15. ssh should succeed to the user VMs
# 16. listRouters should show one Router in MASTER state and Running
# 17. ssh should work for PF, FW, and LB ips
# 18. listRouters should show both routers MASTER and BACKUP in
# Running state
# 19. listPortForwardingRules, listFirewallRules, listLoadBalancerRule
# should return empty response
# 20. listPublicIpAddresses should show now more addresses
# Creating network using the network offering created
self.debug("Creating network with network offering: %s" %
self.network_offering.id)
network = Network.create(
self.apiclient,
self.services["network"],
accountid=self.account.name,
domainid=self.account.domainid,
networkofferingid=self.network_offering.id,
zoneid=self.zone.id
)
self.debug("Created network with ID: %s" % network.id)
networks = Network.list(
self.apiclient,
id=network.id,
listall=True
)
self.assertEqual(
isinstance(networks, list),
True,
"List networks should return a valid response for created network"
)
nw_response = networks[0]
self.debug("Network state: %s" % nw_response.state)
self.assertEqual(
nw_response.state,
"Allocated",
"The network should be in allocated state after creation"
)
self.debug("Listing routers for network: %s" % network.name)
routers = Router.list(
self.apiclient,
networkid=network.id,
listall=True
)
self.assertEqual(
routers,
None,
"Routers should not be spawned when network is in allocated state"
)
self.debug("Deploying VM in account: %s" % self.account.name)
# Spawn an instance in that network
virtual_machine = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
networkids=[str(network.id)]
)
self.debug("Deployed VM in network: %s" % network.id)
vms = VirtualMachine.list(
self.apiclient,
id=virtual_machine.id,
listall=True
)
self.assertEqual(
isinstance(vms, list),
True,
"List Vms should return a valid list"
)
vm = vms[0]
self.assertEqual(
vm.state,
"Running",
"Vm should be in running state after deployment"
)
self.debug("Listing routers for network: %s" % network.name)
routers = Router.list(
self.apiclient,
networkid=network.id,
listall=True
)
self.assertEqual(
isinstance(routers, list),
True,
"list router should return Master and backup routers"
)
self.assertEqual(
len(routers),
2,
"Length of the list router should be 2 (Backup & master)"
)
if routers[0].redundantstate == 'MASTER':
master_router = routers[0]
backup_router = routers[1]
else:
master_router = routers[1]
backup_router = routers[0]
self.debug("Associating public IP for network: %s" % network.name)
public_ip = PublicIPAddress.create(
self.apiclient,
accountid=self.account.name,
zoneid=self.zone.id,
domainid=self.account.domainid,
networkid=network.id
)
self.debug("Associated %s with network %s" % (
public_ip.ipaddress.ipaddress,
network.id
))
self.debug("Enabling static NAT for IP: %s" %
public_ip.ipaddress.ipaddress)
try:
static_nat = StaticNATRule.create(
self.apiclient,
self.services["fw_rule"],
ipaddressid=public_ip.ipaddress.id
)
static_nat.enable(
self.apiclient,
ipaddressid=public_ip.ipaddress.id,
virtualmachineid=virtual_machine.id
)
self.debug("Static NAT enabled for IP: %s" %
public_ip.ipaddress.ipaddress)
except Exception as e:
self.fail("Failed to enable static NAT on IP: %s - %s" % (
public_ip.ipaddress.ipaddress, e))
public_ips = PublicIPAddress.list(
self.apiclient,
associatednetworkid=network.id,
listall=True,
isstaticnat=True
)
self.assertEqual(
isinstance(public_ips, list),
True,
"List public Ip for network should list the Ip addr"
)
self.assertEqual(
public_ips[0].ipaddress,
public_ip.ipaddress.ipaddress,
"List public Ip for network should list the Ip addr"
)
self.debug("creating a FW rule on IP: %s" %
public_ip.ipaddress.ipaddress)
fw_rule = FireWallRule.create(
self.apiclient,
ipaddressid=public_ip.ipaddress.id,
protocol='TCP',
cidrlist=[self.services["fw_rule"]["cidr"]],
startport=self.services["fw_rule"]["startport"],
endport=self.services["fw_rule"]["endport"]
)
self.debug("Created a firewall rule on 22 port of IP: %s" %
public_ip.ipaddress.ipaddress)
self.debug("Associating public IP for network: %s" % network.name)
public_ip_2 = PublicIPAddress.create(
self.apiclient,
accountid=self.account.name,
zoneid=self.zone.id,
domainid=self.account.domainid,
networkid=network.id
)
self.debug("Associated %s with network %s" % (
public_ip_2.ipaddress.ipaddress,
network.id
))
nat_rule = NATRule.create(
self.apiclient,
virtual_machine,
self.services["natrule_221"],
ipaddressid=public_ip_2.ipaddress.id,
openfirewall=True
)
self.debug("Associating public IP for network: %s" % network.name)
public_ip_3 = PublicIPAddress.create(
self.apiclient,
accountid=self.account.name,
zoneid=self.zone.id,
domainid=self.account.domainid,
networkid=network.id
)
self.debug("Associated %s with network %s" % (
public_ip_3.ipaddress.ipaddress,
network.id
))
self.debug("Creating LB rule for IP address: %s" %
public_ip_3.ipaddress.ipaddress)
lb_rule = LoadBalancerRule.create(
self.apiclient,
self.services["lbrule"],
ipaddressid=public_ip_3.ipaddress.id,
accountid=self.account.name,
networkid=network.id
)
self.debug("Adding %s to the LB rule %s" % (
virtual_machine.name,
lb_rule.name
))
lb_rule.assign(self.apiclient, [virtual_machine])
self.debug("Starting router ID: %s" % master_router.id)
for router in routers:
try:
self.debug("Rebooting router ID: %s" % master_router.id)
#Stop the router
cmd = rebootRouter.rebootRouterCmd()
cmd.id = router.id
self.apiclient.rebootRouter(cmd)
            except Exception as e:
                self.fail("Failed to reboot router %s: %s" % (router.id, e))
self.debug("Listing routers for network: %s" % network.name)
routers = Router.list(
self.apiclient,
networkid=network.id,
listall=True
)
self.assertEqual(
isinstance(routers, list),
True,
"list router should return Master and backup routers"
)
self.assertEqual(
len(routers),
2,
"Length of the list router should be 2 (Backup & master)"
)
for router in routers:
self.assertEqual(
router.state,
"Running",
"Router state should be running"
)
self.debug("Trying to SSH into the virtual machine")
try:
virtual_machine.get_ssh_client(
ipaddress=public_ip.ipaddress.ipaddress)
self.debug("SSH to guest VM succeeded")
except Exception as e:
self.fail("SSH to guest VM failed: %s" % e)
self.debug("Trying to SSH into the virtual machine")
try:
virtual_machine.get_ssh_client(
ipaddress=public_ip_2.ipaddress.ipaddress,
reconnect=True,
port=self.services["natrule_221"]["publicport"]
)
self.debug("SSH to guest VM succeeded")
except Exception as e:
self.fail("SSH to guest VM failed: %s" % e)
self.debug("Trying to SSH into the virtual machine")
try:
virtual_machine.get_ssh_client(
ipaddress=public_ip_3.ipaddress.ipaddress,
reconnect=True,
port=self.services["lbrule"]["publicport"]
)
self.debug("SSH to guest VM succeeded")
except Exception as e:
self.fail("SSH to guest VM failed: %s" % e)
return
@attr(tags=["advanced", "advancedns", "ssh"])
def test_applyRules_restartRvRNetwork(self):
"""Test apply rules after network restart
"""
# Steps to validate
# 1. listNetworks should show the created network in allocated state
# 2. listRouters returns no running routers
# 3. VMs should be deployed and in Running state
# 4. should list MASTER and BACKUP routers
# 5. listPublicIpAddresses for networkid should show acquired IP addr
# 6. listStaticNats for the network associated
# 7. listFirewallRules should show allowed ports open
# 8. ssh to succeed to the guestVM
# 9. listPublicIpAddresses for networkid should show acquired IP addr
# 10. listPortForwardRules to show open ports 221, 222
# 11. ssh should succeed for both ports
# 12. listPublicIpAddresses for networkid should show acquired IP addr
# 13 and 14. listLoadBalancerRules should show associated VMs for
# public IP
# 15. ssh should succeed to the user VMs
# 16. listRouters should show one Router in MASTER state and Running &
# one in BACKUP and Running
# 17. ssh should work for PF, FW, and LB ips
# 18. listRouters should show one Router in MASTER state and Running &
# one in BACKUP and Running
# 19. ssh should work for PF, FW, and LB ips
# 20. listPortForwardingRules, listFirewallRules, listLoadBalancerRule
# should return empty response
# 21. listPublicIpAddresses should show now more addresses
# Creating network using the network offering created
self.debug("Creating network with network offering: %s" %
self.network_offering.id)
network = Network.create(
self.apiclient,
self.services["network"],
accountid=self.account.name,
domainid=self.account.domainid,
networkofferingid=self.network_offering.id,
zoneid=self.zone.id
)
self.debug("Created network with ID: %s" % network.id)
networks = Network.list(
self.apiclient,
id=network.id,
listall=True
)
self.assertEqual(
isinstance(networks, list),
True,
"List networks should return a valid response for created network"
)
nw_response = networks[0]
self.debug("Network state: %s" % nw_response.state)
self.assertEqual(
nw_response.state,
"Allocated",
"The network should be in allocated state after creation"
)
self.debug("Listing routers for network: %s" % network.name)
routers = Router.list(
self.apiclient,
networkid=network.id,
listall=True
)
self.assertEqual(
routers,
None,
"Routers should not be spawned when network is in allocated state"
)
self.debug("Deploying VM in account: %s" % self.account.name)
# Spawn an instance in that network
virtual_machine = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
networkids=[str(network.id)]
)
self.debug("Deployed VM in network: %s" % network.id)
vms = VirtualMachine.list(
self.apiclient,
id=virtual_machine.id,
listall=True
)
self.assertEqual(
isinstance(vms, list),
True,
"List Vms should return a valid list"
)
vm = vms[0]
self.assertEqual(
vm.state,
"Running",
"Vm should be in running state after deployment"
)
self.debug("Listing routers for network: %s" % network.name)
routers = Router.list(
self.apiclient,
networkid=network.id,
listall=True
)
self.assertEqual(
isinstance(routers, list),
True,
"list router should return Master and backup routers"
)
self.assertEqual(
len(routers),
2,
"Length of the list router should be 2 (Backup & master)"
)
if routers[0].redundantstate == 'MASTER':
master_router = routers[0]
backup_router = routers[1]
else:
master_router = routers[1]
backup_router = routers[0]
self.debug("Associating public IP for network: %s" % network.name)
public_ip = PublicIPAddress.create(
self.apiclient,
accountid=self.account.name,
zoneid=self.zone.id,
domainid=self.account.domainid,
networkid=network.id
)
self.debug("Associated %s with network %s" % (
public_ip.ipaddress.ipaddress,
network.id
))
self.debug("Enabling static NAT for IP: %s" %
public_ip.ipaddress.ipaddress)
try:
static_nat = StaticNATRule.create(
self.apiclient,
self.services["fw_rule"],
ipaddressid=public_ip.ipaddress.id
)
static_nat.enable(
self.apiclient,
ipaddressid=public_ip.ipaddress.id,
virtualmachineid=virtual_machine.id
)
self.debug("Static NAT enabled for IP: %s" %
public_ip.ipaddress.ipaddress)
except Exception as e:
self.fail("Failed to enable static NAT on IP: %s - %s" % (
public_ip.ipaddress.ipaddress, e))
public_ips = PublicIPAddress.list(
self.apiclient,
associatednetworkid=network.id,
listall=True,
isstaticnat=True
)
self.assertEqual(
isinstance(public_ips, list),
True,
"List public Ip for network should list the Ip addr"
)
self.assertEqual(
public_ips[0].ipaddress,
public_ip.ipaddress.ipaddress,
"List public Ip for network should list the Ip addr"
)
self.debug("creating a FW rule on IP: %s" %
public_ip.ipaddress.ipaddress)
fw_rule = FireWallRule.create(
self.apiclient,
ipaddressid=public_ip.ipaddress.id,
protocol='TCP',
cidrlist=[self.services["fw_rule"]["cidr"]],
startport=self.services["fw_rule"]["startport"],
endport=self.services["fw_rule"]["endport"]
)
self.debug("Created a firewall rule on 22 port of IP: %s" %
public_ip.ipaddress.ipaddress)
self.debug("Associating public IP for network: %s" % network.name)
public_ip_2 = PublicIPAddress.create(
self.apiclient,
accountid=self.account.name,
zoneid=self.zone.id,
domainid=self.account.domainid,
networkid=network.id
)
self.debug("Associated %s with network %s" % (
public_ip_2.ipaddress.ipaddress,
network.id
))
nat_rule = NATRule.create(
self.apiclient,
virtual_machine,
self.services["natrule_221"],
ipaddressid=public_ip_2.ipaddress.id,
openfirewall=True
)
self.debug("Associating public IP for network: %s" % network.name)
public_ip_3 = PublicIPAddress.create(
self.apiclient,
accountid=self.account.name,
zoneid=self.zone.id,
domainid=self.account.domainid,
networkid=network.id
)
self.debug("Associated %s with network %s" % (
public_ip_3.ipaddress.ipaddress,
network.id
))
self.debug("Creating LB rule for IP address: %s" %
public_ip_3.ipaddress.ipaddress)
lb_rule = LoadBalancerRule.create(
self.apiclient,
self.services["lbrule"],
ipaddressid=public_ip_3.ipaddress.id,
accountid=self.account.name,
networkid=network.id
)
self.debug("Adding %s to the LB rule %s" % (
virtual_machine.name,
lb_rule.name
))
lb_rule.assign(self.apiclient, [virtual_machine])
self.debug("Restarting network ID: %s with cleanup true" %
network.id)
try:
network.restart(self.apiclient, cleanup=True)
except Exception as e:
self.fail("Failed to cleanup network")
self.debug("Listing routers for network: %s" % network.name)
routers = Router.list(
self.apiclient,
networkid=network.id,
listall=True
)
self.assertEqual(
isinstance(routers, list),
True,
"list router should return Master and backup routers"
)
self.assertEqual(
len(routers),
2,
"Length of the list router should be 2 (Backup & master)"
)
for router in routers:
self.assertEqual(
router.state,
"Running",
"Router state should be running"
)
self.debug("Trying to SSH into the virtual machine")
try:
virtual_machine.get_ssh_client(
ipaddress=public_ip.ipaddress.ipaddress)
self.debug("SSH to guest VM succeeded")
except Exception as e:
self.fail("SSH to guest VM failed: %s" % e)
self.debug("Trying to SSH into the virtual machine")
try:
virtual_machine.get_ssh_client(
ipaddress=public_ip_2.ipaddress.ipaddress,
reconnect=True,
port=self.services["natrule_221"]["publicport"]
)
self.debug("SSH to guest VM succeeded")
except Exception as e:
self.fail("SSH to guest VM failed: %s" % e)
self.debug("Trying to SSH into the virtual machine")
try:
virtual_machine.get_ssh_client(
ipaddress=public_ip_3.ipaddress.ipaddress,
reconnect=True,
port=self.services["lbrule"]["publicport"]
)
self.debug("SSH to guest VM succeeded")
except Exception as e:
self.fail("SSH to guest VM failed: %s" % e)
self.debug("Restarting network ID: %s with cleanup false" %
network.id)
try:
network.restart(self.apiclient, cleanup=False)
except Exception as e:
self.fail("Failed to cleanup network")
self.debug("Listing routers for network: %s" % network.name)
routers = Router.list(
self.apiclient,
networkid=network.id,
listall=True
)
self.assertEqual(
isinstance(routers, list),
True,
"list router should return Master and backup routers"
)
self.assertEqual(
len(routers),
2,
"Length of the list router should be 2 (Backup & master)"
)
for router in routers:
self.assertEqual(
router.state,
"Running",
"Router state should be running"
)
self.debug("Trying to SSH into the virtual machine")
try:
virtual_machine.get_ssh_client(
ipaddress=public_ip.ipaddress.ipaddress)
self.debug("SSH to guest VM succeeded")
except Exception as e:
self.fail("SSH to guest VM failed: %s" % e)
self.debug("Trying to SSH into the virtual machine")
try:
virtual_machine.get_ssh_client(
ipaddress=public_ip_2.ipaddress.ipaddress,
reconnect=True,
port=self.services["natrule_221"]["publicport"]
)
self.debug("SSH to guest VM succeeded")
except Exception as e:
self.fail("SSH to guest VM failed: %s" % e)
self.debug("Trying to SSH into the virtual machine")
try:
virtual_machine.get_ssh_client(
ipaddress=public_ip_3.ipaddress.ipaddress,
reconnect=True,
port=self.services["lbrule"]["publicport"]
)
self.debug("SSH to guest VM succeeded")
except Exception as e:
self.fail("SSH to guest VM failed: %s" % e)
return
@attr(tags=["advanced", "advancedns", "ssh"])
def test_apply_and__delete_NetworkRulesOnRvR(self):
"""Test apply and delete network rules on redundant router
"""
# Steps to validate
# 1. listNetworks should show the created network in allocated state
# 2. listRouters returns no running routers
# 3. VMs should be deployed and in Running state
# 4. should list MASTER and BACKUP routers
# 5. listPublicIpAddresses for networkid should show acquired IP
        # 6. listStaticNats and listFirewallRules should show the created
        #    rules and SSH to the guest VM should succeed
        # 7. listPortForwardingRules and listLoadBalancerRules should show
        #    the created rules and SSH should succeed on the mapped ports
# Creating network using the network offering created
self.debug("Creating network with network offering: %s" %
self.network_offering.id)
network = Network.create(
self.apiclient,
self.services["network"],
accountid=self.account.name,
domainid=self.account.domainid,
networkofferingid=self.network_offering.id,
zoneid=self.zone.id
)
self.debug("Created network with ID: %s" % network.id)
networks = Network.list(
self.apiclient,
id=network.id,
listall=True
)
self.assertEqual(
isinstance(networks, list),
True,
"List networks should return a valid response for created network"
)
nw_response = networks[0]
self.debug("Network state: %s" % nw_response.state)
self.assertEqual(
nw_response.state,
"Allocated",
"The network should be in allocated state after creation"
)
self.debug("Listing routers for network: %s" % network.name)
routers = Router.list(
self.apiclient,
networkid=network.id,
listall=True
)
self.assertEqual(
routers,
None,
"Routers should not be spawned when network is in allocated state"
)
self.debug("Deploying VM in account: %s" % self.account.name)
# Spawn an instance in that network
virtual_machine = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
networkids=[str(network.id)]
)
self.debug("Deployed VM in network: %s" % network.id)
vms = VirtualMachine.list(
self.apiclient,
id=virtual_machine.id,
listall=True
)
self.assertEqual(
isinstance(vms, list),
True,
"List Vms should return a valid list"
)
vm = vms[0]
self.assertEqual(
vm.state,
"Running",
"Vm should be in running state after deployment"
)
self.debug("Listing routers for network: %s" % network.name)
routers = Router.list(
self.apiclient,
networkid=network.id,
listall=True
)
self.assertEqual(
isinstance(routers, list),
True,
"list router should return Master and backup routers"
)
self.assertEqual(
len(routers),
2,
"Length of the list router should be 2 (Backup & master)"
)
self.debug("Associating public IP for network: %s" % network.name)
public_ip = PublicIPAddress.create(
self.apiclient,
accountid=self.account.name,
zoneid=self.zone.id,
domainid=self.account.domainid,
networkid=network.id
)
self.debug("Associated %s with network %s" % (
public_ip.ipaddress.ipaddress,
network.id
))
self.debug("Enabling static NAT for IP: %s" %
public_ip.ipaddress.ipaddress)
try:
static_nat = StaticNATRule.create(
self.apiclient,
self.services["fw_rule"],
ipaddressid=public_ip.ipaddress.id
)
self.debug("Static NAT enabled for IP: %s" %
public_ip.ipaddress.ipaddress)
static_nat.enable(
self.apiclient,
ipaddressid=public_ip.ipaddress.id,
virtualmachineid=virtual_machine.id
)
except Exception as e:
self.fail("Failed to enable static NAT on IP: %s - %s" % (
public_ip.ipaddress.ipaddress, e))
public_ips = PublicIPAddress.list(
self.apiclient,
associatednetworkid=network.id,
listall=True,
isstaticnat=True
)
self.assertEqual(
isinstance(public_ips, list),
True,
"List public Ip for network should list the Ip addr"
)
self.assertEqual(
public_ips[0].ipaddress,
public_ip.ipaddress.ipaddress,
"List public Ip for network should list the Ip addr"
)
self.debug("creating a FW rule on IP: %s" %
public_ip.ipaddress.ipaddress)
fw_rule = FireWallRule.create(
self.apiclient,
ipaddressid=public_ip.ipaddress.id,
protocol='TCP',
cidrlist=[self.services["fw_rule"]["cidr"]],
startport=self.services["fw_rule"]["startport"],
endport=self.services["fw_rule"]["endport"]
)
self.debug("Created a firewall rule on 22 port of IP: %s" %
public_ip.ipaddress.ipaddress)
self.debug("Trying to SSH into the virtual machine")
try:
virtual_machine.get_ssh_client(
ipaddress=public_ip.ipaddress.ipaddress)
self.debug("SSH to guest VM succeeded")
except Exception as e:
self.fail("SSH to guest VM failed: %s" % e)
self.debug("Associating public IP for network: %s" % network.name)
public_ip_2 = PublicIPAddress.create(
self.apiclient,
accountid=self.account.name,
zoneid=self.zone.id,
domainid=self.account.domainid,
networkid=network.id
)
self.debug("Associated %s with network %s" % (
public_ip_2.ipaddress.ipaddress,
network.id
))
nat_rule = NATRule.create(
self.apiclient,
virtual_machine,
self.services["natrule_221"],
ipaddressid=public_ip_2.ipaddress.id,
openfirewall=True
)
self.debug("Trying to SSH into the virtual machine")
try:
virtual_machine.get_ssh_client(
ipaddress=public_ip_2.ipaddress.ipaddress,
reconnect=True,
port=self.services["natrule_221"]["publicport"]
)
self.debug("SSH to guest VM succeeded")
except Exception as e:
self.fail("SSH to guest VM failed: %s" % e)
self.debug("Associating public IP for network: %s" % network.name)
public_ip_3 = PublicIPAddress.create(
self.apiclient,
accountid=self.account.name,
zoneid=self.zone.id,
domainid=self.account.domainid,
networkid=network.id
)
self.debug("Associated %s with network %s" % (
public_ip_3.ipaddress.ipaddress,
network.id
))
self.debug("Creating LB rule for IP address: %s" %
public_ip_3.ipaddress.ipaddress)
lb_rule = LoadBalancerRule.create(
self.apiclient,
self.services["lbrule"],
ipaddressid=public_ip_3.ipaddress.id,
accountid=self.account.name,
networkid=network.id
)
self.debug("Adding %s to the LB rule %s" % (
virtual_machine.name,
lb_rule.name
))
lb_rule.assign(self.apiclient, [virtual_machine])
self.debug("Trying to SSH into the virtual machine")
try:
virtual_machine.get_ssh_client(
ipaddress=public_ip_3.ipaddress.ipaddress,
reconnect=True,
port=self.services["lbrule"]["publicport"]
)
self.debug("SSH to guest VM succeeded")
except Exception as e:
self.fail("SSH to guest VM failed: %s" % e)
return
@attr(tags=["advanced", "advancedns", "ssh", "needle"])
def test_applyNetworkRules_MasterDown_deleteNetworkRules(self):
"""Test apply network rules when master down and delete network rules
"""
# Steps to validate
# 1. listNetworks should show the created network in allocated state
# 2. listRouters returns no running routers
# 3. VMs should be deployed and in Running state
# 4. should list MASTER and BACKUP routers
# 5. listPublicIpAddresses for networkid should show acquired IP addr
# 6. listStaticNats for the network associated
# 7. listFirewallRules should show allowed ports open
# 8. ssh to succeed to the guestVM
# 9. listPublicIpAddresses for networkid should show acquired IP addr
        # 10. listPortForwardingRules to show open ports 221, 222
# 11. ssh should succeed for both ports
# 12. listPublicIpAddresses for networkid should show acquired IP addr
# 13 and 14. listLoadBalancerRules should show associated VMs for
# public IP
# 15. ssh should succeed to the user VMs
# 16. listRouters should show one Router in MASTER state and Running
# 17. ssh should work for PF, FW, and LB ips
# 18. listRouters should show both routers MASTER and BACKUP in
# Running state
        # 19. listPortForwardingRules, listFirewallRules, listLoadBalancerRules
        #     should return empty response
        # 20. listPublicIpAddresses should show no more addresses
# Creating network using the network offering created
self.debug("Creating network with network offering: %s" %
self.network_offering.id)
network = Network.create(
self.apiclient,
self.services["network"],
accountid=self.account.name,
domainid=self.account.domainid,
networkofferingid=self.network_offering.id,
zoneid=self.zone.id
)
self.debug("Created network with ID: %s" % network.id)
networks = Network.list(
self.apiclient,
id=network.id,
listall=True
)
self.assertEqual(
isinstance(networks, list),
True,
"List networks should return a valid response for created network"
)
nw_response = networks[0]
self.debug("Network state: %s" % nw_response.state)
self.assertEqual(
nw_response.state,
"Allocated",
"The network should be in allocated state after creation"
)
self.debug("Listing routers for network: %s" % network.name)
routers = Router.list(
self.apiclient,
networkid=network.id,
listall=True
)
self.assertEqual(
routers,
None,
"Routers should not be spawned when network is in allocated state"
)
self.debug("Deploying VM in account: %s" % self.account.name)
# Spawn an instance in that network
virtual_machine = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
networkids=[str(network.id)]
)
self.debug("Deployed VM in network: %s" % network.id)
vms = VirtualMachine.list(
self.apiclient,
id=virtual_machine.id,
listall=True
)
self.assertEqual(
isinstance(vms, list),
True,
"List Vms should return a valid list"
)
vm = vms[0]
self.assertEqual(
vm.state,
"Running",
"Vm should be in running state after deployment"
)
self.debug("Listing routers for network: %s" % network.name)
routers = Router.list(
self.apiclient,
networkid=network.id,
listall=True
)
self.assertEqual(
isinstance(routers, list),
True,
"list router should return Master and backup routers"
)
self.assertEqual(
len(routers),
2,
"Length of the list router should be 2 (Backup & master)"
)
if routers[0].redundantstate == 'MASTER':
master_router = routers[0]
backup_router = routers[1]
else:
master_router = routers[1]
backup_router = routers[0]
self.debug("Stopping router ID: %s" % master_router.id)
try:
Router.stop(self.apiclient, id=master_router.id)
except Exception as e:
self.fail("Failed to stop master router..")
self.debug("Associating public IP for network: %s" % network.name)
public_ip = PublicIPAddress.create(
self.apiclient,
accountid=self.account.name,
zoneid=self.zone.id,
domainid=self.account.domainid,
networkid=network.id
)
self.debug("Associated %s with network %s" % (
public_ip.ipaddress.ipaddress,
network.id
))
self.debug("Enabling static NAT for IP: %s" %
public_ip.ipaddress.ipaddress)
try:
static_nat = StaticNATRule.create(
self.apiclient,
self.services["fw_rule"],
ipaddressid=public_ip.ipaddress.id
)
static_nat.enable(
self.apiclient,
ipaddressid=public_ip.ipaddress.id,
virtualmachineid=virtual_machine.id
)
self.debug("Static NAT enabled for IP: %s" %
public_ip.ipaddress.ipaddress)
except Exception as e:
self.fail("Failed to enable static NAT on IP: %s - %s" % (
public_ip.ipaddress.ipaddress, e))
public_ips = PublicIPAddress.list(
self.apiclient,
associatednetworkid=network.id,
listall=True,
isstaticnat=True
)
self.assertEqual(
isinstance(public_ips, list),
True,
"List public Ip for network should list the Ip addr"
)
self.assertEqual(
public_ips[0].ipaddress,
public_ip.ipaddress.ipaddress,
"Public Ip Address in the network created (%s) and listed (%s) do not match" % (
public_ips[0].ipaddress, public_ip.ipaddress.ipaddress)
)
self.debug("creating a FW rule on IP: %s" %
public_ip.ipaddress.ipaddress)
fw_rule = FireWallRule.create(
self.apiclient,
ipaddressid=public_ip.ipaddress.id,
protocol='TCP',
cidrlist=[self.services["fw_rule"]["cidr"]],
startport=self.services["fw_rule"]["startport"],
endport=self.services["fw_rule"]["endport"]
)
self.debug("Created a firewall rule on 22 port of IP: %s" %
public_ip.ipaddress.ipaddress)
self.debug("Trying to SSH into the virtual machine")
try:
virtual_machine.get_ssh_client(
ipaddress=public_ip.ipaddress.ipaddress)
self.debug("SSH to guest VM succeeded")
except Exception as e:
self.fail("SSH to guest VM failed: %s" % e)
self.debug("Associating public IP for network: %s" % network.name)
public_ip_2 = PublicIPAddress.create(
self.apiclient,
accountid=self.account.name,
zoneid=self.zone.id,
domainid=self.account.domainid,
networkid=network.id
)
self.debug("Associated %s with network %s" % (
public_ip_2.ipaddress.ipaddress,
network.id
))
nat_rule = NATRule.create(
self.apiclient,
virtual_machine,
self.services["natrule_221"],
ipaddressid=public_ip_2.ipaddress.id,
openfirewall=True
)
self.debug("Trying to SSH into the virtual machine")
try:
virtual_machine.get_ssh_client(
ipaddress=public_ip_2.ipaddress.ipaddress,
reconnect=True,
port=self.services["natrule_221"]["publicport"]
)
self.debug("SSH to guest VM succeeded")
except Exception as e:
self.fail("SSH to guest VM failed: %s" % e)
self.debug("Associating public IP for network: %s" % network.name)
public_ip_3 = PublicIPAddress.create(
self.apiclient,
accountid=self.account.name,
zoneid=self.zone.id,
domainid=self.account.domainid,
networkid=network.id
)
self.debug("Associated %s with network %s" % (
public_ip_3.ipaddress.ipaddress,
network.id
))
self.debug("Creating LB rule for IP address: %s" %
public_ip_3.ipaddress.ipaddress)
lb_rule = LoadBalancerRule.create(
self.apiclient,
self.services["lbrule"],
ipaddressid=public_ip_3.ipaddress.id,
accountid=self.account.name,
networkid=network.id
)
self.debug("Adding %s to the LB rule %s" % (
virtual_machine.name,
lb_rule.name
))
lb_rule.assign(self.apiclient, [virtual_machine])
self.debug("Trying to SSH into the virtual machine")
try:
virtual_machine.get_ssh_client(
ipaddress=public_ip_3.ipaddress.ipaddress,
reconnect=True,
port=self.services["lbrule"]["publicport"]
)
self.debug("SSH to guest VM succeeded")
except Exception as e:
self.fail("SSH to guest VM failed: %s" % e)
self.debug("Starting router ID: %s" % master_router.id)
try:
Router.start(self.apiclient, id=master_router.id)
except Exception as e:
self.fail("Failed to start master router..")
self.debug("Listing routers for network: %s" % network.name)
routers = Router.list(
self.apiclient,
networkid=network.id,
listall=True
)
self.assertEqual(
isinstance(routers, list),
True,
"list router should return Master and backup routers"
)
self.assertEqual(
len(routers),
2,
"Length of the list router should be 2 (Backup & master)"
)
for router in routers:
self.assertEqual(
router.state,
"Running",
"Router state should be running"
)
return
| try:
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
self.debug("Warning: Exception during cleanup : %s" % e)
#raise Exception("Warning: Exception during cleanup : %s" % e)
return |
route.go | // Copyright 2018 Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package route
import (
"fmt"
"sort"
"strconv"
"strings"
"time"
core "github.com/envoyproxy/go-control-plane/envoy/api/v2/core"
route "github.com/envoyproxy/go-control-plane/envoy/api/v2/route"
xdsfault "github.com/envoyproxy/go-control-plane/envoy/config/filter/fault/v2"
xdshttpfault "github.com/envoyproxy/go-control-plane/envoy/config/filter/http/fault/v2"
xdstype "github.com/envoyproxy/go-control-plane/envoy/type"
matcher "github.com/envoyproxy/go-control-plane/envoy/type/matcher"
xdsutil "github.com/envoyproxy/go-control-plane/pkg/wellknown"
"github.com/golang/protobuf/ptypes"
"github.com/golang/protobuf/ptypes/any"
"github.com/golang/protobuf/ptypes/duration"
"github.com/golang/protobuf/ptypes/wrappers"
"istio.io/istio/pkg/util/gogo"
networking "istio.io/api/networking/v1alpha3"
"istio.io/pkg/log"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/networking/core/v1alpha3/route/retry"
"istio.io/istio/pilot/pkg/networking/util"
"istio.io/istio/pkg/config/constants"
"istio.io/istio/pkg/config/host"
"istio.io/istio/pkg/config/labels"
)
// Headers with special meaning in Envoy
const (
HeaderMethod = ":method"
HeaderAuthority = ":authority"
HeaderScheme = ":scheme"
)
// DefaultRouteName is the name assigned to a route generated by default in absence of a virtual service.
const DefaultRouteName = "default"
// maxRegExProgramSize defines the max regex complexity supported. 1024 is a safe default and should work
// for most cases. We should look to make it configurable if this is not sufficient.
// Note that this is different from length of regex.
// Refer to https://github.com/google/re2/blob/a98fad02c421896bc75d97f49ccd245cdce7dd55/re2/re2.h#L287 for details.
const maxRegExProgramSize = 1024
var (
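	// regexEngine is the Google RE2 engine configuration shared by all safe-regex matchers, capped at maxRegExProgramSize.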
regexEngine = &matcher.RegexMatcher_GoogleRe2{GoogleRe2: &matcher.RegexMatcher_GoogleRE2{
MaxProgramSize: &wrappers.UInt32Value{
Value: uint32(maxRegExProgramSize),
},
}}
)
// VirtualHostWrapper is a context-dependent virtual host entry with guarded routes.
// Note: Currently we are not fully utilizing this structure. We could invoke this logic
// once for all sidecars in the cluster to compute all RDS for inside the mesh and arrange
// it by listener port. However to properly use such an optimization, we need to have an
// eventing subsystem to invalidate the computed routes if any service changes/virtual services change.
type VirtualHostWrapper struct {
// Port is the listener port for outbound sidecar (e.g. service port)
Port int
// Services are the services from the registry. Each service
// in this list should have a virtual host entry
Services []*model.Service
// VirtualServiceHosts is a list of hosts defined in the virtual service
	// if virtual service hostname is same as the service registry host, then | // the host would appear in Services as we need to generate all variants of the
// service's hostname within a platform (e.g., foo, foo.default, foo.default.svc, etc.)
VirtualServiceHosts []string
// Routes in the virtual host
Routes []*route.Route
}
// BuildSidecarVirtualHostsFromConfigAndRegistry creates virtual hosts from
// the given set of virtual services and a list of services from the
// service registry. Services are indexed by FQDN hostnames.
func BuildSidecarVirtualHostsFromConfigAndRegistry(
node *model.Proxy,
push *model.PushContext,
serviceRegistry map[host.Name]*model.Service,
virtualServices []model.Config,
listenPort int) []VirtualHostWrapper {
out := make([]VirtualHostWrapper, 0)
// translate all virtual service configs into virtual hosts
for _, virtualService := range virtualServices {
wrappers := buildSidecarVirtualHostsForVirtualService(node, push, virtualService, serviceRegistry, listenPort)
if len(wrappers) == 0 {
// If none of the routes matched by source (i.e. proxyLabels), then discard this entire virtual service
continue
}
out = append(out, wrappers...)
}
// compute services missing virtual service configs
missing := make(map[host.Name]bool)
for fqdn := range serviceRegistry {
missing[fqdn] = true
}
for _, wrapper := range out {
for _, service := range wrapper.Services {
delete(missing, service.Hostname)
}
}
// append default hosts for the service missing virtual services
for fqdn := range missing {
svc := serviceRegistry[fqdn]
for _, port := range svc.Ports {
if port.Protocol.IsHTTP() || util.IsProtocolSniffingEnabledForPort(node, port) {
cluster := model.BuildSubsetKey(model.TrafficDirectionOutbound, "", svc.Hostname, port.Port)
traceOperation := fmt.Sprintf("%s:%d/*", svc.Hostname, port.Port)
httpRoute := BuildDefaultHTTPOutboundRoute(node, cluster, traceOperation)
// if this host has no virtualservice, the consistentHash on its destinationRule will be useless
if hashPolicy := getHashPolicyByService(node, push, svc, port); hashPolicy != nil {
httpRoute.GetRoute().HashPolicy = []*route.RouteAction_HashPolicy{hashPolicy}
}
out = append(out, VirtualHostWrapper{
Port: port.Port,
Services: []*model.Service{svc},
Routes: []*route.Route{httpRoute},
})
}
}
}
return out
}
// separateVSHostsAndServices splits the virtual service hosts into services (if they are found in the registry) and
// plain non-registry hostnames
func separateVSHostsAndServices(virtualService model.Config,
serviceRegistry map[host.Name]*model.Service) ([]string, []*model.Service) {
rule := virtualService.Spec.(*networking.VirtualService)
hosts := make([]string, 0)
servicesInVirtualService := make([]*model.Service, 0)
for _, hostname := range rule.Hosts {
// Say host is *.global
vsHostname := host.Name(hostname)
foundSvcMatch := false
		// TODO: Optimize me. This is O(n^2) or worse. Need to prune at top level in config
// Say we have services *.foo.global, *.bar.global
for svcHost, svc := range serviceRegistry {
// *.foo.global matches *.global
if svcHost.Matches(vsHostname) {
servicesInVirtualService = append(servicesInVirtualService, svc)
foundSvcMatch = true
}
}
if !foundSvcMatch {
hosts = append(hosts, hostname)
}
}
return hosts, servicesInVirtualService
}
// buildSidecarVirtualHostsForVirtualService creates virtual hosts corresponding to a virtual service.
// Called for each port to determine the list of vhosts on the given port.
// It may return an empty list if no VirtualService rule has a matching service.
func buildSidecarVirtualHostsForVirtualService(
node *model.Proxy,
push *model.PushContext,
virtualService model.Config,
serviceRegistry map[host.Name]*model.Service,
listenPort int) []VirtualHostWrapper {
hosts, servicesInVirtualService := separateVSHostsAndServices(virtualService, serviceRegistry)
// Now group these services by port so that we can infer the destination.port if the user
// doesn't specify any port for a multiport service. We need to know the destination port in
// order to build the cluster name (outbound|<port>|<subset>|<serviceFQDN>)
// If the destination service is being accessed on port X, we set that as the default
// destination port
serviceByPort := make(map[int][]*model.Service)
for _, svc := range servicesInVirtualService {
for _, port := range svc.Ports {
if port.Protocol.IsHTTP() || util.IsProtocolSniffingEnabledForPort(node, port) {
serviceByPort[port.Port] = append(serviceByPort[port.Port], svc)
}
}
}
// We need to group the virtual hosts by port, because each http connection manager is
// going to send a separate RDS request
// Note that we need to build non-default HTTP routes only for the virtual services.
// The services in the serviceRegistry will always have a default route (/)
if len(serviceByPort) == 0 {
// This is a gross HACK. Fix me. Its a much bigger surgery though, due to the way
// the current code is written.
serviceByPort[80] = nil
}
meshGateway := map[string]bool{constants.IstioMeshGateway: true}
out := make([]VirtualHostWrapper, 0, len(serviceByPort))
for port, portServices := range serviceByPort {
routes, err := BuildHTTPRoutesForVirtualService(node, push, virtualService, serviceRegistry, listenPort, meshGateway)
if err != nil || len(routes) == 0 {
continue
}
out = append(out, VirtualHostWrapper{
Port: port,
Services: portServices,
VirtualServiceHosts: hosts,
Routes: routes,
})
}
return out
}
// GetDestinationCluster generates a cluster name for the route, or error if no cluster
// can be found. Called by translateRoute to determine the outbound cluster for each route destination.
func GetDestinationCluster(destination *networking.Destination, service *model.Service, listenerPort int) string {
port := listenerPort
if destination.GetPort() != nil {
port = int(destination.GetPort().GetNumber())
} else if service != nil && len(service.Ports) == 1 {
// if service only has one port defined, use that as the port, otherwise use default listenerPort
port = service.Ports[0].Port
// Do not return blackhole cluster for service==nil case as there is a legitimate use case for
// calling this function with nil service: to route to a pre-defined statically configured cluster
// declared as part of the bootstrap.
// If blackhole cluster is needed, do the check on the caller side. See gateway and tls.go for examples.
}
return model.BuildSubsetKey(model.TrafficDirectionOutbound, destination.Subset, host.Name(destination.Host), port)
}
// BuildHTTPRoutesForVirtualService creates data plane HTTP routes from the virtual service spec.
// The rule should be adapted to destination names (outbound clusters).
// Each rule is guarded by source labels.
//
// This is called for each port to compute virtual hosts.
// Each VirtualService is tried, with a list of services that listen on the port.
// Error indicates the given virtualService can't be used on the port.
// This function is used by both the gateway and the sidecar
func BuildHTTPRoutesForVirtualService(
node *model.Proxy,
push *model.PushContext,
virtualService model.Config,
serviceRegistry map[host.Name]*model.Service,
listenPort int,
gatewayNames map[string]bool) ([]*route.Route, error) {
vs, ok := virtualService.Spec.(*networking.VirtualService)
if !ok { // should never happen
return nil, fmt.Errorf("in not a virtual service: %#v", virtualService)
}
out := make([]*route.Route, 0, len(vs.Http))
for _, http := range vs.Http {
if len(http.Match) == 0 {
if r := translateRoute(push, node, http, nil, listenPort, virtualService, serviceRegistry, gatewayNames); r != nil {
out = append(out, r)
}
// We have a rule with catch all match prefix: /. Other rules are of no use.
break
} else {
if match := catchAllMatch(http); match != nil {
// We have a catch all match block in the route, check if it is valid - A catch all match block is not valid
// (translateRoute returns nil), if source or port match fails.
if r := translateRoute(push, node, http, match, listenPort, virtualService, serviceRegistry, gatewayNames); r != nil {
// We have a valid catch all route. No point building other routes, with match conditions.
out = append(out, r)
break
}
}
for _, match := range http.Match {
if r := translateRoute(push, node, http, match, listenPort, virtualService, serviceRegistry, gatewayNames); r != nil {
out = append(out, r)
}
}
}
}
if len(out) == 0 {
return nil, fmt.Errorf("no routes matched")
}
return out, nil
}
// sourceMatchHttp checks if the sourceLabels or the gateways in a match condition match with the
// labels for the proxy or the gateway name for which we are generating a route
func sourceMatchHTTP(match *networking.HTTPMatchRequest, proxyLabels labels.Collection, gatewayNames map[string]bool) bool {
if match == nil {
return true
}
// Trim by source labels or mesh gateway
if len(match.Gateways) > 0 {
for _, g := range match.Gateways {
if gatewayNames[g] {
return true
}
}
} else if proxyLabels.IsSupersetOf(match.GetSourceLabels()) {
return true
}
return false
}
// translateRoute translates HTTP routes
func translateRoute(push *model.PushContext, node *model.Proxy, in *networking.HTTPRoute,
match *networking.HTTPMatchRequest, port int,
virtualService model.Config,
serviceRegistry map[host.Name]*model.Service,
gatewayNames map[string]bool) *route.Route {
	// When building routes, it's okay if the target cluster cannot be
	// resolved. Traffic to such clusters will blackhole.
// Match by source labels/gateway names inside the match condition
if !sourceMatchHTTP(match, labels.Collection{node.Metadata.Labels}, gatewayNames) {
return nil
}
// Match by the destination port specified in the match condition
if match != nil && match.Port != 0 && match.Port != uint32(port) {
return nil
}
out := &route.Route{
Match: translateRouteMatch(match, node),
Metadata: util.BuildConfigInfoMetadata(virtualService.ConfigMeta),
}
if util.IsIstioVersionGE13(node) {
routeName := in.Name
if match != nil && match.Name != "" {
routeName = routeName + "." + match.Name
}
out.Name = routeName
// add a name to the route
}
out.TypedPerFilterConfig = make(map[string]*any.Any)
if redirect := in.Redirect; redirect != nil {
action := &route.Route_Redirect{
Redirect: &route.RedirectAction{
HostRedirect: redirect.Authority,
PathRewriteSpecifier: &route.RedirectAction_PathRedirect{
PathRedirect: redirect.Uri,
},
}}
switch in.Redirect.RedirectCode {
case 0, 301:
action.Redirect.ResponseCode = route.RedirectAction_MOVED_PERMANENTLY
case 302:
action.Redirect.ResponseCode = route.RedirectAction_FOUND
case 303:
action.Redirect.ResponseCode = route.RedirectAction_SEE_OTHER
case 307:
action.Redirect.ResponseCode = route.RedirectAction_TEMPORARY_REDIRECT
case 308:
action.Redirect.ResponseCode = route.RedirectAction_PERMANENT_REDIRECT
default:
log.Warnf("Redirect Code %d are not yet supported", in.Redirect.RedirectCode)
action = nil
}
out.Action = action
} else {
action := &route.RouteAction{
Cors: translateCORSPolicy(in.CorsPolicy),
RetryPolicy: retry.ConvertPolicy(in.Retries),
}
if in.Timeout != nil {
d := gogo.DurationToProtoDuration(in.Timeout)
// timeout
action.Timeout = d
action.MaxGrpcTimeout = d
} else {
// if no timeout is specified, disable timeouts. This is easier
// to reason about than assuming some defaults.
d := ptypes.DurationProto(0 * time.Second)
action.Timeout = d
action.MaxGrpcTimeout = d
}
out.Action = &route.Route_Route{Route: action}
if rewrite := in.Rewrite; rewrite != nil {
action.PrefixRewrite = rewrite.Uri
action.HostRewriteSpecifier = &route.RouteAction_HostRewrite{
HostRewrite: rewrite.Authority,
}
}
requestHeadersToAdd := translateAppendHeaders(in.Headers.GetRequest().GetSet(), false)
requestHeadersToAdd = append(requestHeadersToAdd, translateAppendHeaders(in.Headers.GetRequest().GetAdd(), true)...)
out.RequestHeadersToAdd = requestHeadersToAdd
responseHeadersToAdd := translateAppendHeaders(in.Headers.GetResponse().GetSet(), false)
responseHeadersToAdd = append(responseHeadersToAdd, translateAppendHeaders(in.Headers.GetResponse().GetAdd(), true)...)
out.ResponseHeadersToAdd = responseHeadersToAdd
requestHeadersToRemove := make([]string, 0)
requestHeadersToRemove = append(requestHeadersToRemove, in.Headers.GetRequest().GetRemove()...)
out.RequestHeadersToRemove = requestHeadersToRemove
responseHeadersToRemove := make([]string, 0)
responseHeadersToRemove = append(responseHeadersToRemove, in.Headers.GetResponse().GetRemove()...)
out.ResponseHeadersToRemove = responseHeadersToRemove
if in.Mirror != nil {
if mp := mirrorPercent(in); mp != nil {
action.RequestMirrorPolicy = &route.RouteAction_RequestMirrorPolicy{
Cluster: GetDestinationCluster(in.Mirror, serviceRegistry[host.Name(in.Mirror.Host)], port),
RuntimeFraction: mp,
}
}
}
// TODO: eliminate this logic and use the total_weight option in envoy route
weighted := make([]*route.WeightedCluster_ClusterWeight, 0)
for _, dst := range in.Route {
weight := &wrappers.UInt32Value{Value: uint32(dst.Weight)}
if dst.Weight == 0 {
// Ignore 0 weighted clusters if there are other clusters in the route.
// But if this is the only cluster in the route, then add it as a cluster with weight 100
if len(in.Route) == 1 {
weight.Value = uint32(100)
} else {
continue
}
}
requestHeadersToAdd := translateAppendHeaders(dst.Headers.GetRequest().GetSet(), false)
requestHeadersToAdd = append(requestHeadersToAdd, translateAppendHeaders(dst.Headers.GetRequest().GetAdd(), true)...)
responseHeadersToAdd := translateAppendHeaders(dst.Headers.GetResponse().GetSet(), false)
responseHeadersToAdd = append(responseHeadersToAdd, translateAppendHeaders(dst.Headers.GetResponse().GetAdd(), true)...)
requestHeadersToRemove := make([]string, 0)
requestHeadersToRemove = append(requestHeadersToRemove, dst.Headers.GetRequest().GetRemove()...)
responseHeadersToRemove := make([]string, 0)
responseHeadersToRemove = append(responseHeadersToRemove, dst.Headers.GetResponse().GetRemove()...)
hostname := host.Name(dst.GetDestination().GetHost())
n := GetDestinationCluster(dst.Destination, serviceRegistry[hostname], port)
clusterWeight := &route.WeightedCluster_ClusterWeight{
Name: n,
Weight: weight,
RequestHeadersToAdd: requestHeadersToAdd,
RequestHeadersToRemove: requestHeadersToRemove,
ResponseHeadersToAdd: responseHeadersToAdd,
ResponseHeadersToRemove: responseHeadersToRemove,
}
weighted = append(weighted, clusterWeight)
var configNamespace string
if serviceRegistry[hostname] != nil {
configNamespace = serviceRegistry[hostname].Attributes.Namespace
}
hashPolicy := getHashPolicy(push, node, dst, configNamespace)
if hashPolicy != nil {
action.HashPolicy = append(action.HashPolicy, hashPolicy)
}
}
// rewrite to a single cluster if there is only weighted cluster
if len(weighted) == 1 {
action.ClusterSpecifier = &route.RouteAction_Cluster{Cluster: weighted[0].Name}
out.RequestHeadersToAdd = append(out.RequestHeadersToAdd, weighted[0].RequestHeadersToAdd...)
out.RequestHeadersToRemove = append(out.RequestHeadersToRemove, weighted[0].RequestHeadersToRemove...)
out.ResponseHeadersToAdd = append(out.ResponseHeadersToAdd, weighted[0].ResponseHeadersToAdd...)
out.ResponseHeadersToRemove = append(out.ResponseHeadersToRemove, weighted[0].ResponseHeadersToRemove...)
} else {
action.ClusterSpecifier = &route.RouteAction_WeightedClusters{
WeightedClusters: &route.WeightedCluster{
Clusters: weighted,
},
}
}
}
out.Decorator = &route.Decorator{
Operation: getRouteOperation(out, virtualService.Name, port),
}
if fault := in.Fault; fault != nil {
out.TypedPerFilterConfig[xdsutil.Fault] = util.MessageToAny(translateFault(in.Fault))
}
return out
}
// SortHeaderValueOption type and the functions below (Len, Less and Swap) are for sort.Stable for type HeaderValueOption
type SortHeaderValueOption []*core.HeaderValueOption
// mirrorPercent computes the mirror percent to be used based on "Mirror" data in route.
func mirrorPercent(in *networking.HTTPRoute) *core.RuntimeFractionalPercent {
switch {
case in.MirrorPercentage != nil:
if in.MirrorPercentage.GetValue() > 0 {
return &core.RuntimeFractionalPercent{
DefaultValue: translatePercentToFractionalPercent(in.MirrorPercentage),
}
}
// If zero percent is provided explicitly, we should not mirror.
return nil
case in.MirrorPercent != nil:
if in.MirrorPercent.GetValue() > 0 {
return &core.RuntimeFractionalPercent{
DefaultValue: translateIntegerToFractionalPercent((int32(in.MirrorPercent.GetValue()))),
}
}
// If zero percent is provided explicitly, we should not mirror.
return nil
default:
// Default to 100 percent if percent is not given.
return &core.RuntimeFractionalPercent{
DefaultValue: translateIntegerToFractionalPercent(100),
}
}
}
// Len is in the sort.Interface for SortHeaderValueOption
func (b SortHeaderValueOption) Len() int {
return len(b)
}
// Less is in the sort.Interface for SortHeaderValueOption
func (b SortHeaderValueOption) Less(i, j int) bool {
if b[i] == nil || b[i].Header == nil {
return false
} else if b[j] == nil || b[j].Header == nil {
return true
}
return strings.Compare(b[i].Header.Key, b[j].Header.Key) < 0
}
// Swap is in the sort.Interface for SortHeaderValueOption
func (b SortHeaderValueOption) Swap(i, j int) {
b[i], b[j] = b[j], b[i]
}
// translateAppendHeaders translates headers
func translateAppendHeaders(headers map[string]string, appendFlag bool) []*core.HeaderValueOption {
headerValueOptionList := make([]*core.HeaderValueOption, 0, len(headers))
for key, value := range headers {
headerValueOptionList = append(headerValueOptionList, &core.HeaderValueOption{
Header: &core.HeaderValue{
Key: key,
Value: value,
},
Append: &wrappers.BoolValue{Value: appendFlag},
})
}
sort.Stable(SortHeaderValueOption(headerValueOptionList))
return headerValueOptionList
}
// translateRouteMatch translates match condition
func translateRouteMatch(in *networking.HTTPMatchRequest, node *model.Proxy) *route.RouteMatch {
out := &route.RouteMatch{PathSpecifier: &route.RouteMatch_Prefix{Prefix: "/"}}
if in == nil {
return out
}
for name, stringMatch := range in.Headers {
matcher := translateHeaderMatch(name, stringMatch, node)
out.Headers = append(out.Headers, &matcher)
}
if util.IsIstioVersionGE14(node) {
for name, stringMatch := range in.WithoutHeaders {
matcher := translateHeaderMatch(name, stringMatch, node)
matcher.InvertMatch = true
out.Headers = append(out.Headers, &matcher)
}
}
// guarantee ordering of headers
sort.Slice(out.Headers, func(i, j int) bool {
return out.Headers[i].Name < out.Headers[j].Name
})
if in.Uri != nil {
switch m := in.Uri.MatchType.(type) {
case *networking.StringMatch_Exact:
out.PathSpecifier = &route.RouteMatch_Path{Path: m.Exact}
case *networking.StringMatch_Prefix:
out.PathSpecifier = &route.RouteMatch_Prefix{Prefix: m.Prefix}
case *networking.StringMatch_Regex:
if !util.IsIstioVersionGE14(node) {
out.PathSpecifier = &route.RouteMatch_Regex{Regex: m.Regex}
} else {
out.PathSpecifier = &route.RouteMatch_SafeRegex{
SafeRegex: &matcher.RegexMatcher{
EngineType: &matcher.RegexMatcher_GoogleRe2{GoogleRe2: &matcher.RegexMatcher_GoogleRE2{
MaxProgramSize: &wrappers.UInt32Value{
Value: uint32(maxRegExProgramSize),
},
}},
Regex: m.Regex,
},
}
}
}
}
out.CaseSensitive = &wrappers.BoolValue{Value: !in.IgnoreUriCase}
if in.Method != nil {
matcher := translateHeaderMatch(HeaderMethod, in.Method, node)
out.Headers = append(out.Headers, &matcher)
}
if in.Authority != nil {
matcher := translateHeaderMatch(HeaderAuthority, in.Authority, node)
out.Headers = append(out.Headers, &matcher)
}
if in.Scheme != nil {
matcher := translateHeaderMatch(HeaderScheme, in.Scheme, node)
out.Headers = append(out.Headers, &matcher)
}
for name, stringMatch := range in.QueryParams {
matcher := translateQueryParamMatch(name, stringMatch)
out.QueryParameters = append(out.QueryParameters, &matcher)
}
return out
}
// translateQueryParamMatch translates a StringMatch to a QueryParameterMatcher.
func translateQueryParamMatch(name string, in *networking.StringMatch) route.QueryParameterMatcher {
out := route.QueryParameterMatcher{
Name: name,
}
switch m := in.MatchType.(type) {
case *networking.StringMatch_Exact:
out.QueryParameterMatchSpecifier = &route.QueryParameterMatcher_StringMatch{
StringMatch: &matcher.StringMatcher{MatchPattern: &matcher.StringMatcher_Exact{Exact: m.Exact}},
}
case *networking.StringMatch_Regex:
out.QueryParameterMatchSpecifier = &route.QueryParameterMatcher_StringMatch{
StringMatch: &matcher.StringMatcher{MatchPattern: &matcher.StringMatcher_SafeRegex{
SafeRegex: &matcher.RegexMatcher{
EngineType: &matcher.RegexMatcher_GoogleRe2{GoogleRe2: &matcher.RegexMatcher_GoogleRE2{
MaxProgramSize: &wrappers.UInt32Value{
Value: uint32(maxRegExProgramSize),
},
}},
Regex: m.Regex,
},
},
}}
}
return out
}
// isCatchAllHeaderMatch determines if the given header is matched with all strings or not.
// Currently, if the regex has "*" value, it returns true
func isCatchAllHeaderMatch(in *networking.StringMatch) bool {
catchall := false
if in == nil {
return true
}
switch m := in.MatchType.(type) {
case *networking.StringMatch_Regex:
catchall = m.Regex == "*"
}
return catchall
}
// translateHeaderMatch translates to HeaderMatcher
func translateHeaderMatch(name string, in *networking.StringMatch, node *model.Proxy) route.HeaderMatcher {
out := route.HeaderMatcher{
Name: name,
}
if isCatchAllHeaderMatch(in) {
out.HeaderMatchSpecifier = &route.HeaderMatcher_PresentMatch{PresentMatch: true}
return out
}
switch m := in.MatchType.(type) {
case *networking.StringMatch_Exact:
out.HeaderMatchSpecifier = &route.HeaderMatcher_ExactMatch{ExactMatch: m.Exact}
case *networking.StringMatch_Prefix:
// Envoy regex grammar is ECMA-262 (http://en.cppreference.com/w/cpp/regex/ecmascript)
// Golang has a slightly different regex grammar
out.HeaderMatchSpecifier = &route.HeaderMatcher_PrefixMatch{PrefixMatch: m.Prefix}
case *networking.StringMatch_Regex:
if !util.IsIstioVersionGE14(node) {
out.HeaderMatchSpecifier = &route.HeaderMatcher_RegexMatch{RegexMatch: m.Regex}
} else {
out.HeaderMatchSpecifier = &route.HeaderMatcher_SafeRegexMatch{
SafeRegexMatch: &matcher.RegexMatcher{
EngineType: regexEngine,
Regex: m.Regex,
},
}
}
}
return out
}
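// stringToExactMatch converts a list of literal strings into Envoy exact-match StringMatchers.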
func stringToExactMatch(in []string) []*matcher.StringMatcher {
res := make([]*matcher.StringMatcher, 0, len(in))
for _, s := range in {
res = append(res, &matcher.StringMatcher{
MatchPattern: &matcher.StringMatcher_Exact{Exact: s},
})
}
return res
}
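// convertToEnvoyMatch converts Istio StringMatch entries (exact, prefix or regex) into Envoy StringMatchers.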
func convertToEnvoyMatch(in []*networking.StringMatch) []*matcher.StringMatcher {
res := make([]*matcher.StringMatcher, 0, len(in))
for _, istioMatcher := range in {
switch m := istioMatcher.MatchType.(type) {
case *networking.StringMatch_Exact:
res = append(res, &matcher.StringMatcher{MatchPattern: &matcher.StringMatcher_Exact{m.Exact}})
case *networking.StringMatch_Prefix:
res = append(res, &matcher.StringMatcher{MatchPattern: &matcher.StringMatcher_Prefix{m.Prefix}})
case *networking.StringMatch_Regex:
res = append(res, &matcher.StringMatcher{MatchPattern: &matcher.StringMatcher_SafeRegex{
SafeRegex: &matcher.RegexMatcher{
EngineType: regexEngine,
Regex: m.Regex,
},
},
})
}
}
return res
}
// translateCORSPolicy translates CORS policy
func translateCORSPolicy(in *networking.CorsPolicy) *route.CorsPolicy {
if in == nil {
return nil
}
// CORS filter is enabled by default
out := route.CorsPolicy{}
if in.AllowOrigin != nil {
out.AllowOriginStringMatch = stringToExactMatch(in.AllowOrigin)
} else {
out.AllowOriginStringMatch = convertToEnvoyMatch(in.AllowOrigins)
}
out.EnabledSpecifier = &route.CorsPolicy_FilterEnabled{
FilterEnabled: &core.RuntimeFractionalPercent{
DefaultValue: &xdstype.FractionalPercent{
Numerator: 100,
Denominator: xdstype.FractionalPercent_HUNDRED,
},
},
}
out.AllowCredentials = gogo.BoolToProtoBool(in.AllowCredentials)
out.AllowHeaders = strings.Join(in.AllowHeaders, ",")
out.AllowMethods = strings.Join(in.AllowMethods, ",")
out.ExposeHeaders = strings.Join(in.ExposeHeaders, ",")
if in.MaxAge != nil {
out.MaxAge = strconv.FormatInt(in.MaxAge.GetSeconds(), 10)
}
return &out
}
// getRouteOperation returns readable route description for trace.
func getRouteOperation(in *route.Route, vsName string, port int) string {
path := "/*"
m := in.GetMatch()
ps := m.GetPathSpecifier()
if ps != nil {
switch ps.(type) {
case *route.RouteMatch_Prefix:
path = m.GetPrefix() + "*"
case *route.RouteMatch_Path:
path = m.GetPath()
case *route.RouteMatch_Regex:
//nolint: staticcheck
path = m.GetRegex()
case *route.RouteMatch_SafeRegex:
path = m.GetSafeRegex().GetRegex()
}
}
// If there is only one destination cluster in route, return host:port/uri as description of route.
// Otherwise there are multiple destination clusters and destination host is not clear. For that case
	// return virtual service name:port/uri as substitute.
if c := in.GetRoute().GetCluster(); model.IsValidSubsetKey(c) {
// Parse host and port from cluster name.
_, _, h, p := model.ParseSubsetKey(c)
return string(h) + ":" + strconv.Itoa(p) + path
}
return vsName + ":" + strconv.Itoa(port) + path
}
// BuildDefaultHTTPInboundRoute builds a default inbound route.
func BuildDefaultHTTPInboundRoute(node *model.Proxy, clusterName string, operation string) *route.Route {
notimeout := ptypes.DurationProto(0 * time.Second)
val := &route.Route{
Match: translateRouteMatch(nil, node),
Decorator: &route.Decorator{
Operation: operation,
},
Action: &route.Route_Route{
Route: &route.RouteAction{
ClusterSpecifier: &route.RouteAction_Cluster{Cluster: clusterName},
Timeout: notimeout,
MaxGrpcTimeout: notimeout,
},
},
}
if util.IsIstioVersionGE13(node) {
val.Name = DefaultRouteName
}
return val
}
// BuildDefaultHTTPOutboundRoute builds a default outbound route, including a retry policy.
func BuildDefaultHTTPOutboundRoute(node *model.Proxy, clusterName string, operation string) *route.Route {
// Start with the same configuration as for inbound.
out := BuildDefaultHTTPInboundRoute(node, clusterName, operation)
// Add a default retry policy for outbound routes.
out.GetRoute().RetryPolicy = retry.DefaultPolicy()
return out
}
// translatePercentToFractionalPercent translates a v1alpha3 Percent instance
// to an envoy.type.FractionalPercent instance.
func translatePercentToFractionalPercent(p *networking.Percent) *xdstype.FractionalPercent {
return &xdstype.FractionalPercent{
Numerator: uint32(p.Value * 10000),
Denominator: xdstype.FractionalPercent_MILLION,
}
}
// translateIntegerToFractionalPercent translates an int32 instance to an
// envoy.type.FractionalPercent instance.
func translateIntegerToFractionalPercent(p int32) *xdstype.FractionalPercent {
return &xdstype.FractionalPercent{
Numerator: uint32(p),
Denominator: xdstype.FractionalPercent_HUNDRED,
}
}
// translateFault translates networking.HTTPFaultInjection into Envoy's HTTPFault
func translateFault(in *networking.HTTPFaultInjection) *xdshttpfault.HTTPFault {
if in == nil {
return nil
}
out := xdshttpfault.HTTPFault{}
if in.Delay != nil {
out.Delay = &xdsfault.FaultDelay{Type: xdsfault.FaultDelay_FIXED}
if in.Delay.Percentage != nil {
out.Delay.Percentage = translatePercentToFractionalPercent(in.Delay.Percentage)
} else {
out.Delay.Percentage = translateIntegerToFractionalPercent(in.Delay.Percent)
}
switch d := in.Delay.HttpDelayType.(type) {
case *networking.HTTPFaultInjection_Delay_FixedDelay:
out.Delay.FaultDelaySecifier = &xdsfault.FaultDelay_FixedDelay{
FixedDelay: gogo.DurationToProtoDuration(d.FixedDelay),
}
default:
log.Warnf("Exponential faults are not yet supported")
out.Delay = nil
}
}
if in.Abort != nil {
out.Abort = &xdshttpfault.FaultAbort{}
if in.Abort.Percentage != nil {
out.Abort.Percentage = translatePercentToFractionalPercent(in.Abort.Percentage)
}
switch a := in.Abort.ErrorType.(type) {
case *networking.HTTPFaultInjection_Abort_HttpStatus:
out.Abort.ErrorType = &xdshttpfault.FaultAbort_HttpStatus{
HttpStatus: uint32(a.HttpStatus),
}
default:
log.Warnf("Non-HTTP type abort faults are not yet supported")
out.Abort = nil
}
}
if out.Delay == nil && out.Abort == nil {
return nil
}
return &out
}
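// portLevelSettingsConsistentHash returns the consistent hash LB settings of the port-level traffic
// policy that matches the destination's port, if any.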
func portLevelSettingsConsistentHash(dst *networking.Destination,
pls []*networking.TrafficPolicy_PortTrafficPolicy) *networking.LoadBalancerSettings_ConsistentHashLB {
if dst.Port != nil {
portNumber := dst.GetPort().GetNumber()
for _, setting := range pls {
number := setting.GetPort().GetNumber()
if number == portNumber {
return setting.GetLoadBalancer().GetConsistentHash()
}
}
}
return nil
}
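// consistentHashToHashPolicy converts consistent hash LB settings into an Envoy route hash policy
// based on an HTTP header, an HTTP cookie or the source IP.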
func consistentHashToHashPolicy(consistentHash *networking.LoadBalancerSettings_ConsistentHashLB) *route.RouteAction_HashPolicy {
switch consistentHash.GetHashKey().(type) {
case *networking.LoadBalancerSettings_ConsistentHashLB_HttpHeaderName:
return &route.RouteAction_HashPolicy{
PolicySpecifier: &route.RouteAction_HashPolicy_Header_{
Header: &route.RouteAction_HashPolicy_Header{
HeaderName: consistentHash.GetHttpHeaderName(),
},
},
}
case *networking.LoadBalancerSettings_ConsistentHashLB_HttpCookie:
cookie := consistentHash.GetHttpCookie()
var ttl *duration.Duration
if cookie.GetTtl() != nil {
ttl = ptypes.DurationProto(*cookie.GetTtl())
}
return &route.RouteAction_HashPolicy{
PolicySpecifier: &route.RouteAction_HashPolicy_Cookie_{
Cookie: &route.RouteAction_HashPolicy_Cookie{
Name: cookie.GetName(),
Ttl: ttl,
Path: cookie.GetPath(),
},
},
}
case *networking.LoadBalancerSettings_ConsistentHashLB_UseSourceIp:
return &route.RouteAction_HashPolicy{
PolicySpecifier: &route.RouteAction_HashPolicy_ConnectionProperties_{
ConnectionProperties: &route.RouteAction_HashPolicy_ConnectionProperties{
SourceIp: consistentHash.GetUseSourceIp(),
},
},
}
}
return nil
}
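// getHashPolicyByService derives the route hash policy for a service port from its destination rule,
// with port-level consistent hash settings taking precedence over the rule-level ones.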
func getHashPolicyByService(node *model.Proxy, push *model.PushContext, svc *model.Service, port *model.Port) *route.RouteAction_HashPolicy {
if push == nil {
return nil
}
destinationRule := push.DestinationRule(node, svc)
if destinationRule == nil {
return nil
}
rule := destinationRule.Spec.(*networking.DestinationRule)
consistentHash := rule.GetTrafficPolicy().GetLoadBalancer().GetConsistentHash()
portLevelSettings := rule.GetTrafficPolicy().GetPortLevelSettings()
for _, setting := range portLevelSettings {
number := setting.GetPort().GetNumber()
if int(number) == port.Port {
consistentHash = setting.GetLoadBalancer().GetConsistentHash()
break
}
}
return consistentHashToHashPolicy(consistentHash)
}
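// getHashPolicy derives the route hash policy for an HTTP route destination from its destination rule,
// preferring subset port-level settings, then subset settings, then rule port-level settings, and
// finally the rule's top-level load balancer settings.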
func getHashPolicy(push *model.PushContext, node *model.Proxy, dst *networking.HTTPRouteDestination,
configNamespace string) *route.RouteAction_HashPolicy {
if push == nil {
return nil
}
destination := dst.GetDestination()
destinationRule := push.DestinationRule(node,
&model.Service{
Hostname: host.Name(destination.Host),
Attributes: model.ServiceAttributes{Namespace: configNamespace},
})
if destinationRule == nil {
return nil
}
rule := destinationRule.Spec.(*networking.DestinationRule)
consistentHash := rule.GetTrafficPolicy().GetLoadBalancer().GetConsistentHash()
portLevelSettings := rule.GetTrafficPolicy().GetPortLevelSettings()
plsHash := portLevelSettingsConsistentHash(destination, portLevelSettings)
var subsetHash, subsetPLSHash *networking.LoadBalancerSettings_ConsistentHashLB
for _, subset := range rule.GetSubsets() {
if subset.GetName() == destination.GetSubset() {
subsetPortLevelSettings := subset.GetTrafficPolicy().GetPortLevelSettings()
subsetHash = subset.GetTrafficPolicy().GetLoadBalancer().GetConsistentHash()
subsetPLSHash = portLevelSettingsConsistentHash(destination, subsetPortLevelSettings)
break
}
}
switch {
case subsetPLSHash != nil:
consistentHash = subsetPLSHash
case subsetHash != nil:
consistentHash = subsetHash
case plsHash != nil:
consistentHash = plsHash
}
return consistentHashToHashPolicy(consistentHash)
}
// catchAllMatch returns a catch all match block if available in the route, otherwise returns nil.
func catchAllMatch(http *networking.HTTPRoute) *networking.HTTPMatchRequest {
for _, match := range http.Match {
if isCatchAllMatch(match) {
return match
}
}
return nil
}
// isCatchAllMatch returns true if the HTTPMatchRequest is a catch-all match, otherwise false.
func isCatchAllMatch(m *networking.HTTPMatchRequest) bool {
catchall := false
if m.Uri != nil {
switch m := m.Uri.MatchType.(type) {
case *networking.StringMatch_Prefix:
catchall = m.Prefix == "/"
case *networking.StringMatch_Regex:
catchall = m.Regex == "*"
}
}
// A Match is catch all if and only if it has no header/query param match
// and URI has a prefix / or regex *.
return catchall && len(m.Headers) == 0 && len(m.QueryParams) == 0
}
// CombineVHostRoutes semi-concatenates two VHosts' routes into a single route set.
// Moves the catch all routes alone to the end, while retaining
// the relative order of other routes in the concatenated route.
// Assumes that the virtual services that generated first and second are ordered by
// time.
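// For example, combining [r1, catchAll1] with [r2, catchAll2] yields [r1, r2, catchAll1, catchAll2].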
func CombineVHostRoutes(first []*route.Route, second []*route.Route) []*route.Route {
allroutes := make([]*route.Route, 0, len(first)+len(second))
catchAllRoutes := make([]*route.Route, 0)
for _, f := range first {
if isCatchAllRoute(f) {
catchAllRoutes = append(catchAllRoutes, f)
} else {
allroutes = append(allroutes, f)
}
}
for _, s := range second {
if isCatchAllRoute(s) {
catchAllRoutes = append(catchAllRoutes, s)
} else {
allroutes = append(allroutes, s)
}
}
allroutes = append(allroutes, catchAllRoutes...)
return allroutes
}
// isCatchAllRoute returns true if an Envoy route is a catchall route otherwise false.
func isCatchAllRoute(r *route.Route) bool {
catchall := false
switch ir := r.Match.PathSpecifier.(type) {
case *route.RouteMatch_Prefix:
catchall = ir.Prefix == "/"
case *route.RouteMatch_Regex:
catchall = ir.Regex == "*"
case *route.RouteMatch_SafeRegex:
catchall = ir.SafeRegex.GetRegex() == "*"
}
// A Match is catch all if and only if it has no header/query param match
// and URI has a prefix / or regex *.
return catchall && len(r.Match.Headers) == 0 && len(r.Match.QueryParameters) == 0
} | |
naive.rs | use std::thread;
use super::ThreadPool;
use crate::Result;
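/// A ThreadPool implementation that does no pooling at all: every call to
/// `spawn` starts a brand new OS thread for the job.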
pub struct NaiveThreadPool;
impl ThreadPool for NaiveThreadPool {
fn new(_: usize) -> Result<NaiveThreadPool> |
fn spawn<F: FnOnce() + Send + 'static>(&self, job: F) {
thread::spawn(job);
}
}
| {
Ok(NaiveThreadPool)
} |
Image.ts | export default class | {
constructor(
public src: string,
public title: string,
public description: string,
) {}
}
| Image |
artificial_measure.py |
"""
artificial measure
------------------
Creation of artificial measure
"""
import numpy as np
############################### Create measure ################################
###############################################################################
def create_artificial_measure_array(n_k, n_vals_i, n_feats):
"""Create artificial random measure in the array form.
Parameters
----------
n_k: int
the number of perturbations
n_vals_i: int
the number of indices of the output measure.
n_feats: int
the number of features.
Returns
-------
measure: np.ndarray
the transformed measure computed by the whole spatial descriptor model.
"""
measure = np.random.random((n_vals_i, n_feats, n_k))
return measure
def create_artificial_measure_append(n_k, n_vals_i, n_feats):
"""Create artificial random measure in the list form.
Parameters
----------
n_k: int
the number of perturbations
n_vals_i: int
the number of indices of the output measure.
n_feats: int
the number of features.
Returns
-------
measure: list
the transformed measure computed by the whole spatial descriptor model.
"""
rounds = np.random.randint(1, 40)
measure = create_empty_append(n_k, n_vals_i, n_feats)
for i in range(rounds):
n_iss = np.random.randint(1, 10)
vals_i = create_vals_i(n_iss, n_vals_i, n_k)
x_i = create_features_i_dict(n_feats, n_iss, n_k)
for k in range(len(vals_i)):
for i in range(len(vals_i[k])):
measure[k][vals_i[k][i]].append(x_i[k][i])
return measure
def create_artificial_measure_replacelist(n_k, n_vals_i, n_feats,
unique_=False):
"""Create artificial random measure in the replacelist form.
Parameters
----------
n_k: int
the number of perturbations
n_vals_i: int
the number of indices of the output measure.
n_feats: int
the number of features.
unique_: boolean (default=False)
        if True, the generated indices are unique (no collapses).
Returns
-------
measure: list
the transformed measure computed by the whole spatial descriptor model.
"""
last = 0
rounds = np.random.randint(1, 40)
measure = create_empty_replacelist(n_k, n_vals_i, n_feats)
for i in range(rounds):
n_iss = np.random.randint(1, 10)
if unique_:
vals_i = np.array([last+np.arange(n_iss)]*n_k)
last += n_iss
else:
vals_i = create_vals_i(n_iss, n_vals_i, n_k)
x_i = create_features_i_dict(n_feats, n_iss, n_k)
for k in range(len(vals_i)):
measure[k][0].append(x_i[k])
measure[k][1].append(vals_i[k])
return measure
############################### Empty measure #################################
###############################################################################
def create_empty_array(n_k, n_vals_i, n_feats):
"""Create null measure in the array form.
Parameters
----------
n_k: int
the number of perturbations
n_vals_i: int
the number of indices of the output measure.
n_feats: int
the number of features.
Returns
-------
measure: np.ndarray
        the null measure to be filled by the computation of the spatial
descriptor model.
"""
return np.zeros((n_vals_i, n_feats, n_k))
def create_empty_append(n_k, n_iss, n_feats):
"""Create null measure in the list form.
Parameters
----------
n_k: int
the number of perturbations
    n_iss: int
the number of indices of the output measure.
n_feats: int
the number of features.
Returns
-------
measure: list
        the null measure to be filled by the computation of the spatial
descriptor model.
"""
    return [[[] for _ in range(n_iss)] for _ in range(n_k)]
def create_empty_replacelist(n_k, n_iss, n_feats):
|
############################### Vals_i creation ###############################
###############################################################################
def create_vals_i(n_iss, nvals, n_k):
"""
Parameters
----------
    n_iss: int
        the number of indices to generate for each perturbation.
    nvals: int
        the upper bound (exclusive) of the generated index values.
    n_k: int
        the number of perturbations.
Returns
-------
vals_i: np.ndarray
the associated stored indices for the element indices.
"""
return np.random.randint(1, nvals, n_iss*n_k).reshape((n_k, n_iss))
############################### Empty features ################################
###############################################################################
def create_empty_features_array(n_feats, n_iss, n_k):
"""Create null features for different iss in an array-form.
Parameters
----------
n_feats: int
the number of features.
n_iss: int
the number of the elements to create their features.
n_k: int
the number of perturbations.
Returns
-------
features: np.ndarray
the null features we want to compute.
"""
return np.zeros((n_k, n_iss, n_feats))
def create_empty_features_dict(n_feats, n_iss, n_k):
"""Create null features for different iss in an listdict-form.
Parameters
----------
n_feats: int
the number of features.
n_iss: int
the number of the elements to create their features.
n_k: int
the number of perturbations.
Returns
-------
features: list
the null features we want to compute.
"""
    return [[{} for _ in range(n_iss)] for _ in range(n_k)]
################################ X_i features #################################
###############################################################################
def create_features_i_array(n_feats, n_iss, n_k):
"""Create null features for different iss in an array-form.
Parameters
----------
n_feats: int
the number of features.
n_iss: int
the number of the elements to create their features.
n_k: int
the number of perturbations.
Returns
-------
features: np.ndarray
the null features we want to compute.
"""
x_i = np.random.random((n_k, n_iss, n_feats))
return x_i
def create_features_i_dict(n_feats, n_iss, n_k):
"""Create null features for different iss in an listdict-form.
Parameters
----------
n_feats: int
the number of features.
n_iss: int
the number of the elements to create their features.
n_k: int
the number of perturbations.
Returns
-------
features: list
the random features we want to compute.
"""
x_i = []
for k in range(n_k):
x_i_k = []
for i in range(n_iss):
keys = np.unique(np.random.randint(1, n_feats, n_feats))
keys = [str(e) for e in keys]
values = np.random.random(len(keys))
x_i_k.append(dict(zip(keys, values)))
x_i.append(x_i_k)
return x_i
| """Create null measure in the replacelist form.
Parameters
----------
n_k: int
the number of perturbations
n_iss: int
the number of indices of the elements.
n_feats: int
the number of features.
Returns
-------
measure: list
the null measure to be filled by the computation of the spatial
descriptor model.
"""
# One independent [features, indices] pair per perturbation; '*' would alias them.
return [[[], []] for _ in range(n_k)] |
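# --- Illustrative sketch (not part of the original module) ---
# How the helpers above can be combined to build a random "append"-type
# measure for a test, mirroring the filling loop at the top of this file.
def example_random_append_measure(n_k=2, n_iss=3, n_vals_i=5, n_feats=4):
    measure = [[[], []] for _ in range(n_k)]
    vals_i = create_vals_i(n_iss, n_vals_i, n_k)
    x_i = create_features_i_dict(n_feats, n_iss, n_k)
    for k in range(n_k):
        # Features go into slot 0, their associated output indices into slot 1.
        measure[k][0].append(x_i[k])
        measure[k][1].append(vals_i[k])
    return measure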
ca.ts | export const CA = {
"Click": "Feu clic a",
| "Zoom In": "Apropar",
"Zoom Out": "Disminuir el zoom",
"First Page": "Primera pàgina",
"Previous Page": "Pàgina anterior",
"Next Page": "Pàgina següent",
"Last Page": "Darrera pàgina",
"Rotate CCW": "Gira CCW",
"Rotate CW": "Gira CW",
"Download": "descarregar",
"Print": "Imprimir",
"Search": "Cerca",
"Run presentation": "Executa la presentació",
"Present": "Present",
"Stop": "Atura",
"Stop presenting": "Deixa de presentar-te",
"Resume presenting": "Reprèn la presentació",
"Pause presenting": "Posa en pausa la presentació",
"None": "Cap",
"5 sec": "5 seg",
"10 sec": "10 seg",
"15 sec": "15 seg",
"30 sec": "30 seg",
"Thumbnails": "Miniatures"
}; | "to open file": "per obrir el fitxer",
"Or drop file here": "O deixeu anar el fitxer aquí",
"Browse files": "Cercar fitxers",
|
chatbotconfig.py | import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config(object):
| SECRET_KEY=os.environ.get('SECRET_KEY') or 'you-will-never-guess' |
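# --- Illustrative sketch (not part of the original file) ---
# This Config class follows the common Flask pattern of reading settings from
# environment variables with a development fallback. Assuming a Flask app
# (Flask itself is not imported here), it would typically be wired up as:
#
#   from flask import Flask
#   app = Flask(__name__)
#   app.config.from_object(Config)
#   assert app.config['SECRET_KEY']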
|
setup__worker.js | importScripts('./brfv5__init__worker.js')
let _width = 0 // will be set by main thread
let _height = 0
const sentTrackFaces = function() { self.postMessage("trackFaces"); }
self.addEventListener('message', function(e) {
let dataBuffer = new Int32Array(e.data);
if(dataBuffer.length === 2) {
setSize(dataBuffer[0], dataBuffer[1])
} else if(_width > 0 && dataBuffer.length === _width * _height) {
_brfv5Manager.update({ data: new Uint8ClampedArray(e.data), width: _width, height: _height })
const faces = _brfv5Manager.getFaces();
if(faces.length > 0) {
const face = faces[0]
const vertices = new Float32Array(face.vertices.length);
for(let k = 0; k < face.vertices.length; k++) {
vertices[k] = face.vertices[k];
}
self.postMessage(vertices.buffer, [vertices.buffer]);
}
}
}, false);
const setSize = (width, height) => {
_width = width
_height = height
configureTracking()
}
loadBRFv5Model('68l', 8, '../brfv5/models/', _appId,
(progress) => { console.log(progress) }).then(({ brfv5Manager, brfv5Config }) => {
console.log('loadBRFv5Model: done')
_brfv5Manager = brfv5Manager
_brfv5Config = brfv5Config
configureTracking()
}).catch((e) => { console.error('BRFv5 failed: ', e) })
const configureTracking = () => {
if(_brfv5Config !== null && _width > 0) {
// Camera stream and BRFv5 are ready. Now configure. Internal defaults are set for a 640x480 resolution.
// So the following isn't really necessary.
const brfv5Config = _brfv5Config
const imageWidth = _width
const imageHeight = _height
const inputSize = imageWidth > imageHeight ? imageHeight : imageWidth
// Setup image data dimensions
brfv5Config.imageConfig.inputWidth = imageWidth
brfv5Config.imageConfig.inputHeight = imageHeight
const sizeFactor = inputSize / 480.0
// Set face detection region of interest and parameters scaled to the image base size.
| brfv5Config.faceDetectionConfig.minFaceSize = 144 * sizeFactor
brfv5Config.faceDetectionConfig.maxFaceSize = 480 * sizeFactor
if(imageWidth < imageHeight) {
// Portrait mode: probably smartphone, faces tend to be closer to the camera, processing time is an issue,
// so save a bit of time and increase minFaceSize.
brfv5Config.faceDetectionConfig.minFaceSize = 240 * sizeFactor
}
// Set face tracking region of interest and parameters scaled to the image base size.
brfv5Config.faceTrackingConfig.regionOfInterest.setTo(0, 0, imageWidth, imageHeight)
brfv5Config.faceTrackingConfig.minFaceScaleStart = 50.0 * sizeFactor
brfv5Config.faceTrackingConfig.maxFaceScaleStart = 320.0 * sizeFactor
brfv5Config.faceTrackingConfig.minFaceScaleReset = 35.0 * sizeFactor
brfv5Config.faceTrackingConfig.maxFaceScaleReset = 420.0 * sizeFactor
brfv5Config.faceTrackingConfig.confidenceThresholdReset = 0.001
brfv5Config.faceTrackingConfig.enableStabilizer = true
brfv5Config.faceTrackingConfig.maxRotationXReset = 35.0
brfv5Config.faceTrackingConfig.maxRotationYReset = 45.0
brfv5Config.faceTrackingConfig.maxRotationZReset = 34.0
brfv5Config.faceTrackingConfig.numTrackingPasses = 3
brfv5Config.faceTrackingConfig.enableFreeRotation = true
// Free rotation: allow any roll angle (overrides the 34.0 set above).
brfv5Config.faceTrackingConfig.maxRotationZReset = 999.0
brfv5Config.faceTrackingConfig.numFacesToTrack = 1
brfv5Config.enableFaceTracking = true
console.log('configureTracking:', _brfv5Config)
_brfv5Manager.configure(_brfv5Config)
sentTrackFaces();
}
} | brfv5Config.faceDetectionConfig.regionOfInterest.setTo(0, 0, imageWidth, imageHeight)
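// --- Illustrative sketch (not part of the original file) ---
// Rough outline of the main-thread side of the protocol handled above,
// assuming a <canvas> mirroring the camera stream; `canvas`, `ctx` and
// `drawVertices` are placeholders, not part of the BRFv5 API.
//
// const worker = new Worker('./setup__worker.js')
//
// // 1) Send the frame size once (the length-2 Int32Array branch above).
// worker.postMessage(new Int32Array([canvas.width, canvas.height]).buffer)
//
// // 2) For every frame, transfer the RGBA pixels; viewed as an Int32Array the
// //    buffer has width * height elements, which is what the worker checks.
// const sendFrame = () => {
//   const imageData = ctx.getImageData(0, 0, canvas.width, canvas.height)
//   worker.postMessage(imageData.data.buffer, [imageData.data.buffer])
// }
//
// // 3) The worker answers either with the string "trackFaces" (ready signal)
// //    or with a transferred Float32Array of face vertices.
// worker.onmessage = (e) => {
//   if (e.data === 'trackFaces') { sendFrame() }
//   else { drawVertices(new Float32Array(e.data)); sendFrame() }
// }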
|
slice_list_model.rs | // This file was generated by gir (https://github.com/gtk-rs/gir)
// from gir-files (https://github.com/gtk-rs/gir-files)
// DO NOT EDIT
use glib::object::Cast;
use glib::object::IsA;
use glib::object::ObjectType as ObjectType_;
use glib::signal::connect_raw;
use glib::signal::SignalHandlerId;
use glib::translate::*;
use glib::StaticType;
use glib::ToValue;
use std::boxed::Box as Box_;
use std::fmt;
use std::mem::transmute;
glib::glib_wrapper! {
pub struct SliceListModel(Object<ffi::GtkSliceListModel, ffi::GtkSliceListModelClass>) @implements gio::ListModel;
match fn {
get_type => || ffi::gtk_slice_list_model_get_type(),
}
}
impl SliceListModel {
#[doc(alias = "gtk_slice_list_model_new")]
pub fn new<P: IsA<gio::ListModel>>(
model: Option<&P>,
offset: u32,
size: u32,
) -> SliceListModel {
assert_initialized_main_thread!();
unsafe {
from_glib_full(ffi::gtk_slice_list_model_new(
model.map(|p| p.as_ref()).to_glib_full(),
offset,
size,
))
}
}
#[doc(alias = "gtk_slice_list_model_get_model")]
pub fn get_model(&self) -> Option<gio::ListModel> {
unsafe { from_glib_none(ffi::gtk_slice_list_model_get_model(self.to_glib_none().0)) }
}
#[doc(alias = "gtk_slice_list_model_get_offset")]
pub fn get_offset(&self) -> u32 {
unsafe { ffi::gtk_slice_list_model_get_offset(self.to_glib_none().0) }
}
#[doc(alias = "gtk_slice_list_model_get_size")]
pub fn get_size(&self) -> u32 {
unsafe { ffi::gtk_slice_list_model_get_size(self.to_glib_none().0) }
}
#[doc(alias = "gtk_slice_list_model_set_model")]
pub fn set_model<P: IsA<gio::ListModel>>(&self, model: Option<&P>) {
unsafe {
ffi::gtk_slice_list_model_set_model(
self.to_glib_none().0,
model.map(|p| p.as_ref()).to_glib_none().0,
);
}
}
#[doc(alias = "gtk_slice_list_model_set_offset")]
pub fn set_offset(&self, offset: u32) {
unsafe {
ffi::gtk_slice_list_model_set_offset(self.to_glib_none().0, offset);
}
}
#[doc(alias = "gtk_slice_list_model_set_size")]
pub fn set_size(&self, size: u32) {
unsafe {
ffi::gtk_slice_list_model_set_size(self.to_glib_none().0, size);
}
}
pub fn connect_property_model_notify<F: Fn(&SliceListModel) + 'static>(
&self,
f: F,
) -> SignalHandlerId {
unsafe extern "C" fn notify_model_trampoline<F: Fn(&SliceListModel) + 'static>(
this: *mut ffi::GtkSliceListModel,
_param_spec: glib::ffi::gpointer,
f: glib::ffi::gpointer,
) {
let f: &F = &*(f as *const F);
f(&from_glib_borrow(this))
}
unsafe {
let f: Box_<F> = Box_::new(f);
connect_raw(
self.as_ptr() as *mut _,
b"notify::model\0".as_ptr() as *const _,
Some(transmute::<_, unsafe extern "C" fn()>(
notify_model_trampoline::<F> as *const (),
)),
Box_::into_raw(f),
)
}
}
pub fn connect_property_offset_notify<F: Fn(&SliceListModel) + 'static>(
&self,
f: F,
) -> SignalHandlerId {
unsafe extern "C" fn notify_offset_trampoline<F: Fn(&SliceListModel) + 'static>(
this: *mut ffi::GtkSliceListModel,
_param_spec: glib::ffi::gpointer,
f: glib::ffi::gpointer,
) {
let f: &F = &*(f as *const F);
f(&from_glib_borrow(this))
}
unsafe {
let f: Box_<F> = Box_::new(f);
connect_raw(
self.as_ptr() as *mut _,
b"notify::offset\0".as_ptr() as *const _,
Some(transmute::<_, unsafe extern "C" fn()>(
notify_offset_trampoline::<F> as *const (),
)),
Box_::into_raw(f),
)
}
}
pub fn connect_property_size_notify<F: Fn(&SliceListModel) + 'static>(
&self,
f: F,
) -> SignalHandlerId {
unsafe extern "C" fn notify_size_trampoline<F: Fn(&SliceListModel) + 'static>(
this: *mut ffi::GtkSliceListModel,
_param_spec: glib::ffi::gpointer,
f: glib::ffi::gpointer,
) {
let f: &F = &*(f as *const F);
f(&from_glib_borrow(this))
}
unsafe {
let f: Box_<F> = Box_::new(f);
connect_raw(
self.as_ptr() as *mut _,
b"notify::size\0".as_ptr() as *const _,
Some(transmute::<_, unsafe extern "C" fn()>(
notify_size_trampoline::<F> as *const (),
)),
Box_::into_raw(f),
) |
#[derive(Clone, Default)]
pub struct SliceListModelBuilder {
model: Option<gio::ListModel>,
offset: Option<u32>,
size: Option<u32>,
}
impl SliceListModelBuilder {
pub fn new() -> Self {
Self::default()
}
pub fn build(self) -> SliceListModel {
let mut properties: Vec<(&str, &dyn ToValue)> = vec![];
if let Some(ref model) = self.model {
properties.push(("model", model));
}
if let Some(ref offset) = self.offset {
properties.push(("offset", offset));
}
if let Some(ref size) = self.size {
properties.push(("size", size));
}
let ret = glib::Object::new(SliceListModel::static_type(), &properties)
.expect("object new")
.downcast::<SliceListModel>()
.expect("downcast");
ret
}
pub fn model<P: IsA<gio::ListModel>>(mut self, model: &P) -> Self {
self.model = Some(model.clone().upcast());
self
}
pub fn offset(mut self, offset: u32) -> Self {
self.offset = Some(offset);
self
}
pub fn size(mut self, size: u32) -> Self {
self.size = Some(size);
self
}
}
impl fmt::Display for SliceListModel {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str("SliceListModel")
}
} | }
}
} |
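// --- Illustrative sketch (not part of the generated bindings above) ---
// Typical use of the builder API defined above; the caller provides any
// gio::ListModel implementation (for example a gio::ListStore).
#[allow(dead_code)]
fn example_slice_of(model: &impl IsA<gio::ListModel>) -> SliceListModel {
    SliceListModelBuilder::new()
        .model(model)
        .offset(10)
        .size(20)
        .build()
}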
factory.go | // Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package monitors
import (
"github.com/snappyflow/beats/v7/heartbeat/scheduler"
"github.com/snappyflow/beats/v7/libbeat/beat"
"github.com/snappyflow/beats/v7/libbeat/cfgfile"
"github.com/snappyflow/beats/v7/libbeat/common"
)
// RunnerFactory can be used to create cfgfile.Runner-wrapped versions of Monitor
// suitable for config reloading.
type RunnerFactory struct {
sched *scheduler.Scheduler
allowWatches bool
}
// NewFactory takes a scheduler and creates a RunnerFactory that can create cfgfile.Runner(Monitor) objects.
func NewFactory(sched *scheduler.Scheduler, allowWatches bool) *RunnerFactory {
return &RunnerFactory{sched, allowWatches}
}
// Create makes a new Runner for a new monitor with the given Config. | monitor, err := newMonitor(c, globalPluginsReg, p, f.sched, f.allowWatches)
return monitor, err
}
// CheckConfig checks to see if the given monitor config is valid.
func (f *RunnerFactory) CheckConfig(config *common.Config) error {
return checkMonitorConfig(config, globalPluginsReg, f.allowWatches)
} | func (f *RunnerFactory) Create(p beat.PipelineConnector, c *common.Config) (cfgfile.Runner, error) { |
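// --- Illustrative sketch (not part of the original file) ---
// Typical factory usage: validate a monitor config first, then create the
// runner that the reloader will start and stop. The pipeline and config come
// from the surrounding beat; error handling is abbreviated.
func exampleCreateRunner(f *RunnerFactory, p beat.PipelineConnector, cfg *common.Config) (cfgfile.Runner, error) {
	if err := f.CheckConfig(cfg); err != nil {
		return nil, err
	}
	return f.Create(p, cfg)
}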
dataset_parser.py | #!/usr/bin/env python
"""
@package mi.dataset.parser A collection of parsers that strip data blocks
out of files and feed them into the system.
@file mi/dataset/parser.py
@author Steve Foley
@brief Base classes for data set agent parsers
"""
__author__ = 'Steve Foley'
__license__ = 'Apache 2.0'
import time
import ntplib
from mi.core.log import get_logger
log = get_logger()
from mi.core.instrument.chunker import StringChunker
from mi.core.instrument.data_particle import DataParticleKey
from mi.core.exceptions import RecoverableSampleException, SampleEncodingException
from mi.core.exceptions import NotImplementedException, UnexpectedDataException
from mi.core.common import BaseEnum
class DataSetDriverConfigKeys(BaseEnum):
PARTICLE_MODULE = "particle_module"
PARTICLE_CLASS = "particle_class"
PARTICLE_CLASSES_DICT = "particle_classes_dict"
DIRECTORY = "directory"
STORAGE_DIRECTORY = "storage_directory"
PATTERN = "pattern"
FREQUENCY = "frequency"
FILE_MOD_WAIT_TIME = "file_mod_wait_time"
HARVESTER = "harvester"
PARSER = "parser"
MODULE = "module"
CLASS = "class"
URI = "uri"
CLASS_ARGS = "class_args"
class Parser(object):
""" abstract class to show API needed for plugin poller objects """
def __init__(self, config, stream_handle, state, sieve_fn,
state_callback, publish_callback, exception_callback=None):
"""
@param config The configuration parameters to feed into the parser
@param stream_handle An already open file-like filehandle
@param state The location in the file to start parsing from.
This reflects what has already been published.
@param sieve_fn A sieve function that might be added to a handler
to appropriately filter out the data
@param state_callback The callback method from the agent driver
(ultimately the agent) to call back when a state needs to be
updated
@param publish_callback The callback from the agent driver (and
ultimately from the agent) where we send our sample particle to
be published into ION
@param exception_callback The callback from the agent driver (and
ultimately from the agent) where we send our error events to
be published into ION
"""
self._chunker = StringChunker(sieve_fn)
self._stream_handle = stream_handle
self._state = state
self._state_callback = state_callback
self._publish_callback = publish_callback
self._exception_callback = exception_callback
self._config = config
# Build class from module and class name, then set the state
if config.get(DataSetDriverConfigKeys.PARTICLE_CLASS) is not None:
if config.get(DataSetDriverConfigKeys.PARTICLE_MODULE):
self._particle_module = __import__(config.get(DataSetDriverConfigKeys.PARTICLE_MODULE),
fromlist=[config.get(DataSetDriverConfigKeys.PARTICLE_CLASS)])
# if there is more than one particle class for this parser, this cannot be used, need to hard code the
# particle class in the driver
try:
self._particle_class = getattr(self._particle_module,
config.get(DataSetDriverConfigKeys.PARTICLE_CLASS))
except TypeError:
self._particle_class = None
else:
log.warn("Particle class is specified in config, but no particle module is specified in config")
def get_records(self, max_count):
"""
Returns a list of particles (following the instrument driver structure).
"""
raise NotImplementedException("get_records() not overridden!")
def _publish_sample(self, samples):
"""
Publish the samples with the given publishing callback.
@param samples The list of data particle to publish up to the system
"""
if isinstance(samples, list):
self._publish_callback(samples)
else:
self._publish_callback([samples])
def _extract_sample(self, particle_class, regex, raw_data, timestamp):
"""
Extract sample from a response line if present and publish
parsed particle
@param particle_class The class to instantiate for this specific
data particle. Parameterizing this allows for simple, standard
behavior from this routine
@param regex The regular expression that matches a data sample; if regex
is None then every line is processed
@param raw_data data to input into this particle.
@retval return a raw particle if a sample was found, else None
"""
particle = None
try:
if regex is None or regex.match(raw_data):
particle = particle_class(raw_data, internal_timestamp=timestamp,
preferred_timestamp=DataParticleKey.INTERNAL_TIMESTAMP)
# need to actually parse the particle fields to find out if there are errors
particle.generate()
encoding_errors = particle.get_encoding_errors()
if encoding_errors:
log.warn("Failed to encode: %s", encoding_errors)
raise SampleEncodingException("Failed to encode: %s" % encoding_errors)
except (RecoverableSampleException, SampleEncodingException) as e:
log.error("Sample exception detected: %s raw data: %s", e, raw_data)
if self._exception_callback:
self._exception_callback(e)
else:
raise e
return particle
class BufferLoadingParser(Parser):
"""
This class loads data values into a record buffer, then offers up
records from this buffer as they are requested. Parsers don't have
to operate this way, but it can keep memory in check and smooth out
stream inputs if they don't all come at once.
"""
def __init__(self, config, stream_handle, state, sieve_fn,
state_callback, publish_callback, exception_callback=None):
"""
@param config The configuration parameters to feed into the parser
@param stream_handle An already open file-like filehandle
@param state The location in the file to start parsing from.
This reflects what has already been published.
@param sieve_fn A sieve function that might be added to a handler
to appropriately filter out the data
@param state_callback The callback method from the agent driver
(ultimately the agent) to call back when a state needs to be
updated
@param publish_callback The callback from the agent driver (and
ultimately from the agent) where we send our sample particle to
be published into ION
@param exception_callback The callback from the agent driver (and
ultimately from the agent) where we send our error events to
be published into ION
"""
self._record_buffer = []
self._timestamp = 0.0
self.file_complete = False
super(BufferLoadingParser, self).__init__(config, stream_handle, state,
sieve_fn, state_callback,
publish_callback,
exception_callback)
def get_records(self, num_records):
"""
Go ahead and execute the data parsing loop up to a point. This involves
getting data from the file, stuffing it into the chunker, then parsing
it and publishing.
@param num_records The number of records to gather
@retval Return the list of particles requested, [] if none available
"""
if num_records <= 0:
return []
try:
while len(self._record_buffer) < num_records:
self._load_particle_buffer()
except EOFError:
self._process_end_of_file()
return self._yank_particles(num_records)
def _process_end_of_file(self):
|
def _yank_particles(self, num_records):
"""
Get particles out of the buffer and publish them. Update the state
of what has been published, too.
@param num_records The number of particles to remove from the buffer
@retval A list with num_records elements from the buffer. If num_records
cannot be collected (perhaps due to an EOF), the list will have the
elements it was able to collect.
"""
if len(self._record_buffer) < num_records:
num_to_fetch = len(self._record_buffer)
else:
num_to_fetch = num_records
log.trace("Yanking %s records of %s requested",
num_to_fetch,
num_records)
return_list = []
records_to_return = self._record_buffer[:num_to_fetch]
self._record_buffer = self._record_buffer[num_to_fetch:]
if len(records_to_return) > 0:
self._state = records_to_return[-1][1] # state side of tuple of last entry
# strip the state info off of them now that we have what we need
for item in records_to_return:
log.debug("Record to return: %s", item)
return_list.append(item[0])
self._publish_sample(return_list)
log.trace("Sending parser state [%s] to driver", self._state)
file_ingested = False
if self.file_complete and len(self._record_buffer) == 0:
# file has been read completely and all records pulled out of the record buffer
file_ingested = True
self._state_callback(self._state, file_ingested) # push new state to driver
return return_list
def _load_particle_buffer(self):
"""
Load up the internal record buffer with some particles based on a
gather from the get_block method.
"""
while self.get_block():
result = self.parse_chunks()
self._record_buffer.extend(result)
def get_block(self, size=1024):
"""
Get a block of characters for processing
@param size The size of the block to try to read
@retval The length of data retrieved
@throws EOFError when the end of the file is reached
"""
# read in some more data
data = self._stream_handle.read(size)
if data:
self._chunker.add_chunk(data, ntplib.system_to_ntp_time(time.time()))
return len(data)
else: # EOF
self.file_complete = True
raise EOFError
def parse_chunks(self):
"""
Parse out any pending data chunks in the chunker. If
it is a valid data piece, build a particle, update the position and
timestamp. Go until the chunker has no more valid data.
@retval a list of tuples with sample particles encountered in this
parsing, plus the state (i.e. "(sample, state)"). An empty list if
nothing was parsed.
"""
raise NotImplementedException("Must write parse_chunks()!")
class SimpleParser(Parser):
def __init__(self, config, stream_handle, exception_callback):
"""
Initialize the simple parser, which does not use state or the chunker
and sieve functions.
@param config: The parser configuration dictionary
@param stream_handle: The stream handle of the file to parse
@param exception_callback: The callback to use when an exception occurs
"""
# the record buffer which will store all parsed particles
self._record_buffer = []
# a flag indicating if the file has been parsed or not
self._file_parsed = False
super(SimpleParser, self).__init__(config,
stream_handle,
None, # state not used
None, # sieve_fn not used
None, # state_callback not used
None, # publish_callback not used
exception_callback)
def parse_file(self):
"""
This method must be overridden. It should open and read the file and parse the data within, and at
the end of this method self._record_buffer will be filled with all the particles in the file.
"""
raise NotImplementedException("parse_file() not overridden!")
def get_records(self, number_requested=1):
"""
Initiate parsing the file if it has not been done already, and pop particles off the record buffer to
return as many as requested if they are available in the buffer.
@param number_requested the number of records requested to be returned
@return an array of particles, with a length of the number requested or less
"""
particles_to_return = []
if number_requested > 0:
if self._file_parsed is False:
self.parse_file()
self._file_parsed = True
while len(particles_to_return) < number_requested and len(self._record_buffer) > 0:
particles_to_return.append(self._record_buffer.pop(0))
return particles_to_return
| """
Confirm that the chunker does not have any extra bytes left at the end of the file
"""
(nd_timestamp, non_data) = self._chunker.get_next_non_data()
(timestamp, chunk) = self._chunker.get_next_data()
if non_data and len(non_data) > 0:
log.warn("Have extra unexplained non-data bytes at the end of the file:%s", non_data)
raise UnexpectedDataException("Have extra unexplained non-data bytes at the end of the file:%s" % non_data)
elif chunk and len(chunk) > 0:
log.warn("Have extra unexplained data chunk bytes at the end of the file:%s", chunk)
raise UnexpectedDataException("Have extra unexplained data chunk bytes at the end of the file:%s" % chunk) |
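# --- Illustrative sketch (not part of the original module) ---
# A minimal SimpleParser subclass showing the contract described above:
# parse_file() fills self._record_buffer, get_records() then returns the
# particles. LineParticle is a placeholder for a DataParticle subclass and is
# not defined here.
class ExampleLineParser(SimpleParser):
    def parse_file(self):
        for line in self._stream_handle:
            line = line.strip()
            if not line:
                continue
            particle = self._extract_sample(LineParticle, None, line, None)
            if particle is not None:
                self._record_buffer.append(particle)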
backend.rs | use super::stackmap::StackmapRegistry;
use crate::intrinsics::Intrinsics;
use crate::structs::{Callbacks, LLVMModule, LLVMResult, MemProtect};
use inkwell::{
memory_buffer::MemoryBuffer,
module::Module,
targets::{FileType, TargetMachine},
};
use libc::c_char;
use std::{
any::Any,
ffi::{c_void, CString},
fs::File,
io::Write,
mem,
ops::Deref,
ptr::{self, NonNull},
slice, str,
sync::{Arc, Once},
};
use wasmer_runtime_core::{
backend::{
sys::{Memory, Protect},
CacheGen, RunnableModule,
},
cache::Error as CacheError,
module::ModuleInfo,
state::ModuleStateMap,
structures::TypedIndex,
typed_func::{Wasm, WasmTrapInfo},
types::{LocalFuncIndex, SigIndex},
vm, vmcalls,
};
extern "C" {
fn module_load(
mem_ptr: *const u8,
mem_size: usize,
callbacks: Callbacks,
module_out: &mut *mut LLVMModule,
) -> LLVMResult;
fn module_delete(module: *mut LLVMModule);
fn get_func_symbol(module: *mut LLVMModule, name: *const c_char) -> *const vm::Func;
fn llvm_backend_get_stack_map_ptr(module: *const LLVMModule) -> *const u8;
fn llvm_backend_get_stack_map_size(module: *const LLVMModule) -> usize;
fn llvm_backend_get_code_ptr(module: *const LLVMModule) -> *const u8;
fn llvm_backend_get_code_size(module: *const LLVMModule) -> usize;
fn throw_trap(ty: i32) -> !;
fn throw_breakpoint(ty: i64) -> !;
/// This should be the same as splitting up the fat pointer into two arguments,
/// but this is cleaner, I think?
#[cfg_attr(nightly, unwind(allowed))]
#[allow(improper_ctypes)]
fn throw_any(data: *mut dyn Any) -> !;
#[allow(improper_ctypes)]
fn invoke_trampoline(
trampoline: unsafe extern "C" fn(*mut vm::Ctx, NonNull<vm::Func>, *const u64, *mut u64),
vmctx_ptr: *mut vm::Ctx,
func_ptr: NonNull<vm::Func>,
params: *const u64,
results: *mut u64,
trap_out: *mut WasmTrapInfo,
user_error: *mut Option<Box<dyn Any>>,
invoke_env: Option<NonNull<c_void>>,
) -> bool;
}
static SIGNAL_HANDLER_INSTALLED: Once = Once::new();
fn get_callbacks() -> Callbacks {
extern "C" fn alloc_memory(
size: usize,
protect: MemProtect,
ptr_out: &mut *mut u8,
size_out: &mut usize,
) -> LLVMResult {
unsafe { crate::platform::alloc_memory(size, protect, ptr_out, size_out) }
}
extern "C" fn protect_memory(ptr: *mut u8, size: usize, protect: MemProtect) -> LLVMResult {
unsafe { crate::platform::protect_memory(ptr, size, protect) }
}
extern "C" fn dealloc_memory(ptr: *mut u8, size: usize) -> LLVMResult {
unsafe { crate::platform::dealloc_memory(ptr, size) }
}
extern "C" fn lookup_vm_symbol(name_ptr: *const c_char, length: usize) -> *const vm::Func {
#[cfg(target_os = "macos")]
macro_rules! fn_name {
($s:literal) => {
concat!("_", $s)
};
}
#[cfg(not(target_os = "macos"))]
macro_rules! fn_name {
($s:literal) => {
$s
};
}
let name_slice = unsafe { slice::from_raw_parts(name_ptr as *const u8, length) };
let name = str::from_utf8(name_slice).unwrap();
match name {
fn_name!("vm.memory.grow.dynamic.local") => vmcalls::local_dynamic_memory_grow as _,
fn_name!("vm.memory.size.dynamic.local") => vmcalls::local_dynamic_memory_size as _,
fn_name!("vm.memory.grow.static.local") => vmcalls::local_static_memory_grow as _,
fn_name!("vm.memory.size.static.local") => vmcalls::local_static_memory_size as _,
fn_name!("vm.memory.grow.dynamic.import") => vmcalls::imported_dynamic_memory_grow as _,
fn_name!("vm.memory.size.dynamic.import") => vmcalls::imported_dynamic_memory_size as _,
fn_name!("vm.memory.grow.static.import") => vmcalls::imported_static_memory_grow as _,
fn_name!("vm.memory.size.static.import") => vmcalls::imported_static_memory_size as _,
fn_name!("vm.exception.trap") => throw_trap as _,
fn_name!("vm.breakpoint") => throw_breakpoint as _,
_ => ptr::null(),
}
}
extern "C" fn visit_fde(fde: *mut u8, size: usize, visitor: extern "C" fn(*mut u8)) {
unsafe {
crate::platform::visit_fde(fde, size, visitor);
}
}
Callbacks {
alloc_memory,
protect_memory,
dealloc_memory,
lookup_vm_symbol,
visit_fde,
}
}
pub enum Buffer {
LlvmMemory(MemoryBuffer),
Memory(Memory),
}
impl Deref for Buffer {
type Target = [u8];
fn deref(&self) -> &[u8] {
match self {
Buffer::LlvmMemory(mem_buffer) => mem_buffer.as_slice(),
Buffer::Memory(memory) => unsafe { memory.as_slice() },
}
}
}
unsafe impl Send for LLVMBackend {}
unsafe impl Sync for LLVMBackend {}
pub struct LLVMBackend {
module: *mut LLVMModule,
#[allow(dead_code)]
buffer: Arc<Buffer>,
msm: Option<ModuleStateMap>,
local_func_id_to_offset: Vec<usize>,
}
impl LLVMBackend {
pub fn new(
module: Module,
_intrinsics: Intrinsics,
_stackmaps: &StackmapRegistry,
_module_info: &ModuleInfo,
target_machine: &TargetMachine,
) -> (Self, LLVMCache) {
let memory_buffer = target_machine
.write_to_memory_buffer(&module, FileType::Object)
.unwrap();
let mem_buf_slice = memory_buffer.as_slice();
if let Some(path) = unsafe { &crate::GLOBAL_OPTIONS.obj_file } {
let mut file = File::create(path).unwrap();
let mut pos = 0;
while pos < mem_buf_slice.len() {
pos += file.write(&mem_buf_slice[pos..]).unwrap();
}
}
let callbacks = get_callbacks();
let mut module: *mut LLVMModule = ptr::null_mut();
let res = unsafe {
module_load(
mem_buf_slice.as_ptr(),
mem_buf_slice.len(),
callbacks,
&mut module,
)
};
if res != LLVMResult::OK {
panic!("failed to load object")
}
let buffer = Arc::new(Buffer::LlvmMemory(memory_buffer));
#[cfg(all(any(target_os = "linux", target_os = "macos"), target_arch = "x86_64"))]
{
use super::stackmap::{self, StkMapRecord, StkSizeRecord};
use std::collections::BTreeMap;
let stackmaps = _stackmaps;
let module_info = _module_info;
let raw_stackmap = unsafe {
std::slice::from_raw_parts(
llvm_backend_get_stack_map_ptr(module),
llvm_backend_get_stack_map_size(module),
)
};
if raw_stackmap.len() > 0 {
let map = stackmap::StackMap::parse(raw_stackmap).unwrap();
let (code_ptr, code_size) = unsafe {
(
llvm_backend_get_code_ptr(module),
llvm_backend_get_code_size(module),
)
};
let mut msm = ModuleStateMap {
local_functions: Default::default(),
total_size: code_size,
};
let num_local_functions =
module_info.func_assoc.len() - module_info.imported_functions.len();
let mut local_func_id_to_addr: Vec<usize> = Vec::with_capacity(num_local_functions);
// All local functions.
for index in module_info.imported_functions.len()..module_info.func_assoc.len() {
let name = if cfg!(target_os = "macos") {
format!("_fn{}", index)
} else {
format!("fn{}", index)
};
let c_str = CString::new(name).unwrap();
let ptr = unsafe { get_func_symbol(module, c_str.as_ptr()) };
assert!(!ptr.is_null());
local_func_id_to_addr.push(ptr as usize);
}
let mut addr_to_size_record: BTreeMap<usize, &StkSizeRecord> = BTreeMap::new();
for record in &map.stk_size_records {
addr_to_size_record.insert(record.function_address as usize, record);
}
let mut map_records: BTreeMap<usize, &StkMapRecord> = BTreeMap::new();
for record in &map.stk_map_records {
map_records.insert(record.patchpoint_id as usize, record);
}
for ((start_id, start_entry), (end_id, end_entry)) in stackmaps
.entries
.iter()
.enumerate()
.step_by(2)
.zip(stackmaps.entries.iter().enumerate().skip(1).step_by(2))
{
if let Some(map_record) = map_records.get(&start_id) {
assert_eq!(start_id, map_record.patchpoint_id as usize);
assert!(start_entry.is_start);
assert!(!end_entry.is_start);
let end_record = map_records.get(&end_id);
let addr = local_func_id_to_addr[start_entry.local_function_id];
let size_record = *addr_to_size_record
.get(&addr)
.expect("size_record not found");
start_entry.populate_msm(
module_info,
code_ptr as usize,
&map,
size_record,
map_record,
end_record.map(|x| (end_entry, *x)),
&mut msm,
);
} else {
// The record is optimized out.
}
}
let code_ptr = unsafe { llvm_backend_get_code_ptr(module) } as usize;
let code_len = unsafe { llvm_backend_get_code_size(module) } as usize;
let local_func_id_to_offset: Vec<usize> = local_func_id_to_addr
.iter()
.map(|&x| {
assert!(x >= code_ptr && x < code_ptr + code_len);
x - code_ptr
})
.collect();
return (
Self {
module,
buffer: Arc::clone(&buffer),
msm: Some(msm),
local_func_id_to_offset,
},
LLVMCache { buffer },
);
}
}
// Stackmap is not supported on this platform, or this module contains no functions so no stackmaps.
(
Self {
module,
buffer: Arc::clone(&buffer),
msm: None,
local_func_id_to_offset: vec![],
},
LLVMCache { buffer },
)
}
pub unsafe fn from_buffer(memory: Memory) -> Result<(Self, LLVMCache), String> |
}
impl Drop for LLVMBackend {
fn drop(&mut self) {
unsafe { module_delete(self.module) }
}
}
impl RunnableModule for LLVMBackend {
fn get_func(
&self,
info: &ModuleInfo,
local_func_index: LocalFuncIndex,
) -> Option<NonNull<vm::Func>> {
let index = info.imported_functions.len() + local_func_index.index();
let name = if cfg!(target_os = "macos") {
format!("_fn{}", index)
} else {
format!("fn{}", index)
};
let c_str = CString::new(name).ok()?;
let ptr = unsafe { get_func_symbol(self.module, c_str.as_ptr()) };
NonNull::new(ptr as _)
}
fn get_trampoline(&self, _: &ModuleInfo, sig_index: SigIndex) -> Option<Wasm> {
let trampoline: unsafe extern "C" fn(
*mut vm::Ctx,
NonNull<vm::Func>,
*const u64,
*mut u64,
) = unsafe {
let name = if cfg!(target_os = "macos") {
format!("_trmp{}", sig_index.index())
} else {
format!("trmp{}", sig_index.index())
};
let c_str = CString::new(name).unwrap();
let symbol = get_func_symbol(self.module, c_str.as_ptr());
assert!(!symbol.is_null());
mem::transmute(symbol)
};
SIGNAL_HANDLER_INSTALLED.call_once(|| unsafe {
crate::platform::install_signal_handler();
});
Some(unsafe { Wasm::from_raw_parts(trampoline, invoke_trampoline, None) })
}
fn get_code(&self) -> Option<&[u8]> {
Some(unsafe {
std::slice::from_raw_parts(
llvm_backend_get_code_ptr(self.module),
llvm_backend_get_code_size(self.module),
)
})
}
fn get_local_function_offsets(&self) -> Option<Vec<usize>> {
Some(self.local_func_id_to_offset.clone())
}
fn get_module_state_map(&self) -> Option<ModuleStateMap> {
self.msm.clone()
}
unsafe fn do_early_trap(&self, data: Box<dyn Any>) -> ! {
throw_any(Box::leak(data))
}
}
unsafe impl Send for LLVMCache {}
unsafe impl Sync for LLVMCache {}
pub struct LLVMCache {
buffer: Arc<Buffer>,
}
impl CacheGen for LLVMCache {
fn generate_cache(&self) -> Result<(Box<[u8]>, Memory), CacheError> {
let mut memory = Memory::with_size_protect(self.buffer.len(), Protect::ReadWrite)
.map_err(CacheError::SerializeError)?;
let buffer = self.buffer.deref();
unsafe {
memory.as_slice_mut()[..buffer.len()].copy_from_slice(buffer);
}
Ok(([].as_ref().into(), memory))
}
}
| {
let callbacks = get_callbacks();
let mut module: *mut LLVMModule = ptr::null_mut();
let slice = memory.as_slice();
let res = module_load(slice.as_ptr(), slice.len(), callbacks, &mut module);
if res != LLVMResult::OK {
return Err("failed to load object".to_string());
}
SIGNAL_HANDLER_INSTALLED.call_once(|| {
crate::platform::install_signal_handler();
});
let buffer = Arc::new(Buffer::Memory(memory));
Ok((
Self {
module,
buffer: Arc::clone(&buffer),
msm: None,
local_func_id_to_offset: vec![],
},
LLVMCache { buffer },
))
} |
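// --- Illustrative sketch (not part of the original file) ---
// The cache round-trip implied by the code above: generate_cache() copies the
// object buffer into a plain Memory region, and LLVMBackend::from_buffer()
// reloads a backend from such a region. Wasmer plumbing and richer error
// handling are elided.
#[allow(dead_code)]
unsafe fn example_cache_roundtrip(cache: &LLVMCache) -> Result<LLVMBackend, String> {
    let (_metadata, memory) = cache
        .generate_cache()
        .map_err(|_| "cache serialization failed".to_string())?;
    let (backend, _new_cache) = LLVMBackend::from_buffer(memory)?;
    Ok(backend)
}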
0003_auto_20200820_0704.py | # Generated by Django 3.0.3 on 2020-08-19 23:04
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('predictor', '0002_review'),
] | migrations.AlterField(
model_name='review',
name='dokumen_relevan',
field=models.IntegerField(default=0),
),
] |
operations = [ |
lib.rs | mod utils;
use cfg_if::cfg_if;
use std::fmt;
use wasm_bindgen::prelude::*;
cfg_if! {
// When the `wee_alloc` feature is enabled, use `wee_alloc` as the global
// allocator.
if #[cfg(feature = "wee_alloc")] {
extern crate wee_alloc;
#[global_allocator]
static ALLOC: wee_alloc::WeeAlloc = wee_alloc::WeeAlloc::INIT;
}
}
macro_rules! log {
( $( $t:tt )* ) => {
web_sys::console::log_1(&format!( $( $t )* ).into());
}
}
#[wasm_bindgen]
#[repr(u8)]
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum Cell {
Dead = 0,
Alive = 1,
}
#[wasm_bindgen]
#[derive(Debug, PartialEq, Eq)]
pub struct Universe {
width: u32,
height: u32,
cells: Vec<Cell>,
}
#[wasm_bindgen]
impl Universe {
pub fn new() -> Self {
utils::set_panic_hook();
let width = 64;
let height = 64; | if i % 2 == 0 || i % 7 == 0 {
Cell::Alive
} else {
Cell::Dead
}
})
.collect();
Universe {
width,
height,
cells,
}
}
pub fn render(&self) -> String {
self.to_string()
}
pub fn tick(&mut self) {
// TODO: Meh, this should return a new Universe instead of mutating itself.
let mut next = self.cells.clone();
for row in 0..self.height {
for column in 0..self.width {
let idx = self.get_index(row, column);
let cell = self.cells[idx];
let live_neighbor_count = self.live_neighbor_count(row, column);
let next_cell = match (cell, live_neighbor_count) {
(Cell::Alive, x) if x < 2 => Cell::Dead,
(Cell::Alive, 2) | (Cell::Alive, 3) => Cell::Alive,
(Cell::Alive, x) if x > 3 => Cell::Dead,
(Cell::Dead, 3) => Cell::Alive,
(otherwise, _) => otherwise,
};
next[idx] = next_cell;
}
}
self.cells = next;
}
pub fn width(&self) -> u32 {
self.width
}
pub fn set_width(&mut self, width: u32) {
self.width = width;
self.cells = (0..width * self.height).map(|_| Cell::Dead).collect();
}
pub fn height(&self) -> u32 {
self.height
}
pub fn set_height(&mut self, height: u32) {
self.height = height;
self.cells = (0..height * self.width).map(|_| Cell::Dead).collect();
}
pub fn cells_ptr(&self) -> *const Cell {
self.cells.as_ptr()
}
pub fn toggle_cell(&mut self, row: u32, col: u32) {
let idx = self.get_index(row, col);
log!("toggle: {} {}", row, col);
self.cells[idx].toggle();
}
fn get_index(&self, row: u32, column: u32) -> usize {
return (row * self.width + column) as usize;
}
fn live_neighbor_count(&self, row: u32, column: u32) -> u8 {
let mut count = 0;
// TODO: This should be a sum over a flattened fold.
for delta_row in [self.height - 1, 0, 1].iter().cloned() {
for delta_column in [self.width - 1, 0, 1].iter().cloned() {
if delta_row == 0 && delta_column == 0 {
continue;
}
let neighbor_row = (row + delta_row) % self.height;
let neighbor_column = (column + delta_column) % self.width;
let idx = self.get_index(neighbor_row, neighbor_column);
count += self.cells[idx] as u8;
}
}
count
}
}
impl Universe {
pub fn cells(&self) -> &[Cell] {
&self.cells
}
pub fn set_cells(&mut self, cells: &[(u32, u32)]) {
for (row, col) in cells.iter().cloned() {
let idx = self.get_index(row, col);
self.cells[idx] = Cell::Alive;
}
}
}
impl Cell {
fn toggle(&mut self) {
*self = match *self {
Cell::Dead => Cell::Alive,
Cell::Alive => Cell::Dead,
}
}
}
impl fmt::Display for Universe {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
for line in self.cells.as_slice().chunks(self.width as usize) {
for &cell in line {
let symbol = match cell {
Cell::Alive => '◻',
Cell::Dead => '◼',
};
write!(f, "{}", symbol)?;
}
write!(f, "\n")?;
}
Ok(())
}
}
#[wasm_bindgen]
extern "C" {
fn alert(s: &str);
}
#[wasm_bindgen]
pub fn greet(name: &str) {
alert(&format!("Hello, {}!", name));
} | let cells = (0..width * height)
.map(|i| { |
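// --- Illustrative sketch (not part of the original file) ---
// Driving the Universe API natively (e.g. from a test); in the browser the
// same calls are made from JavaScript through the wasm-bindgen exports.
#[allow(dead_code)]
fn example_glider() -> String {
    let mut universe = Universe::new();
    universe.set_width(8);
    universe.set_height(8);
    universe.set_cells(&[(1, 2), (2, 3), (3, 1), (3, 2), (3, 3)]); // a glider
    universe.tick();
    universe.render()
}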
databaseaccounts.go | package documentdb
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"context"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/validation"
"github.com/Azure/go-autorest/tracing"
"net/http"
)
// DatabaseAccountsClient is the azure Cosmos DB Database Service Resource Provider REST API
type DatabaseAccountsClient struct {
BaseClient
}
// NewDatabaseAccountsClient creates an instance of the DatabaseAccountsClient client.
func | (subscriptionID string) DatabaseAccountsClient {
return NewDatabaseAccountsClientWithBaseURI(DefaultBaseURI, subscriptionID)
}
// NewDatabaseAccountsClientWithBaseURI creates an instance of the DatabaseAccountsClient client using a custom
// endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure
// stack).
func NewDatabaseAccountsClientWithBaseURI(baseURI string, subscriptionID string) DatabaseAccountsClient {
return DatabaseAccountsClient{NewWithBaseURI(baseURI, subscriptionID)}
}
// CheckNameExists checks that the Azure Cosmos DB account name already exists. A valid account name may contain only
// lowercase letters, numbers, and the '-' character, and must be between 3 and 50 characters.
// Parameters:
// accountName - cosmos DB database account name.
func (client DatabaseAccountsClient) CheckNameExists(ctx context.Context, accountName string) (result autorest.Response, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/DatabaseAccountsClient.CheckNameExists")
defer func() {
sc := -1
if result.Response != nil {
sc = result.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: accountName,
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 50, Chain: nil},
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
{Target: "accountName", Name: validation.Pattern, Rule: `^[a-z0-9]+(-[a-z0-9]+)*`, Chain: nil}}}}); err != nil {
return result, validation.NewError("documentdb.DatabaseAccountsClient", "CheckNameExists", err.Error())
}
req, err := client.CheckNameExistsPreparer(ctx, accountName)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "CheckNameExists", nil, "Failure preparing request")
return
}
resp, err := client.CheckNameExistsSender(req)
if err != nil {
result.Response = resp
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "CheckNameExists", resp, "Failure sending request")
return
}
result, err = client.CheckNameExistsResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "CheckNameExists", resp, "Failure responding to request")
return
}
return
}
// CheckNameExistsPreparer prepares the CheckNameExists request.
func (client DatabaseAccountsClient) CheckNameExistsPreparer(ctx context.Context, accountName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"accountName": autorest.Encode("path", accountName),
}
const APIVersion = "2015-04-08"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsHead(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/providers/Microsoft.DocumentDB/databaseAccountNames/{accountName}", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// CheckNameExistsSender sends the CheckNameExists request. The method will close the
// http.Response Body if it receives an error.
func (client DatabaseAccountsClient) CheckNameExistsSender(req *http.Request) (*http.Response, error) {
return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}
// CheckNameExistsResponder handles the response to the CheckNameExists request. The method always
// closes the http.Response Body.
func (client DatabaseAccountsClient) CheckNameExistsResponder(resp *http.Response) (result autorest.Response, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNotFound),
autorest.ByClosing())
result.Response = resp
return
}
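// --- Illustrative sketch (not part of the generated file) ---
// A typical call to the CheckNameExists round-trip defined above; the caller
// supplies an authorized client and a candidate account name.
func exampleAccountNameTaken(ctx context.Context, client DatabaseAccountsClient, name string) (bool, error) {
	resp, err := client.CheckNameExists(ctx, name)
	if err != nil {
		return false, err
	}
	// 200 OK means the name already exists; 404 means it is still available.
	return resp.StatusCode == http.StatusOK, nil
}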
// CreateOrUpdate creates or updates an Azure Cosmos DB database account.
// Parameters:
// resourceGroupName - name of an Azure resource group.
// accountName - cosmos DB database account name.
// createUpdateParameters - the parameters to provide for the current database account.
func (client DatabaseAccountsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, accountName string, createUpdateParameters DatabaseAccountCreateUpdateParameters) (result DatabaseAccountsCreateOrUpdateFuture, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/DatabaseAccountsClient.CreateOrUpdate")
defer func() {
sc := -1
if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
{TargetValue: accountName,
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 50, Chain: nil},
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
{Target: "accountName", Name: validation.Pattern, Rule: `^[a-z0-9]+(-[a-z0-9]+)*`, Chain: nil}}},
{TargetValue: createUpdateParameters,
Constraints: []validation.Constraint{{Target: "createUpdateParameters.DatabaseAccountCreateUpdateProperties", Name: validation.Null, Rule: true,
Chain: []validation.Constraint{{Target: "createUpdateParameters.DatabaseAccountCreateUpdateProperties.ConsistencyPolicy", Name: validation.Null, Rule: false,
Chain: []validation.Constraint{{Target: "createUpdateParameters.DatabaseAccountCreateUpdateProperties.ConsistencyPolicy.MaxStalenessPrefix", Name: validation.Null, Rule: false,
Chain: []validation.Constraint{{Target: "createUpdateParameters.DatabaseAccountCreateUpdateProperties.ConsistencyPolicy.MaxStalenessPrefix", Name: validation.InclusiveMaximum, Rule: int64(2147483647), Chain: nil},
{Target: "createUpdateParameters.DatabaseAccountCreateUpdateProperties.ConsistencyPolicy.MaxStalenessPrefix", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil},
}},
{Target: "createUpdateParameters.DatabaseAccountCreateUpdateProperties.ConsistencyPolicy.MaxIntervalInSeconds", Name: validation.Null, Rule: false,
Chain: []validation.Constraint{{Target: "createUpdateParameters.DatabaseAccountCreateUpdateProperties.ConsistencyPolicy.MaxIntervalInSeconds", Name: validation.InclusiveMaximum, Rule: int64(86400), Chain: nil},
{Target: "createUpdateParameters.DatabaseAccountCreateUpdateProperties.ConsistencyPolicy.MaxIntervalInSeconds", Name: validation.InclusiveMinimum, Rule: int64(5), Chain: nil},
}},
}},
{Target: "createUpdateParameters.DatabaseAccountCreateUpdateProperties.Locations", Name: validation.Null, Rule: true, Chain: nil},
{Target: "createUpdateParameters.DatabaseAccountCreateUpdateProperties.DatabaseAccountOfferType", Name: validation.Null, Rule: true, Chain: nil},
}}}}}); err != nil {
return result, validation.NewError("documentdb.DatabaseAccountsClient", "CreateOrUpdate", err.Error())
}
req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, accountName, createUpdateParameters)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "CreateOrUpdate", nil, "Failure preparing request")
return
}
result, err = client.CreateOrUpdateSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "CreateOrUpdate", nil, "Failure sending request")
return
}
return
}
// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
func (client DatabaseAccountsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, accountName string, createUpdateParameters DatabaseAccountCreateUpdateParameters) (*http.Request, error) {
pathParameters := map[string]interface{}{
"accountName": autorest.Encode("path", accountName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2015-04-08"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPut(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}", pathParameters),
autorest.WithJSON(createUpdateParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
// http.Response Body if it receives an error.
func (client DatabaseAccountsClient) CreateOrUpdateSender(req *http.Request) (future DatabaseAccountsCreateOrUpdateFuture, err error) {
var resp *http.Response
resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
var azf azure.Future
azf, err = azure.NewFutureFromResponse(resp)
future.FutureAPI = &azf
future.Result = future.result
return
}
// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
// closes the http.Response Body.
func (client DatabaseAccountsClient) CreateOrUpdateResponder(resp *http.Response) (result DatabaseAccount, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// CreateUpdateCassandraKeyspace create or update an Azure Cosmos DB Cassandra keyspace
// Parameters:
// resourceGroupName - name of an Azure resource group.
// accountName - cosmos DB database account name.
// keyspaceName - cosmos DB keyspace name.
// createUpdateCassandraKeyspaceParameters - the parameters to provide for the current Cassandra keyspace.
func (client DatabaseAccountsClient) CreateUpdateCassandraKeyspace(ctx context.Context, resourceGroupName string, accountName string, keyspaceName string, createUpdateCassandraKeyspaceParameters CassandraKeyspaceCreateUpdateParameters) (result DatabaseAccountsCreateUpdateCassandraKeyspaceFuture, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/DatabaseAccountsClient.CreateUpdateCassandraKeyspace")
defer func() {
sc := -1
if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
{TargetValue: accountName,
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 50, Chain: nil},
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
{Target: "accountName", Name: validation.Pattern, Rule: `^[a-z0-9]+(-[a-z0-9]+)*`, Chain: nil}}},
{TargetValue: createUpdateCassandraKeyspaceParameters,
Constraints: []validation.Constraint{{Target: "createUpdateCassandraKeyspaceParameters.CassandraKeyspaceCreateUpdateProperties", Name: validation.Null, Rule: true,
Chain: []validation.Constraint{{Target: "createUpdateCassandraKeyspaceParameters.CassandraKeyspaceCreateUpdateProperties.Resource", Name: validation.Null, Rule: true,
Chain: []validation.Constraint{{Target: "createUpdateCassandraKeyspaceParameters.CassandraKeyspaceCreateUpdateProperties.Resource.ID", Name: validation.Null, Rule: true, Chain: nil}}},
{Target: "createUpdateCassandraKeyspaceParameters.CassandraKeyspaceCreateUpdateProperties.Options", Name: validation.Null, Rule: true, Chain: nil},
}}}}}); err != nil {
return result, validation.NewError("documentdb.DatabaseAccountsClient", "CreateUpdateCassandraKeyspace", err.Error())
}
req, err := client.CreateUpdateCassandraKeyspacePreparer(ctx, resourceGroupName, accountName, keyspaceName, createUpdateCassandraKeyspaceParameters)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "CreateUpdateCassandraKeyspace", nil, "Failure preparing request")
return
}
result, err = client.CreateUpdateCassandraKeyspaceSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "CreateUpdateCassandraKeyspace", nil, "Failure sending request")
return
}
return
}
// CreateUpdateCassandraKeyspacePreparer prepares the CreateUpdateCassandraKeyspace request.
func (client DatabaseAccountsClient) CreateUpdateCassandraKeyspacePreparer(ctx context.Context, resourceGroupName string, accountName string, keyspaceName string, createUpdateCassandraKeyspaceParameters CassandraKeyspaceCreateUpdateParameters) (*http.Request, error) {
pathParameters := map[string]interface{}{
"accountName": autorest.Encode("path", accountName),
"keyspaceName": autorest.Encode("path", keyspaceName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2015-04-08"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPut(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/apis/cassandra/keyspaces/{keyspaceName}", pathParameters),
autorest.WithJSON(createUpdateCassandraKeyspaceParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// CreateUpdateCassandraKeyspaceSender sends the CreateUpdateCassandraKeyspace request. The method will close the
// http.Response Body if it receives an error.
func (client DatabaseAccountsClient) CreateUpdateCassandraKeyspaceSender(req *http.Request) (future DatabaseAccountsCreateUpdateCassandraKeyspaceFuture, err error) {
var resp *http.Response
resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
var azf azure.Future
azf, err = azure.NewFutureFromResponse(resp)
future.FutureAPI = &azf
future.Result = future.result
return
}
// CreateUpdateCassandraKeyspaceResponder handles the response to the CreateUpdateCassandraKeyspace request. The method always
// closes the http.Response Body.
func (client DatabaseAccountsClient) CreateUpdateCassandraKeyspaceResponder(resp *http.Response) (result CassandraKeyspace, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// CreateUpdateCassandraTable create or update an Azure Cosmos DB Cassandra Table
// Parameters:
// resourceGroupName - name of an Azure resource group.
// accountName - cosmos DB database account name.
// keyspaceName - cosmos DB keyspace name.
// tableName - cosmos DB table name.
// createUpdateCassandraTableParameters - the parameters to provide for the current Cassandra Table.
func (client DatabaseAccountsClient) CreateUpdateCassandraTable(ctx context.Context, resourceGroupName string, accountName string, keyspaceName string, tableName string, createUpdateCassandraTableParameters CassandraTableCreateUpdateParameters) (result DatabaseAccountsCreateUpdateCassandraTableFuture, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/DatabaseAccountsClient.CreateUpdateCassandraTable")
defer func() {
sc := -1
if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
{TargetValue: accountName,
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 50, Chain: nil},
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
{Target: "accountName", Name: validation.Pattern, Rule: `^[a-z0-9]+(-[a-z0-9]+)*`, Chain: nil}}},
{TargetValue: createUpdateCassandraTableParameters,
Constraints: []validation.Constraint{{Target: "createUpdateCassandraTableParameters.CassandraTableCreateUpdateProperties", Name: validation.Null, Rule: true,
Chain: []validation.Constraint{{Target: "createUpdateCassandraTableParameters.CassandraTableCreateUpdateProperties.Resource", Name: validation.Null, Rule: true,
Chain: []validation.Constraint{{Target: "createUpdateCassandraTableParameters.CassandraTableCreateUpdateProperties.Resource.ID", Name: validation.Null, Rule: true, Chain: nil}}},
{Target: "createUpdateCassandraTableParameters.CassandraTableCreateUpdateProperties.Options", Name: validation.Null, Rule: true, Chain: nil},
}}}}}); err != nil {
return result, validation.NewError("documentdb.DatabaseAccountsClient", "CreateUpdateCassandraTable", err.Error())
}
req, err := client.CreateUpdateCassandraTablePreparer(ctx, resourceGroupName, accountName, keyspaceName, tableName, createUpdateCassandraTableParameters)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "CreateUpdateCassandraTable", nil, "Failure preparing request")
return
}
result, err = client.CreateUpdateCassandraTableSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "CreateUpdateCassandraTable", nil, "Failure sending request")
return
}
return
}
// CreateUpdateCassandraTablePreparer prepares the CreateUpdateCassandraTable request.
func (client DatabaseAccountsClient) CreateUpdateCassandraTablePreparer(ctx context.Context, resourceGroupName string, accountName string, keyspaceName string, tableName string, createUpdateCassandraTableParameters CassandraTableCreateUpdateParameters) (*http.Request, error) {
pathParameters := map[string]interface{}{
"accountName": autorest.Encode("path", accountName),
"keyspaceName": autorest.Encode("path", keyspaceName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
"tableName": autorest.Encode("path", tableName),
}
const APIVersion = "2015-04-08"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPut(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/apis/cassandra/keyspaces/{keyspaceName}/tables/{tableName}", pathParameters),
autorest.WithJSON(createUpdateCassandraTableParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// CreateUpdateCassandraTableSender sends the CreateUpdateCassandraTable request. The method will close the
// http.Response Body if it receives an error.
func (client DatabaseAccountsClient) CreateUpdateCassandraTableSender(req *http.Request) (future DatabaseAccountsCreateUpdateCassandraTableFuture, err error) {
var resp *http.Response
resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
var azf azure.Future
azf, err = azure.NewFutureFromResponse(resp)
future.FutureAPI = &azf
future.Result = future.result
return
}
// CreateUpdateCassandraTableResponder handles the response to the CreateUpdateCassandraTable request. The method always
// closes the http.Response Body.
func (client DatabaseAccountsClient) CreateUpdateCassandraTableResponder(resp *http.Response) (result CassandraTable, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
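// exampleCreateUpdateCassandraTableUsage is an illustrative sketch, not part of the
// generated client: it shows how a caller might drive the long-running
// CreateUpdateCassandraTable operation above end to end. The resource group, account,
// keyspace and table names are placeholders.
func exampleCreateUpdateCassandraTableUsage(ctx context.Context, client DatabaseAccountsClient, params CassandraTableCreateUpdateParameters) (CassandraTable, error) {
	// Kick off the asynchronous create/update; the returned future tracks the operation.
	future, err := client.CreateUpdateCassandraTable(ctx, "my-rg", "myaccount", "mykeyspace", "mytable", params)
	if err != nil {
		return CassandraTable{}, err
	}
	// Block until the service reports the operation as finished, polling with the
	// client's configured autorest.Client.
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		return CassandraTable{}, err
	}
	// Retrieve the typed result once the operation has completed.
	return future.Result(client)
}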
// CreateUpdateGremlinDatabase create or update an Azure Cosmos DB Gremlin database
// Parameters:
// resourceGroupName - name of an Azure resource group.
// accountName - cosmos DB database account name.
// databaseName - cosmos DB database name.
// createUpdateGremlinDatabaseParameters - the parameters to provide for the current Gremlin database.
func (client DatabaseAccountsClient) CreateUpdateGremlinDatabase(ctx context.Context, resourceGroupName string, accountName string, databaseName string, createUpdateGremlinDatabaseParameters GremlinDatabaseCreateUpdateParameters) (result DatabaseAccountsCreateUpdateGremlinDatabaseFuture, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/DatabaseAccountsClient.CreateUpdateGremlinDatabase")
defer func() {
sc := -1
if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
{TargetValue: accountName,
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 50, Chain: nil},
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
{Target: "accountName", Name: validation.Pattern, Rule: `^[a-z0-9]+(-[a-z0-9]+)*`, Chain: nil}}},
{TargetValue: createUpdateGremlinDatabaseParameters,
Constraints: []validation.Constraint{{Target: "createUpdateGremlinDatabaseParameters.GremlinDatabaseCreateUpdateProperties", Name: validation.Null, Rule: true,
Chain: []validation.Constraint{{Target: "createUpdateGremlinDatabaseParameters.GremlinDatabaseCreateUpdateProperties.Resource", Name: validation.Null, Rule: true,
Chain: []validation.Constraint{{Target: "createUpdateGremlinDatabaseParameters.GremlinDatabaseCreateUpdateProperties.Resource.ID", Name: validation.Null, Rule: true, Chain: nil}}},
{Target: "createUpdateGremlinDatabaseParameters.GremlinDatabaseCreateUpdateProperties.Options", Name: validation.Null, Rule: true, Chain: nil},
}}}}}); err != nil {
return result, validation.NewError("documentdb.DatabaseAccountsClient", "CreateUpdateGremlinDatabase", err.Error())
}
req, err := client.CreateUpdateGremlinDatabasePreparer(ctx, resourceGroupName, accountName, databaseName, createUpdateGremlinDatabaseParameters)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "CreateUpdateGremlinDatabase", nil, "Failure preparing request")
return
}
result, err = client.CreateUpdateGremlinDatabaseSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "CreateUpdateGremlinDatabase", nil, "Failure sending request")
return
}
return
}
// CreateUpdateGremlinDatabasePreparer prepares the CreateUpdateGremlinDatabase request.
func (client DatabaseAccountsClient) CreateUpdateGremlinDatabasePreparer(ctx context.Context, resourceGroupName string, accountName string, databaseName string, createUpdateGremlinDatabaseParameters GremlinDatabaseCreateUpdateParameters) (*http.Request, error) {
pathParameters := map[string]interface{}{
"accountName": autorest.Encode("path", accountName),
"databaseName": autorest.Encode("path", databaseName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2015-04-08"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPut(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/apis/gremlin/databases/{databaseName}", pathParameters),
autorest.WithJSON(createUpdateGremlinDatabaseParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// CreateUpdateGremlinDatabaseSender sends the CreateUpdateGremlinDatabase request. The method will close the
// http.Response Body if it receives an error.
func (client DatabaseAccountsClient) CreateUpdateGremlinDatabaseSender(req *http.Request) (future DatabaseAccountsCreateUpdateGremlinDatabaseFuture, err error) {
var resp *http.Response
resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
var azf azure.Future
azf, err = azure.NewFutureFromResponse(resp)
future.FutureAPI = &azf
future.Result = future.result
return
}
// CreateUpdateGremlinDatabaseResponder handles the response to the CreateUpdateGremlinDatabase request. The method always
// closes the http.Response Body.
func (client DatabaseAccountsClient) CreateUpdateGremlinDatabaseResponder(resp *http.Response) (result GremlinDatabase, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
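// exampleGremlinDatabaseManualFlow is an illustrative sketch, not part of the generated
// client: it drives the Preparer/Sender/Responder triplet above directly, which can be
// useful when a caller needs to inspect the raw request or response. Resource names are
// placeholders.
func exampleGremlinDatabaseManualFlow(ctx context.Context, client DatabaseAccountsClient, params GremlinDatabaseCreateUpdateParameters) (GremlinDatabase, error) {
	req, err := client.CreateUpdateGremlinDatabasePreparer(ctx, "my-rg", "myaccount", "mydb", params)
	if err != nil {
		return GremlinDatabase{}, err
	}
	future, err := client.CreateUpdateGremlinDatabaseSender(req)
	if err != nil {
		return GremlinDatabase{}, err
	}
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		return GremlinDatabase{}, err
	}
	// Unmarshal the final polling response into the typed result.
	return client.CreateUpdateGremlinDatabaseResponder(future.Response())
}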
// CreateUpdateGremlinGraph create or update an Azure Cosmos DB Gremlin graph
// Parameters:
// resourceGroupName - name of an Azure resource group.
// accountName - cosmos DB database account name.
// databaseName - cosmos DB database name.
// graphName - cosmos DB graph name.
// createUpdateGremlinGraphParameters - the parameters to provide for the current Gremlin graph.
func (client DatabaseAccountsClient) CreateUpdateGremlinGraph(ctx context.Context, resourceGroupName string, accountName string, databaseName string, graphName string, createUpdateGremlinGraphParameters GremlinGraphCreateUpdateParameters) (result DatabaseAccountsCreateUpdateGremlinGraphFuture, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/DatabaseAccountsClient.CreateUpdateGremlinGraph")
defer func() {
sc := -1
if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
{TargetValue: accountName,
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 50, Chain: nil},
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
{Target: "accountName", Name: validation.Pattern, Rule: `^[a-z0-9]+(-[a-z0-9]+)*`, Chain: nil}}},
{TargetValue: createUpdateGremlinGraphParameters,
Constraints: []validation.Constraint{{Target: "createUpdateGremlinGraphParameters.GremlinGraphCreateUpdateProperties", Name: validation.Null, Rule: true,
Chain: []validation.Constraint{{Target: "createUpdateGremlinGraphParameters.GremlinGraphCreateUpdateProperties.Resource", Name: validation.Null, Rule: true,
Chain: []validation.Constraint{{Target: "createUpdateGremlinGraphParameters.GremlinGraphCreateUpdateProperties.Resource.ID", Name: validation.Null, Rule: true, Chain: nil}}},
{Target: "createUpdateGremlinGraphParameters.GremlinGraphCreateUpdateProperties.Options", Name: validation.Null, Rule: true, Chain: nil},
}}}}}); err != nil {
return result, validation.NewError("documentdb.DatabaseAccountsClient", "CreateUpdateGremlinGraph", err.Error())
}
req, err := client.CreateUpdateGremlinGraphPreparer(ctx, resourceGroupName, accountName, databaseName, graphName, createUpdateGremlinGraphParameters)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "CreateUpdateGremlinGraph", nil, "Failure preparing request")
return
}
result, err = client.CreateUpdateGremlinGraphSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "CreateUpdateGremlinGraph", nil, "Failure sending request")
return
}
return
}
// CreateUpdateGremlinGraphPreparer prepares the CreateUpdateGremlinGraph request.
func (client DatabaseAccountsClient) CreateUpdateGremlinGraphPreparer(ctx context.Context, resourceGroupName string, accountName string, databaseName string, graphName string, createUpdateGremlinGraphParameters GremlinGraphCreateUpdateParameters) (*http.Request, error) {
pathParameters := map[string]interface{}{
"accountName": autorest.Encode("path", accountName),
"databaseName": autorest.Encode("path", databaseName),
"graphName": autorest.Encode("path", graphName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2015-04-08"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPut(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/apis/gremlin/databases/{databaseName}/graphs/{graphName}", pathParameters),
autorest.WithJSON(createUpdateGremlinGraphParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// CreateUpdateGremlinGraphSender sends the CreateUpdateGremlinGraph request. The method will close the
// http.Response Body if it receives an error.
func (client DatabaseAccountsClient) CreateUpdateGremlinGraphSender(req *http.Request) (future DatabaseAccountsCreateUpdateGremlinGraphFuture, err error) {
var resp *http.Response
resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
var azf azure.Future
azf, err = azure.NewFutureFromResponse(resp)
future.FutureAPI = &azf
future.Result = future.result
return
}
// CreateUpdateGremlinGraphResponder handles the response to the CreateUpdateGremlinGraph request. The method always
// closes the http.Response Body.
func (client DatabaseAccountsClient) CreateUpdateGremlinGraphResponder(resp *http.Response) (result GremlinGraph, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// CreateUpdateMongoDBCollection create or update an Azure Cosmos DB MongoDB Collection
// Parameters:
// resourceGroupName - name of an Azure resource group.
// accountName - cosmos DB database account name.
// databaseName - cosmos DB database name.
// collectionName - cosmos DB collection name.
// createUpdateMongoDBCollectionParameters - the parameters to provide for the current MongoDB Collection.
func (client DatabaseAccountsClient) CreateUpdateMongoDBCollection(ctx context.Context, resourceGroupName string, accountName string, databaseName string, collectionName string, createUpdateMongoDBCollectionParameters MongoDBCollectionCreateUpdateParameters) (result DatabaseAccountsCreateUpdateMongoDBCollectionFuture, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/DatabaseAccountsClient.CreateUpdateMongoDBCollection")
defer func() {
sc := -1
if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
{TargetValue: accountName,
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 50, Chain: nil},
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
{Target: "accountName", Name: validation.Pattern, Rule: `^[a-z0-9]+(-[a-z0-9]+)*`, Chain: nil}}},
{TargetValue: createUpdateMongoDBCollectionParameters,
Constraints: []validation.Constraint{{Target: "createUpdateMongoDBCollectionParameters.MongoDBCollectionCreateUpdateProperties", Name: validation.Null, Rule: true,
Chain: []validation.Constraint{{Target: "createUpdateMongoDBCollectionParameters.MongoDBCollectionCreateUpdateProperties.Resource", Name: validation.Null, Rule: true,
Chain: []validation.Constraint{{Target: "createUpdateMongoDBCollectionParameters.MongoDBCollectionCreateUpdateProperties.Resource.ID", Name: validation.Null, Rule: true, Chain: nil}}},
{Target: "createUpdateMongoDBCollectionParameters.MongoDBCollectionCreateUpdateProperties.Options", Name: validation.Null, Rule: true, Chain: nil},
}}}}}); err != nil {
return result, validation.NewError("documentdb.DatabaseAccountsClient", "CreateUpdateMongoDBCollection", err.Error())
}
req, err := client.CreateUpdateMongoDBCollectionPreparer(ctx, resourceGroupName, accountName, databaseName, collectionName, createUpdateMongoDBCollectionParameters)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "CreateUpdateMongoDBCollection", nil, "Failure preparing request")
return
}
result, err = client.CreateUpdateMongoDBCollectionSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "CreateUpdateMongoDBCollection", nil, "Failure sending request")
return
}
return
}
// CreateUpdateMongoDBCollectionPreparer prepares the CreateUpdateMongoDBCollection request.
func (client DatabaseAccountsClient) CreateUpdateMongoDBCollectionPreparer(ctx context.Context, resourceGroupName string, accountName string, databaseName string, collectionName string, createUpdateMongoDBCollectionParameters MongoDBCollectionCreateUpdateParameters) (*http.Request, error) {
pathParameters := map[string]interface{}{
"accountName": autorest.Encode("path", accountName),
"collectionName": autorest.Encode("path", collectionName),
"databaseName": autorest.Encode("path", databaseName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2015-04-08"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPut(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/apis/mongodb/databases/{databaseName}/collections/{collectionName}", pathParameters),
autorest.WithJSON(createUpdateMongoDBCollectionParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// CreateUpdateMongoDBCollectionSender sends the CreateUpdateMongoDBCollection request. The method will close the
// http.Response Body if it receives an error.
func (client DatabaseAccountsClient) CreateUpdateMongoDBCollectionSender(req *http.Request) (future DatabaseAccountsCreateUpdateMongoDBCollectionFuture, err error) {
var resp *http.Response
resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
var azf azure.Future
azf, err = azure.NewFutureFromResponse(resp)
future.FutureAPI = &azf
future.Result = future.result
return
}
// CreateUpdateMongoDBCollectionResponder handles the response to the CreateUpdateMongoDBCollection request. The method always
// closes the http.Response Body.
func (client DatabaseAccountsClient) CreateUpdateMongoDBCollectionResponder(resp *http.Response) (result MongoDBCollection, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// CreateUpdateMongoDBDatabase create or update an Azure Cosmos DB MongoDB database
// Parameters:
// resourceGroupName - name of an Azure resource group.
// accountName - cosmos DB database account name.
// databaseName - cosmos DB database name.
// createUpdateMongoDBDatabaseParameters - the parameters to provide for the current MongoDB database.
func (client DatabaseAccountsClient) CreateUpdateMongoDBDatabase(ctx context.Context, resourceGroupName string, accountName string, databaseName string, createUpdateMongoDBDatabaseParameters MongoDBDatabaseCreateUpdateParameters) (result DatabaseAccountsCreateUpdateMongoDBDatabaseFuture, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/DatabaseAccountsClient.CreateUpdateMongoDBDatabase")
defer func() {
sc := -1
if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
{TargetValue: accountName,
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 50, Chain: nil},
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
{Target: "accountName", Name: validation.Pattern, Rule: `^[a-z0-9]+(-[a-z0-9]+)*`, Chain: nil}}},
{TargetValue: createUpdateMongoDBDatabaseParameters,
Constraints: []validation.Constraint{{Target: "createUpdateMongoDBDatabaseParameters.MongoDBDatabaseCreateUpdateProperties", Name: validation.Null, Rule: true,
Chain: []validation.Constraint{{Target: "createUpdateMongoDBDatabaseParameters.MongoDBDatabaseCreateUpdateProperties.Resource", Name: validation.Null, Rule: true,
Chain: []validation.Constraint{{Target: "createUpdateMongoDBDatabaseParameters.MongoDBDatabaseCreateUpdateProperties.Resource.ID", Name: validation.Null, Rule: true, Chain: nil}}},
{Target: "createUpdateMongoDBDatabaseParameters.MongoDBDatabaseCreateUpdateProperties.Options", Name: validation.Null, Rule: true, Chain: nil},
}}}}}); err != nil {
return result, validation.NewError("documentdb.DatabaseAccountsClient", "CreateUpdateMongoDBDatabase", err.Error())
}
req, err := client.CreateUpdateMongoDBDatabasePreparer(ctx, resourceGroupName, accountName, databaseName, createUpdateMongoDBDatabaseParameters)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "CreateUpdateMongoDBDatabase", nil, "Failure preparing request")
return
}
result, err = client.CreateUpdateMongoDBDatabaseSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "CreateUpdateMongoDBDatabase", nil, "Failure sending request")
return
}
return
}
// CreateUpdateMongoDBDatabasePreparer prepares the CreateUpdateMongoDBDatabase request.
func (client DatabaseAccountsClient) CreateUpdateMongoDBDatabasePreparer(ctx context.Context, resourceGroupName string, accountName string, databaseName string, createUpdateMongoDBDatabaseParameters MongoDBDatabaseCreateUpdateParameters) (*http.Request, error) {
pathParameters := map[string]interface{}{
"accountName": autorest.Encode("path", accountName),
"databaseName": autorest.Encode("path", databaseName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2015-04-08"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPut(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/apis/mongodb/databases/{databaseName}", pathParameters),
autorest.WithJSON(createUpdateMongoDBDatabaseParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// CreateUpdateMongoDBDatabaseSender sends the CreateUpdateMongoDBDatabase request. The method will close the
// http.Response Body if it receives an error.
func (client DatabaseAccountsClient) CreateUpdateMongoDBDatabaseSender(req *http.Request) (future DatabaseAccountsCreateUpdateMongoDBDatabaseFuture, err error) {
var resp *http.Response
resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
var azf azure.Future
azf, err = azure.NewFutureFromResponse(resp)
future.FutureAPI = &azf
future.Result = future.result
return
}
// CreateUpdateMongoDBDatabaseResponder handles the response to the CreateUpdateMongoDBDatabase request. The method always
// closes the http.Response Body.
func (client DatabaseAccountsClient) CreateUpdateMongoDBDatabaseResponder(resp *http.Response) (result MongoDBDatabase, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
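// exampleMongoDBDatabaseParameters is an illustrative sketch, not part of the generated
// client: it builds the minimal payload that the validation in CreateUpdateMongoDBDatabase
// requires, i.e. non-nil properties, a resource with its ID, and options. The field names
// are assumed from this package's models for API version 2015-04-08, and the database
// name is a placeholder.
func exampleMongoDBDatabaseParameters(databaseName string) MongoDBDatabaseCreateUpdateParameters {
	return MongoDBDatabaseCreateUpdateParameters{
		MongoDBDatabaseCreateUpdateProperties: &MongoDBDatabaseCreateUpdateProperties{
			// Resource.ID carries the database name and must be non-nil to pass validation.
			Resource: &MongoDBDatabaseResource{ID: &databaseName},
			// Options must also be non-nil; an empty map leaves the service defaults in place.
			Options: map[string]*string{},
		},
	}
}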
// CreateUpdateSQLContainer create or update an Azure Cosmos DB SQL container
// Parameters:
// resourceGroupName - name of an Azure resource group.
// accountName - cosmos DB database account name.
// databaseName - cosmos DB database name.
// containerName - cosmos DB container name.
// createUpdateSQLContainerParameters - the parameters to provide for the current SQL container.
func (client DatabaseAccountsClient) CreateUpdateSQLContainer(ctx context.Context, resourceGroupName string, accountName string, databaseName string, containerName string, createUpdateSQLContainerParameters SQLContainerCreateUpdateParameters) (result DatabaseAccountsCreateUpdateSQLContainerFuture, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/DatabaseAccountsClient.CreateUpdateSQLContainer")
defer func() {
sc := -1
if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
{TargetValue: accountName,
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 50, Chain: nil},
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
{Target: "accountName", Name: validation.Pattern, Rule: `^[a-z0-9]+(-[a-z0-9]+)*`, Chain: nil}}},
{TargetValue: createUpdateSQLContainerParameters,
Constraints: []validation.Constraint{{Target: "createUpdateSQLContainerParameters.SQLContainerCreateUpdateProperties", Name: validation.Null, Rule: true,
Chain: []validation.Constraint{{Target: "createUpdateSQLContainerParameters.SQLContainerCreateUpdateProperties.Resource", Name: validation.Null, Rule: true,
Chain: []validation.Constraint{{Target: "createUpdateSQLContainerParameters.SQLContainerCreateUpdateProperties.Resource.ID", Name: validation.Null, Rule: true, Chain: nil}}},
{Target: "createUpdateSQLContainerParameters.SQLContainerCreateUpdateProperties.Options", Name: validation.Null, Rule: true, Chain: nil},
}}}}}); err != nil {
return result, validation.NewError("documentdb.DatabaseAccountsClient", "CreateUpdateSQLContainer", err.Error())
}
req, err := client.CreateUpdateSQLContainerPreparer(ctx, resourceGroupName, accountName, databaseName, containerName, createUpdateSQLContainerParameters)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "CreateUpdateSQLContainer", nil, "Failure preparing request")
return
}
result, err = client.CreateUpdateSQLContainerSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "CreateUpdateSQLContainer", nil, "Failure sending request")
return
}
return
}
// CreateUpdateSQLContainerPreparer prepares the CreateUpdateSQLContainer request.
func (client DatabaseAccountsClient) CreateUpdateSQLContainerPreparer(ctx context.Context, resourceGroupName string, accountName string, databaseName string, containerName string, createUpdateSQLContainerParameters SQLContainerCreateUpdateParameters) (*http.Request, error) {
pathParameters := map[string]interface{}{
"accountName": autorest.Encode("path", accountName),
"containerName": autorest.Encode("path", containerName),
"databaseName": autorest.Encode("path", databaseName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2015-04-08"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPut(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/apis/sql/databases/{databaseName}/containers/{containerName}", pathParameters),
autorest.WithJSON(createUpdateSQLContainerParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// CreateUpdateSQLContainerSender sends the CreateUpdateSQLContainer request. The method will close the
// http.Response Body if it receives an error.
func (client DatabaseAccountsClient) CreateUpdateSQLContainerSender(req *http.Request) (future DatabaseAccountsCreateUpdateSQLContainerFuture, err error) {
var resp *http.Response
resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
var azf azure.Future
azf, err = azure.NewFutureFromResponse(resp)
future.FutureAPI = &azf
future.Result = future.result
return
}
// CreateUpdateSQLContainerResponder handles the response to the CreateUpdateSQLContainer request. The method always
// closes the http.Response Body.
func (client DatabaseAccountsClient) CreateUpdateSQLContainerResponder(resp *http.Response) (result SQLContainer, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// CreateUpdateSQLDatabase create or update an Azure Cosmos DB SQL database
// Parameters:
// resourceGroupName - name of an Azure resource group.
// accountName - cosmos DB database account name.
// databaseName - cosmos DB database name.
// createUpdateSQLDatabaseParameters - the parameters to provide for the current SQL database.
func (client DatabaseAccountsClient) CreateUpdateSQLDatabase(ctx context.Context, resourceGroupName string, accountName string, databaseName string, createUpdateSQLDatabaseParameters SQLDatabaseCreateUpdateParameters) (result DatabaseAccountsCreateUpdateSQLDatabaseFuture, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/DatabaseAccountsClient.CreateUpdateSQLDatabase")
defer func() {
sc := -1
if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
{TargetValue: accountName,
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 50, Chain: nil},
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
{Target: "accountName", Name: validation.Pattern, Rule: `^[a-z0-9]+(-[a-z0-9]+)*`, Chain: nil}}},
{TargetValue: createUpdateSQLDatabaseParameters,
Constraints: []validation.Constraint{{Target: "createUpdateSQLDatabaseParameters.SQLDatabaseCreateUpdateProperties", Name: validation.Null, Rule: true,
Chain: []validation.Constraint{{Target: "createUpdateSQLDatabaseParameters.SQLDatabaseCreateUpdateProperties.Resource", Name: validation.Null, Rule: true,
Chain: []validation.Constraint{{Target: "createUpdateSQLDatabaseParameters.SQLDatabaseCreateUpdateProperties.Resource.ID", Name: validation.Null, Rule: true, Chain: nil}}},
{Target: "createUpdateSQLDatabaseParameters.SQLDatabaseCreateUpdateProperties.Options", Name: validation.Null, Rule: true, Chain: nil},
}}}}}); err != nil {
return result, validation.NewError("documentdb.DatabaseAccountsClient", "CreateUpdateSQLDatabase", err.Error())
}
req, err := client.CreateUpdateSQLDatabasePreparer(ctx, resourceGroupName, accountName, databaseName, createUpdateSQLDatabaseParameters)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "CreateUpdateSQLDatabase", nil, "Failure preparing request")
return
}
result, err = client.CreateUpdateSQLDatabaseSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "CreateUpdateSQLDatabase", nil, "Failure sending request")
return
}
return
}
// CreateUpdateSQLDatabasePreparer prepares the CreateUpdateSQLDatabase request.
func (client DatabaseAccountsClient) CreateUpdateSQLDatabasePreparer(ctx context.Context, resourceGroupName string, accountName string, databaseName string, createUpdateSQLDatabaseParameters SQLDatabaseCreateUpdateParameters) (*http.Request, error) {
pathParameters := map[string]interface{}{
"accountName": autorest.Encode("path", accountName),
"databaseName": autorest.Encode("path", databaseName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2015-04-08"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPut(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/apis/sql/databases/{databaseName}", pathParameters),
autorest.WithJSON(createUpdateSQLDatabaseParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// CreateUpdateSQLDatabaseSender sends the CreateUpdateSQLDatabase request. The method will close the
// http.Response Body if it receives an error.
func (client DatabaseAccountsClient) CreateUpdateSQLDatabaseSender(req *http.Request) (future DatabaseAccountsCreateUpdateSQLDatabaseFuture, err error) {
var resp *http.Response
resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
var azf azure.Future
azf, err = azure.NewFutureFromResponse(resp)
future.FutureAPI = &azf
future.Result = future.result
return
}
// CreateUpdateSQLDatabaseResponder handles the response to the CreateUpdateSQLDatabase request. The method always
// closes the http.Response Body.
func (client DatabaseAccountsClient) CreateUpdateSQLDatabaseResponder(resp *http.Response) (result SQLDatabase, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// CreateUpdateTable create or update an Azure Cosmos DB Table
// Parameters:
// resourceGroupName - name of an Azure resource group.
// accountName - cosmos DB database account name.
// tableName - cosmos DB table name.
// createUpdateTableParameters - the parameters to provide for the current Table.
func (client DatabaseAccountsClient) CreateUpdateTable(ctx context.Context, resourceGroupName string, accountName string, tableName string, createUpdateTableParameters TableCreateUpdateParameters) (result DatabaseAccountsCreateUpdateTableFuture, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/DatabaseAccountsClient.CreateUpdateTable")
defer func() {
sc := -1
if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
{TargetValue: accountName,
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 50, Chain: nil},
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
{Target: "accountName", Name: validation.Pattern, Rule: `^[a-z0-9]+(-[a-z0-9]+)*`, Chain: nil}}},
{TargetValue: createUpdateTableParameters,
Constraints: []validation.Constraint{{Target: "createUpdateTableParameters.TableCreateUpdateProperties", Name: validation.Null, Rule: true,
Chain: []validation.Constraint{{Target: "createUpdateTableParameters.TableCreateUpdateProperties.Resource", Name: validation.Null, Rule: true,
Chain: []validation.Constraint{{Target: "createUpdateTableParameters.TableCreateUpdateProperties.Resource.ID", Name: validation.Null, Rule: true, Chain: nil}}},
{Target: "createUpdateTableParameters.TableCreateUpdateProperties.Options", Name: validation.Null, Rule: true, Chain: nil},
}}}}}); err != nil {
return result, validation.NewError("documentdb.DatabaseAccountsClient", "CreateUpdateTable", err.Error())
}
req, err := client.CreateUpdateTablePreparer(ctx, resourceGroupName, accountName, tableName, createUpdateTableParameters)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "CreateUpdateTable", nil, "Failure preparing request")
return
}
result, err = client.CreateUpdateTableSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "CreateUpdateTable", nil, "Failure sending request")
return
}
return
}
// CreateUpdateTablePreparer prepares the CreateUpdateTable request.
func (client DatabaseAccountsClient) CreateUpdateTablePreparer(ctx context.Context, resourceGroupName string, accountName string, tableName string, createUpdateTableParameters TableCreateUpdateParameters) (*http.Request, error) {
pathParameters := map[string]interface{}{
"accountName": autorest.Encode("path", accountName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
"tableName": autorest.Encode("path", tableName),
}
const APIVersion = "2015-04-08"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPut(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/apis/table/tables/{tableName}", pathParameters),
autorest.WithJSON(createUpdateTableParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// CreateUpdateTableSender sends the CreateUpdateTable request. The method will close the
// http.Response Body if it receives an error.
func (client DatabaseAccountsClient) CreateUpdateTableSender(req *http.Request) (future DatabaseAccountsCreateUpdateTableFuture, err error) {
var resp *http.Response
resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
var azf azure.Future
azf, err = azure.NewFutureFromResponse(resp)
future.FutureAPI = &azf
future.Result = future.result
return
}
// CreateUpdateTableResponder handles the response to the CreateUpdateTable request. The method always
// closes the http.Response Body.
func (client DatabaseAccountsClient) CreateUpdateTableResponder(resp *http.Response) (result Table, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// Delete deletes an existing Azure Cosmos DB database account.
// Parameters:
// resourceGroupName - name of an Azure resource group.
// accountName - cosmos DB database account name.
func (client DatabaseAccountsClient) Delete(ctx context.Context, resourceGroupName string, accountName string) (result DatabaseAccountsDeleteFuture, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/DatabaseAccountsClient.Delete")
defer func() {
sc := -1
if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
{TargetValue: accountName,
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 50, Chain: nil},
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
{Target: "accountName", Name: validation.Pattern, Rule: `^[a-z0-9]+(-[a-z0-9]+)*`, Chain: nil}}}}); err != nil {
return result, validation.NewError("documentdb.DatabaseAccountsClient", "Delete", err.Error())
}
req, err := client.DeletePreparer(ctx, resourceGroupName, accountName)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "Delete", nil, "Failure preparing request")
return
}
result, err = client.DeleteSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "Delete", nil, "Failure sending request")
return
}
return
}
// DeletePreparer prepares the Delete request.
func (client DatabaseAccountsClient) DeletePreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"accountName": autorest.Encode("path", accountName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2015-04-08"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsDelete(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// DeleteSender sends the Delete request. The method will close the
// http.Response Body if it receives an error.
func (client DatabaseAccountsClient) DeleteSender(req *http.Request) (future DatabaseAccountsDeleteFuture, err error) {
var resp *http.Response
resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
var azf azure.Future
azf, err = azure.NewFutureFromResponse(resp)
future.FutureAPI = &azf
future.Result = future.result
return
}
// DeleteResponder handles the response to the Delete request. The method always
// closes the http.Response Body.
func (client DatabaseAccountsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
autorest.ByClosing())
result.Response = resp
return
}
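// exampleDeleteDatabaseAccountUsage is an illustrative sketch, not part of the generated
// client: it deletes a database account with the Delete operation above and waits for the
// long-running operation to finish. The resource group and account names are placeholders.
func exampleDeleteDatabaseAccountUsage(ctx context.Context, client DatabaseAccountsClient) error {
	future, err := client.Delete(ctx, "my-rg", "myaccount")
	if err != nil {
		return err
	}
	// Poll until the service reports the delete as complete.
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		return err
	}
	// Delete has no typed body; the result is just the final autorest.Response.
	_, err = future.Result(client)
	return err
}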
// DeleteCassandraKeyspace deletes an existing Azure Cosmos DB Cassandra keyspace.
// Parameters:
// resourceGroupName - name of an Azure resource group.
// accountName - cosmos DB database account name.
// keyspaceName - cosmos DB keyspace name.
func (client DatabaseAccountsClient) DeleteCassandraKeyspace(ctx context.Context, resourceGroupName string, accountName string, keyspaceName string) (result DatabaseAccountsDeleteCassandraKeyspaceFuture, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/DatabaseAccountsClient.DeleteCassandraKeyspace")
defer func() {
sc := -1
if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
{TargetValue: accountName,
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 50, Chain: nil},
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
{Target: "accountName", Name: validation.Pattern, Rule: `^[a-z0-9]+(-[a-z0-9]+)*`, Chain: nil}}}}); err != nil {
return result, validation.NewError("documentdb.DatabaseAccountsClient", "DeleteCassandraKeyspace", err.Error())
}
req, err := client.DeleteCassandraKeyspacePreparer(ctx, resourceGroupName, accountName, keyspaceName)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "DeleteCassandraKeyspace", nil, "Failure preparing request")
return
}
result, err = client.DeleteCassandraKeyspaceSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "DeleteCassandraKeyspace", nil, "Failure sending request")
return
}
return
}
// DeleteCassandraKeyspacePreparer prepares the DeleteCassandraKeyspace request.
func (client DatabaseAccountsClient) DeleteCassandraKeyspacePreparer(ctx context.Context, resourceGroupName string, accountName string, keyspaceName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"accountName": autorest.Encode("path", accountName),
"keyspaceName": autorest.Encode("path", keyspaceName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2015-04-08"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsDelete(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/apis/cassandra/keyspaces/{keyspaceName}", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// DeleteCassandraKeyspaceSender sends the DeleteCassandraKeyspace request. The method will close the
// http.Response Body if it receives an error.
func (client DatabaseAccountsClient) DeleteCassandraKeyspaceSender(req *http.Request) (future DatabaseAccountsDeleteCassandraKeyspaceFuture, err error) {
var resp *http.Response
resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
var azf azure.Future
azf, err = azure.NewFutureFromResponse(resp)
future.FutureAPI = &azf
future.Result = future.result
return
}
// DeleteCassandraKeyspaceResponder handles the response to the DeleteCassandraKeyspace request. The method always
// closes the http.Response Body.
func (client DatabaseAccountsClient) DeleteCassandraKeyspaceResponder(resp *http.Response) (result autorest.Response, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
autorest.ByClosing())
result.Response = resp
return
}
// DeleteCassandraTable deletes an existing Azure Cosmos DB Cassandra table.
// Parameters:
// resourceGroupName - name of an Azure resource group.
// accountName - cosmos DB database account name.
// keyspaceName - cosmos DB keyspace name.
// tableName - cosmos DB table name.
func (client DatabaseAccountsClient) DeleteCassandraTable(ctx context.Context, resourceGroupName string, accountName string, keyspaceName string, tableName string) (result DatabaseAccountsDeleteCassandraTableFuture, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/DatabaseAccountsClient.DeleteCassandraTable")
defer func() {
sc := -1
if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
{TargetValue: accountName,
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 50, Chain: nil},
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
{Target: "accountName", Name: validation.Pattern, Rule: `^[a-z0-9]+(-[a-z0-9]+)*`, Chain: nil}}}}); err != nil {
return result, validation.NewError("documentdb.DatabaseAccountsClient", "DeleteCassandraTable", err.Error())
}
req, err := client.DeleteCassandraTablePreparer(ctx, resourceGroupName, accountName, keyspaceName, tableName)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "DeleteCassandraTable", nil, "Failure preparing request")
return
}
result, err = client.DeleteCassandraTableSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "DeleteCassandraTable", nil, "Failure sending request")
return
}
return
}
// DeleteCassandraTablePreparer prepares the DeleteCassandraTable request.
func (client DatabaseAccountsClient) DeleteCassandraTablePreparer(ctx context.Context, resourceGroupName string, accountName string, keyspaceName string, tableName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"accountName": autorest.Encode("path", accountName),
"keyspaceName": autorest.Encode("path", keyspaceName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
"tableName": autorest.Encode("path", tableName),
}
const APIVersion = "2015-04-08"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsDelete(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/apis/cassandra/keyspaces/{keyspaceName}/tables/{tableName}", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// DeleteCassandraTableSender sends the DeleteCassandraTable request. The method will close the
// http.Response Body if it receives an error.
func (client DatabaseAccountsClient) DeleteCassandraTableSender(req *http.Request) (future DatabaseAccountsDeleteCassandraTableFuture, err error) {
var resp *http.Response
resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
var azf azure.Future
azf, err = azure.NewFutureFromResponse(resp)
future.FutureAPI = &azf
future.Result = future.result
return
}
// DeleteCassandraTableResponder handles the response to the DeleteCassandraTable request. The method always
// closes the http.Response Body.
func (client DatabaseAccountsClient) DeleteCassandraTableResponder(resp *http.Response) (result autorest.Response, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
autorest.ByClosing())
result.Response = resp
return
}
// DeleteGremlinDatabase deletes an existing Azure Cosmos DB Gremlin database.
// Parameters:
// resourceGroupName - name of an Azure resource group.
// accountName - cosmos DB database account name.
// databaseName - cosmos DB database name.
func (client DatabaseAccountsClient) DeleteGremlinDatabase(ctx context.Context, resourceGroupName string, accountName string, databaseName string) (result DatabaseAccountsDeleteGremlinDatabaseFuture, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/DatabaseAccountsClient.DeleteGremlinDatabase")
defer func() {
sc := -1
if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
{TargetValue: accountName,
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 50, Chain: nil},
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
{Target: "accountName", Name: validation.Pattern, Rule: `^[a-z0-9]+(-[a-z0-9]+)*`, Chain: nil}}}}); err != nil {
return result, validation.NewError("documentdb.DatabaseAccountsClient", "DeleteGremlinDatabase", err.Error())
}
req, err := client.DeleteGremlinDatabasePreparer(ctx, resourceGroupName, accountName, databaseName)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "DeleteGremlinDatabase", nil, "Failure preparing request")
return
}
result, err = client.DeleteGremlinDatabaseSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "DeleteGremlinDatabase", nil, "Failure sending request")
return
}
return
}
// DeleteGremlinDatabasePreparer prepares the DeleteGremlinDatabase request.
func (client DatabaseAccountsClient) DeleteGremlinDatabasePreparer(ctx context.Context, resourceGroupName string, accountName string, databaseName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"accountName": autorest.Encode("path", accountName),
"databaseName": autorest.Encode("path", databaseName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2015-04-08"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsDelete(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/apis/gremlin/databases/{databaseName}", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// DeleteGremlinDatabaseSender sends the DeleteGremlinDatabase request. The method will close the
// http.Response Body if it receives an error.
func (client DatabaseAccountsClient) DeleteGremlinDatabaseSender(req *http.Request) (future DatabaseAccountsDeleteGremlinDatabaseFuture, err error) {
var resp *http.Response
resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
var azf azure.Future
azf, err = azure.NewFutureFromResponse(resp)
future.FutureAPI = &azf
future.Result = future.result
return
}
// DeleteGremlinDatabaseResponder handles the response to the DeleteGremlinDatabase request. The method always
// closes the http.Response Body.
func (client DatabaseAccountsClient) DeleteGremlinDatabaseResponder(resp *http.Response) (result autorest.Response, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
autorest.ByClosing())
result.Response = resp
return
}
// DeleteGremlinGraph deletes an existing Azure Cosmos DB Gremlin graph.
// Parameters:
// resourceGroupName - name of an Azure resource group.
// accountName - cosmos DB database account name.
// databaseName - cosmos DB database name.
// graphName - cosmos DB graph name.
func (client DatabaseAccountsClient) DeleteGremlinGraph(ctx context.Context, resourceGroupName string, accountName string, databaseName string, graphName string) (result DatabaseAccountsDeleteGremlinGraphFuture, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/DatabaseAccountsClient.DeleteGremlinGraph")
defer func() {
sc := -1
if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
{TargetValue: accountName,
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 50, Chain: nil},
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
{Target: "accountName", Name: validation.Pattern, Rule: `^[a-z0-9]+(-[a-z0-9]+)*`, Chain: nil}}}}); err != nil {
return result, validation.NewError("documentdb.DatabaseAccountsClient", "DeleteGremlinGraph", err.Error())
}
req, err := client.DeleteGremlinGraphPreparer(ctx, resourceGroupName, accountName, databaseName, graphName)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "DeleteGremlinGraph", nil, "Failure preparing request")
return
}
result, err = client.DeleteGremlinGraphSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "DeleteGremlinGraph", nil, "Failure sending request")
return
}
return
}
// DeleteGremlinGraphPreparer prepares the DeleteGremlinGraph request.
func (client DatabaseAccountsClient) DeleteGremlinGraphPreparer(ctx context.Context, resourceGroupName string, accountName string, databaseName string, graphName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"accountName": autorest.Encode("path", accountName),
"databaseName": autorest.Encode("path", databaseName),
"graphName": autorest.Encode("path", graphName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2015-04-08"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsDelete(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/apis/gremlin/databases/{databaseName}/graphs/{graphName}", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// DeleteGremlinGraphSender sends the DeleteGremlinGraph request. The method will close the
// http.Response Body if it receives an error.
func (client DatabaseAccountsClient) DeleteGremlinGraphSender(req *http.Request) (future DatabaseAccountsDeleteGremlinGraphFuture, err error) {
var resp *http.Response
resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
var azf azure.Future
azf, err = azure.NewFutureFromResponse(resp)
future.FutureAPI = &azf
future.Result = future.result
return
}
// DeleteGremlinGraphResponder handles the response to the DeleteGremlinGraph request. The method always
// closes the http.Response Body.
func (client DatabaseAccountsClient) DeleteGremlinGraphResponder(resp *http.Response) (result autorest.Response, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
autorest.ByClosing())
result.Response = resp
return
}
// DeleteMongoDBCollection deletes an existing Azure Cosmos DB MongoDB Collection.
// Parameters:
// resourceGroupName - name of an Azure resource group.
// accountName - cosmos DB database account name.
// databaseName - cosmos DB database name.
// collectionName - cosmos DB collection name.
func (client DatabaseAccountsClient) DeleteMongoDBCollection(ctx context.Context, resourceGroupName string, accountName string, databaseName string, collectionName string) (result DatabaseAccountsDeleteMongoDBCollectionFuture, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/DatabaseAccountsClient.DeleteMongoDBCollection")
defer func() {
sc := -1
if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
{TargetValue: accountName,
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 50, Chain: nil},
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
{Target: "accountName", Name: validation.Pattern, Rule: `^[a-z0-9]+(-[a-z0-9]+)*`, Chain: nil}}}}); err != nil {
return result, validation.NewError("documentdb.DatabaseAccountsClient", "DeleteMongoDBCollection", err.Error())
}
req, err := client.DeleteMongoDBCollectionPreparer(ctx, resourceGroupName, accountName, databaseName, collectionName)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "DeleteMongoDBCollection", nil, "Failure preparing request")
return
}
result, err = client.DeleteMongoDBCollectionSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "DeleteMongoDBCollection", nil, "Failure sending request")
return
}
return
}
// DeleteMongoDBCollectionPreparer prepares the DeleteMongoDBCollection request.
func (client DatabaseAccountsClient) DeleteMongoDBCollectionPreparer(ctx context.Context, resourceGroupName string, accountName string, databaseName string, collectionName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"accountName": autorest.Encode("path", accountName),
"collectionName": autorest.Encode("path", collectionName),
"databaseName": autorest.Encode("path", databaseName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2015-04-08"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsDelete(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/apis/mongodb/databases/{databaseName}/collections/{collectionName}", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// DeleteMongoDBCollectionSender sends the DeleteMongoDBCollection request. The method will close the
// http.Response Body if it receives an error.
func (client DatabaseAccountsClient) DeleteMongoDBCollectionSender(req *http.Request) (future DatabaseAccountsDeleteMongoDBCollectionFuture, err error) {
var resp *http.Response
resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
var azf azure.Future
azf, err = azure.NewFutureFromResponse(resp)
future.FutureAPI = &azf
future.Result = future.result
return
}
// DeleteMongoDBCollectionResponder handles the response to the DeleteMongoDBCollection request. The method always
// closes the http.Response Body.
func (client DatabaseAccountsClient) DeleteMongoDBCollectionResponder(resp *http.Response) (result autorest.Response, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
autorest.ByClosing())
result.Response = resp
return
}
// DeleteMongoDBDatabase deletes an existing Azure Cosmos DB MongoDB database.
// Parameters:
// resourceGroupName - name of an Azure resource group.
// accountName - cosmos DB database account name.
// databaseName - cosmos DB database name.
func (client DatabaseAccountsClient) DeleteMongoDBDatabase(ctx context.Context, resourceGroupName string, accountName string, databaseName string) (result DatabaseAccountsDeleteMongoDBDatabaseFuture, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/DatabaseAccountsClient.DeleteMongoDBDatabase")
defer func() {
sc := -1
if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
{TargetValue: accountName,
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 50, Chain: nil},
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
{Target: "accountName", Name: validation.Pattern, Rule: `^[a-z0-9]+(-[a-z0-9]+)*`, Chain: nil}}}}); err != nil {
return result, validation.NewError("documentdb.DatabaseAccountsClient", "DeleteMongoDBDatabase", err.Error())
}
req, err := client.DeleteMongoDBDatabasePreparer(ctx, resourceGroupName, accountName, databaseName)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "DeleteMongoDBDatabase", nil, "Failure preparing request")
return
}
result, err = client.DeleteMongoDBDatabaseSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "DeleteMongoDBDatabase", nil, "Failure sending request")
return
}
return
}
// DeleteMongoDBDatabasePreparer prepares the DeleteMongoDBDatabase request.
func (client DatabaseAccountsClient) DeleteMongoDBDatabasePreparer(ctx context.Context, resourceGroupName string, accountName string, databaseName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"accountName": autorest.Encode("path", accountName),
"databaseName": autorest.Encode("path", databaseName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2015-04-08"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsDelete(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/apis/mongodb/databases/{databaseName}", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// DeleteMongoDBDatabaseSender sends the DeleteMongoDBDatabase request. The method will close the
// http.Response Body if it receives an error.
func (client DatabaseAccountsClient) DeleteMongoDBDatabaseSender(req *http.Request) (future DatabaseAccountsDeleteMongoDBDatabaseFuture, err error) {
var resp *http.Response
resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
var azf azure.Future
azf, err = azure.NewFutureFromResponse(resp)
future.FutureAPI = &azf
future.Result = future.result
return
}
// DeleteMongoDBDatabaseResponder handles the response to the DeleteMongoDBDatabase request. The method always
// closes the http.Response Body.
func (client DatabaseAccountsClient) DeleteMongoDBDatabaseResponder(resp *http.Response) (result autorest.Response, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
autorest.ByClosing())
result.Response = resp
return
}
// DeleteSQLContainer deletes an existing Azure Cosmos DB SQL container.
// Parameters:
// resourceGroupName - name of an Azure resource group.
// accountName - cosmos DB database account name.
// databaseName - cosmos DB database name.
// containerName - cosmos DB container name.
func (client DatabaseAccountsClient) DeleteSQLContainer(ctx context.Context, resourceGroupName string, accountName string, databaseName string, containerName string) (result DatabaseAccountsDeleteSQLContainerFuture, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/DatabaseAccountsClient.DeleteSQLContainer")
defer func() {
sc := -1
if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
{TargetValue: accountName,
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 50, Chain: nil},
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
{Target: "accountName", Name: validation.Pattern, Rule: `^[a-z0-9]+(-[a-z0-9]+)*`, Chain: nil}}}}); err != nil {
return result, validation.NewError("documentdb.DatabaseAccountsClient", "DeleteSQLContainer", err.Error())
}
req, err := client.DeleteSQLContainerPreparer(ctx, resourceGroupName, accountName, databaseName, containerName)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "DeleteSQLContainer", nil, "Failure preparing request")
return
}
result, err = client.DeleteSQLContainerSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "DeleteSQLContainer", nil, "Failure sending request")
return
}
return
}
// DeleteSQLContainerPreparer prepares the DeleteSQLContainer request.
func (client DatabaseAccountsClient) DeleteSQLContainerPreparer(ctx context.Context, resourceGroupName string, accountName string, databaseName string, containerName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"accountName": autorest.Encode("path", accountName),
"containerName": autorest.Encode("path", containerName),
"databaseName": autorest.Encode("path", databaseName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2015-04-08"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsDelete(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/apis/sql/databases/{databaseName}/containers/{containerName}", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// DeleteSQLContainerSender sends the DeleteSQLContainer request. The method will close the
// http.Response Body if it receives an error.
func (client DatabaseAccountsClient) DeleteSQLContainerSender(req *http.Request) (future DatabaseAccountsDeleteSQLContainerFuture, err error) {
var resp *http.Response
resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
var azf azure.Future
azf, err = azure.NewFutureFromResponse(resp)
future.FutureAPI = &azf
future.Result = future.result
return
}
// DeleteSQLContainerResponder handles the response to the DeleteSQLContainer request. The method always
// closes the http.Response Body.
func (client DatabaseAccountsClient) DeleteSQLContainerResponder(resp *http.Response) (result autorest.Response, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
autorest.ByClosing())
result.Response = resp
return
}
// DeleteSQLDatabase deletes an existing Azure Cosmos DB SQL database.
// Parameters:
// resourceGroupName - name of an Azure resource group.
// accountName - cosmos DB database account name.
// databaseName - cosmos DB database name.
func (client DatabaseAccountsClient) DeleteSQLDatabase(ctx context.Context, resourceGroupName string, accountName string, databaseName string) (result DatabaseAccountsDeleteSQLDatabaseFuture, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/DatabaseAccountsClient.DeleteSQLDatabase")
defer func() {
sc := -1
if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
{TargetValue: accountName,
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 50, Chain: nil},
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
{Target: "accountName", Name: validation.Pattern, Rule: `^[a-z0-9]+(-[a-z0-9]+)*`, Chain: nil}}}}); err != nil {
return result, validation.NewError("documentdb.DatabaseAccountsClient", "DeleteSQLDatabase", err.Error())
}
req, err := client.DeleteSQLDatabasePreparer(ctx, resourceGroupName, accountName, databaseName)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "DeleteSQLDatabase", nil, "Failure preparing request")
return
}
result, err = client.DeleteSQLDatabaseSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "DeleteSQLDatabase", nil, "Failure sending request")
return
}
return
}
// DeleteSQLDatabasePreparer prepares the DeleteSQLDatabase request.
func (client DatabaseAccountsClient) DeleteSQLDatabasePreparer(ctx context.Context, resourceGroupName string, accountName string, databaseName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"accountName": autorest.Encode("path", accountName),
"databaseName": autorest.Encode("path", databaseName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2015-04-08"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsDelete(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/apis/sql/databases/{databaseName}", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// DeleteSQLDatabaseSender sends the DeleteSQLDatabase request. The method will close the
// http.Response Body if it receives an error.
func (client DatabaseAccountsClient) DeleteSQLDatabaseSender(req *http.Request) (future DatabaseAccountsDeleteSQLDatabaseFuture, err error) {
var resp *http.Response
resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
var azf azure.Future
azf, err = azure.NewFutureFromResponse(resp)
future.FutureAPI = &azf
future.Result = future.result
return
}
// DeleteSQLDatabaseResponder handles the response to the DeleteSQLDatabase request. The method always
// closes the http.Response Body.
func (client DatabaseAccountsClient) DeleteSQLDatabaseResponder(resp *http.Response) (result autorest.Response, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
autorest.ByClosing())
result.Response = resp
return
}
// DeleteTable deletes an existing Azure Cosmos DB Table.
// Parameters:
// resourceGroupName - name of an Azure resource group.
// accountName - cosmos DB database account name.
// tableName - cosmos DB table name.
func (client DatabaseAccountsClient) DeleteTable(ctx context.Context, resourceGroupName string, accountName string, tableName string) (result DatabaseAccountsDeleteTableFuture, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/DatabaseAccountsClient.DeleteTable")
defer func() {
sc := -1
if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
{TargetValue: accountName,
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 50, Chain: nil},
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
{Target: "accountName", Name: validation.Pattern, Rule: `^[a-z0-9]+(-[a-z0-9]+)*`, Chain: nil}}}}); err != nil {
return result, validation.NewError("documentdb.DatabaseAccountsClient", "DeleteTable", err.Error())
}
req, err := client.DeleteTablePreparer(ctx, resourceGroupName, accountName, tableName)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "DeleteTable", nil, "Failure preparing request")
return
}
result, err = client.DeleteTableSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "DeleteTable", nil, "Failure sending request")
return
}
return
}
// DeleteTablePreparer prepares the DeleteTable request.
func (client DatabaseAccountsClient) DeleteTablePreparer(ctx context.Context, resourceGroupName string, accountName string, tableName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"accountName": autorest.Encode("path", accountName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
"tableName": autorest.Encode("path", tableName),
}
const APIVersion = "2015-04-08"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsDelete(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/apis/table/tables/{tableName}", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// DeleteTableSender sends the DeleteTable request. The method will close the
// http.Response Body if it receives an error.
func (client DatabaseAccountsClient) DeleteTableSender(req *http.Request) (future DatabaseAccountsDeleteTableFuture, err error) {
var resp *http.Response
resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
var azf azure.Future
azf, err = azure.NewFutureFromResponse(resp)
future.FutureAPI = &azf
future.Result = future.result
return
}
// DeleteTableResponder handles the response to the DeleteTable request. The method always
// closes the http.Response Body.
func (client DatabaseAccountsClient) DeleteTableResponder(resp *http.Response) (result autorest.Response, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
autorest.ByClosing())
result.Response = resp
return
}
// FailoverPriorityChange changes the failover priority for the Azure Cosmos DB database account. A failover priority
// of 0 indicates a write region. The maximum value for a failover priority is (total number of regions - 1). Failover
// priority values must be unique for each of the regions in which the database account exists.
// Parameters:
// resourceGroupName - name of an Azure resource group.
// accountName - cosmos DB database account name.
// failoverParameters - the new failover policies for the database account.
func (client DatabaseAccountsClient) FailoverPriorityChange(ctx context.Context, resourceGroupName string, accountName string, failoverParameters FailoverPolicies) (result DatabaseAccountsFailoverPriorityChangeFuture, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/DatabaseAccountsClient.FailoverPriorityChange")
defer func() {
sc := -1
if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
{TargetValue: accountName,
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 50, Chain: nil},
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
{Target: "accountName", Name: validation.Pattern, Rule: `^[a-z0-9]+(-[a-z0-9]+)*`, Chain: nil}}},
{TargetValue: failoverParameters,
Constraints: []validation.Constraint{{Target: "failoverParameters.FailoverPolicies", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
return result, validation.NewError("documentdb.DatabaseAccountsClient", "FailoverPriorityChange", err.Error())
}
req, err := client.FailoverPriorityChangePreparer(ctx, resourceGroupName, accountName, failoverParameters)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "FailoverPriorityChange", nil, "Failure preparing request")
return
}
result, err = client.FailoverPriorityChangeSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "FailoverPriorityChange", nil, "Failure sending request")
return
}
return
}
// FailoverPriorityChangePreparer prepares the FailoverPriorityChange request.
func (client DatabaseAccountsClient) FailoverPriorityChangePreparer(ctx context.Context, resourceGroupName string, accountName string, failoverParameters FailoverPolicies) (*http.Request, error) {
pathParameters := map[string]interface{}{
"accountName": autorest.Encode("path", accountName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2015-04-08"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPost(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/failoverPriorityChange", pathParameters),
autorest.WithJSON(failoverParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// FailoverPriorityChangeSender sends the FailoverPriorityChange request. The method will close the
// http.Response Body if it receives an error.
func (client DatabaseAccountsClient) FailoverPriorityChangeSender(req *http.Request) (future DatabaseAccountsFailoverPriorityChangeFuture, err error) {
var resp *http.Response
resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
var azf azure.Future
azf, err = azure.NewFutureFromResponse(resp)
future.FutureAPI = &azf
future.Result = future.result
return
}
// FailoverPriorityChangeResponder handles the response to the FailoverPriorityChange request. The method always
// closes the http.Response Body.
func (client DatabaseAccountsClient) FailoverPriorityChangeResponder(resp *http.Response) (result autorest.Response, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
autorest.ByClosing())
result.Response = resp
return
}
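// An illustrative sketch, not part of the generated client: changing failover priorities means
// submitting the complete, unique set of priorities for every region the account spans, with
// priority 0 naming the write region. The region names below are placeholders, and the sketch
// assumes the FailoverPolicies/FailoverPolicy models in this package expose LocationName and
// FailoverPriority fields.
func exampleFailoverPriorityChange(ctx context.Context, client DatabaseAccountsClient) error {
	writePriority, readPriority := int32(0), int32(1)
	westUS, eastUS := "West US", "East US"
	policies := FailoverPolicies{
		FailoverPolicies: &[]FailoverPolicy{
			{LocationName: &westUS, FailoverPriority: &writePriority}, // promoted to write region
			{LocationName: &eastUS, FailoverPriority: &readPriority},  // read region
		},
	}
	future, err := client.FailoverPriorityChange(ctx, "example-rg", "example-account", policies)
	if err != nil {
		return err
	}
	// FailoverPriorityChange is long-running as well; wait for the new priorities to take effect.
	return future.WaitForCompletionRef(ctx, client.Client)
}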
// Get retrieves the properties of an existing Azure Cosmos DB database account.
// Parameters:
// resourceGroupName - name of an Azure resource group.
// accountName - cosmos DB database account name.
func (client DatabaseAccountsClient) Get(ctx context.Context, resourceGroupName string, accountName string) (result DatabaseAccount, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/DatabaseAccountsClient.Get")
defer func() {
sc := -1
if result.Response.Response != nil {
sc = result.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
{TargetValue: accountName,
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 50, Chain: nil},
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
{Target: "accountName", Name: validation.Pattern, Rule: `^[a-z0-9]+(-[a-z0-9]+)*`, Chain: nil}}}}); err != nil {
return result, validation.NewError("documentdb.DatabaseAccountsClient", "Get", err.Error())
}
req, err := client.GetPreparer(ctx, resourceGroupName, accountName)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "Get", nil, "Failure preparing request")
return
}
resp, err := client.GetSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "Get", resp, "Failure sending request")
return
}
result, err = client.GetResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "Get", resp, "Failure responding to request")
return
}
return
}
// GetPreparer prepares the Get request.
func (client DatabaseAccountsClient) GetPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"accountName": autorest.Encode("path", accountName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2015-04-08"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// GetSender sends the Get request. The method will close the
// http.Response Body if it receives an error.
func (client DatabaseAccountsClient) GetSender(req *http.Request) (*http.Response, error) {
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// GetResponder handles the response to the Get request. The method always
// closes the http.Response Body.
func (client DatabaseAccountsClient) GetResponder(resp *http.Response) (result DatabaseAccount, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
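// An illustrative sketch, not part of the generated client: Get is synchronous and simply
// composes the three generated phases, so the pipeline can also be driven step by step when a
// request or response needs to be inspected in between. The resource names are placeholders.
func exampleGetDatabaseAccount(ctx context.Context, client DatabaseAccountsClient) (DatabaseAccount, error) {
	// Build the request: base URL, path and query parameters, and API version.
	req, err := client.GetPreparer(ctx, "example-rg", "example-account")
	if err != nil {
		return DatabaseAccount{}, err
	}
	// Send it with the client's retry-with-registration policy applied.
	resp, err := client.GetSender(req)
	if err != nil {
		return DatabaseAccount{}, err
	}
	// Unmarshal the JSON body into the model and close the response body.
	return client.GetResponder(resp)
}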
// GetCassandraKeyspace gets the Cassandra keyspace under an existing Azure Cosmos DB database account with the
// provided name.
// Parameters:
// resourceGroupName - name of an Azure resource group.
// accountName - cosmos DB database account name.
// keyspaceName - cosmos DB keyspace name.
func (client DatabaseAccountsClient) GetCassandraKeyspace(ctx context.Context, resourceGroupName string, accountName string, keyspaceName string) (result CassandraKeyspace, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/DatabaseAccountsClient.GetCassandraKeyspace")
defer func() {
sc := -1
if result.Response.Response != nil {
sc = result.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
{TargetValue: accountName,
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 50, Chain: nil},
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
{Target: "accountName", Name: validation.Pattern, Rule: `^[a-z0-9]+(-[a-z0-9]+)*`, Chain: nil}}}}); err != nil {
return result, validation.NewError("documentdb.DatabaseAccountsClient", "GetCassandraKeyspace", err.Error())
}
req, err := client.GetCassandraKeyspacePreparer(ctx, resourceGroupName, accountName, keyspaceName)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "GetCassandraKeyspace", nil, "Failure preparing request")
return
}
resp, err := client.GetCassandraKeyspaceSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "GetCassandraKeyspace", resp, "Failure sending request")
return
}
result, err = client.GetCassandraKeyspaceResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "GetCassandraKeyspace", resp, "Failure responding to request")
return
}
return
}
// GetCassandraKeyspacePreparer prepares the GetCassandraKeyspace request.
func (client DatabaseAccountsClient) GetCassandraKeyspacePreparer(ctx context.Context, resourceGroupName string, accountName string, keyspaceName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"accountName": autorest.Encode("path", accountName),
"keyspaceName": autorest.Encode("path", keyspaceName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2015-04-08"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/apis/cassandra/keyspaces/{keyspaceName}", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// GetCassandraKeyspaceSender sends the GetCassandraKeyspace request. The method will close the
// http.Response Body if it receives an error.
func (client DatabaseAccountsClient) GetCassandraKeyspaceSender(req *http.Request) (*http.Response, error) {
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// GetCassandraKeyspaceResponder handles the response to the GetCassandraKeyspace request. The method always
// closes the http.Response Body.
func (client DatabaseAccountsClient) GetCassandraKeyspaceResponder(resp *http.Response) (result CassandraKeyspace, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// GetCassandraKeyspaceThroughput gets the RUs per second of the Cassandra keyspace under an existing Azure Cosmos DB
// database account with the provided name.
// Parameters:
// resourceGroupName - name of an Azure resource group.
// accountName - cosmos DB database account name.
// keyspaceName - cosmos DB keyspace name.
func (client DatabaseAccountsClient) GetCassandraKeyspaceThroughput(ctx context.Context, resourceGroupName string, accountName string, keyspaceName string) (result Throughput, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/DatabaseAccountsClient.GetCassandraKeyspaceThroughput")
defer func() {
sc := -1
if result.Response.Response != nil {
sc = result.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
{TargetValue: accountName,
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 50, Chain: nil},
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
{Target: "accountName", Name: validation.Pattern, Rule: `^[a-z0-9]+(-[a-z0-9]+)*`, Chain: nil}}}}); err != nil {
return result, validation.NewError("documentdb.DatabaseAccountsClient", "GetCassandraKeyspaceThroughput", err.Error())
}
req, err := client.GetCassandraKeyspaceThroughputPreparer(ctx, resourceGroupName, accountName, keyspaceName)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "GetCassandraKeyspaceThroughput", nil, "Failure preparing request")
return
}
resp, err := client.GetCassandraKeyspaceThroughputSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "GetCassandraKeyspaceThroughput", resp, "Failure sending request")
return
}
result, err = client.GetCassandraKeyspaceThroughputResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "GetCassandraKeyspaceThroughput", resp, "Failure responding to request")
return
}
return
}
// GetCassandraKeyspaceThroughputPreparer prepares the GetCassandraKeyspaceThroughput request.
func (client DatabaseAccountsClient) GetCassandraKeyspaceThroughputPreparer(ctx context.Context, resourceGroupName string, accountName string, keyspaceName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"accountName": autorest.Encode("path", accountName),
"keyspaceName": autorest.Encode("path", keyspaceName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2015-04-08"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/apis/cassandra/keyspaces/{keyspaceName}/settings/throughput", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// GetCassandraKeyspaceThroughputSender sends the GetCassandraKeyspaceThroughput request. The method will close the
// http.Response Body if it receives an error.
func (client DatabaseAccountsClient) GetCassandraKeyspaceThroughputSender(req *http.Request) (*http.Response, error) {
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// GetCassandraKeyspaceThroughputResponder handles the response to the GetCassandraKeyspaceThroughput request. The method always
// closes the http.Response Body.
func (client DatabaseAccountsClient) GetCassandraKeyspaceThroughputResponder(resp *http.Response) (result Throughput, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
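// An illustrative sketch, not part of the generated client: every method validates its arguments
// before any request is sent, so constraint violations (account names must be 3-50 characters of
// lowercase letters, digits and single hyphens; resource group names have their own length and
// pattern rules) surface locally as validation errors. The names below are placeholders.
func exampleValidationFailure(ctx context.Context, client DatabaseAccountsClient) error {
	// "db" is shorter than the three-character minimum enforced on account names, so this
	// returns a validation error immediately and never issues an HTTP request.
	_, err := client.GetCassandraKeyspaceThroughput(ctx, "example-rg", "db", "example-keyspace")
	return err
}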
// GetCassandraTable gets the Cassandra table under an existing Azure Cosmos DB database account.
// Parameters:
// resourceGroupName - name of an Azure resource group.
// accountName - cosmos DB database account name.
// keyspaceName - cosmos DB keyspace name.
// tableName - cosmos DB table name.
func (client DatabaseAccountsClient) GetCassandraTable(ctx context.Context, resourceGroupName string, accountName string, keyspaceName string, tableName string) (result CassandraTable, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/DatabaseAccountsClient.GetCassandraTable")
defer func() {
sc := -1
if result.Response.Response != nil {
sc = result.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
{TargetValue: accountName,
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 50, Chain: nil},
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
{Target: "accountName", Name: validation.Pattern, Rule: `^[a-z0-9]+(-[a-z0-9]+)*`, Chain: nil}}}}); err != nil {
return result, validation.NewError("documentdb.DatabaseAccountsClient", "GetCassandraTable", err.Error())
}
req, err := client.GetCassandraTablePreparer(ctx, resourceGroupName, accountName, keyspaceName, tableName)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "GetCassandraTable", nil, "Failure preparing request")
return
}
resp, err := client.GetCassandraTableSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "GetCassandraTable", resp, "Failure sending request")
return
}
result, err = client.GetCassandraTableResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "GetCassandraTable", resp, "Failure responding to request")
return
}
return
}
// GetCassandraTablePreparer prepares the GetCassandraTable request.
func (client DatabaseAccountsClient) GetCassandraTablePreparer(ctx context.Context, resourceGroupName string, accountName string, keyspaceName string, tableName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"accountName": autorest.Encode("path", accountName),
"keyspaceName": autorest.Encode("path", keyspaceName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
"tableName": autorest.Encode("path", tableName),
}
const APIVersion = "2015-04-08"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/apis/cassandra/keyspaces/{keyspaceName}/tables/{tableName}", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// GetCassandraTableSender sends the GetCassandraTable request. The method will close the
// http.Response Body if it receives an error.
func (client DatabaseAccountsClient) GetCassandraTableSender(req *http.Request) (*http.Response, error) {
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// GetCassandraTableResponder handles the response to the GetCassandraTable request. The method always
// closes the http.Response Body.
func (client DatabaseAccountsClient) GetCassandraTableResponder(resp *http.Response) (result CassandraTable, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// GetCassandraTableThroughput gets the RUs per second of the Cassandra table under an existing Azure Cosmos DB
// database account with the provided name.
// Parameters:
// resourceGroupName - name of an Azure resource group.
// accountName - cosmos DB database account name.
// keyspaceName - cosmos DB keyspace name.
// tableName - cosmos DB table name.
func (client DatabaseAccountsClient) GetCassandraTableThroughput(ctx context.Context, resourceGroupName string, accountName string, keyspaceName string, tableName string) (result Throughput, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/DatabaseAccountsClient.GetCassandraTableThroughput")
defer func() {
sc := -1
if result.Response.Response != nil {
sc = result.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
{TargetValue: accountName,
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 50, Chain: nil},
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
{Target: "accountName", Name: validation.Pattern, Rule: `^[a-z0-9]+(-[a-z0-9]+)*`, Chain: nil}}}}); err != nil {
return result, validation.NewError("documentdb.DatabaseAccountsClient", "GetCassandraTableThroughput", err.Error())
}
req, err := client.GetCassandraTableThroughputPreparer(ctx, resourceGroupName, accountName, keyspaceName, tableName)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "GetCassandraTableThroughput", nil, "Failure preparing request")
return
}
resp, err := client.GetCassandraTableThroughputSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "GetCassandraTableThroughput", resp, "Failure sending request")
return
}
result, err = client.GetCassandraTableThroughputResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "GetCassandraTableThroughput", resp, "Failure responding to request")
return
}
return
}
// GetCassandraTableThroughputPreparer prepares the GetCassandraTableThroughput request.
func (client DatabaseAccountsClient) GetCassandraTableThroughputPreparer(ctx context.Context, resourceGroupName string, accountName string, keyspaceName string, tableName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"accountName": autorest.Encode("path", accountName),
"keyspaceName": autorest.Encode("path", keyspaceName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
"tableName": autorest.Encode("path", tableName),
}
const APIVersion = "2015-04-08"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/apis/cassandra/keyspaces/{keyspaceName}/tables/{tableName}/settings/throughput", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// GetCassandraTableThroughputSender sends the GetCassandraTableThroughput request. The method will close the
// http.Response Body if it receives an error.
func (client DatabaseAccountsClient) GetCassandraTableThroughputSender(req *http.Request) (*http.Response, error) {
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// GetCassandraTableThroughputResponder handles the response to the GetCassandraTableThroughput request. The method always
// closes the http.Response Body.
func (client DatabaseAccountsClient) GetCassandraTableThroughputResponder(resp *http.Response) (result Throughput, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// GetGremlinDatabase gets the Gremlin database under an existing Azure Cosmos DB database account with the provided
// name.
// Parameters:
// resourceGroupName - name of an Azure resource group.
// accountName - cosmos DB database account name.
// databaseName - cosmos DB database name.
func (client DatabaseAccountsClient) GetGremlinDatabase(ctx context.Context, resourceGroupName string, accountName string, databaseName string) (result GremlinDatabase, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/DatabaseAccountsClient.GetGremlinDatabase")
defer func() {
sc := -1
if result.Response.Response != nil {
sc = result.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
{TargetValue: accountName,
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 50, Chain: nil},
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
{Target: "accountName", Name: validation.Pattern, Rule: `^[a-z0-9]+(-[a-z0-9]+)*`, Chain: nil}}}}); err != nil {
return result, validation.NewError("documentdb.DatabaseAccountsClient", "GetGremlinDatabase", err.Error())
}
req, err := client.GetGremlinDatabasePreparer(ctx, resourceGroupName, accountName, databaseName)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "GetGremlinDatabase", nil, "Failure preparing request")
return
}
resp, err := client.GetGremlinDatabaseSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "GetGremlinDatabase", resp, "Failure sending request")
return
}
result, err = client.GetGremlinDatabaseResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "GetGremlinDatabase", resp, "Failure responding to request")
return
}
return
}
// GetGremlinDatabasePreparer prepares the GetGremlinDatabase request.
func (client DatabaseAccountsClient) GetGremlinDatabasePreparer(ctx context.Context, resourceGroupName string, accountName string, databaseName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"accountName": autorest.Encode("path", accountName),
"databaseName": autorest.Encode("path", databaseName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2015-04-08"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/apis/gremlin/databases/{databaseName}", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// GetGremlinDatabaseSender sends the GetGremlinDatabase request. The method will close the
// http.Response Body if it receives an error.
func (client DatabaseAccountsClient) GetGremlinDatabaseSender(req *http.Request) (*http.Response, error) {
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// GetGremlinDatabaseResponder handles the response to the GetGremlinDatabase request. The method always
// closes the http.Response Body.
func (client DatabaseAccountsClient) GetGremlinDatabaseResponder(resp *http.Response) (result GremlinDatabase, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// GetGremlinDatabaseThroughput gets the RUs per second of the Gremlin database under an existing Azure Cosmos DB
// database account with the provided name.
// Parameters:
// resourceGroupName - name of an Azure resource group.
// accountName - cosmos DB database account name.
// databaseName - cosmos DB database name.
func (client DatabaseAccountsClient) GetGremlinDatabaseThroughput(ctx context.Context, resourceGroupName string, accountName string, databaseName string) (result Throughput, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/DatabaseAccountsClient.GetGremlinDatabaseThroughput")
defer func() {
sc := -1
if result.Response.Response != nil {
sc = result.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
{TargetValue: accountName,
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 50, Chain: nil},
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
{Target: "accountName", Name: validation.Pattern, Rule: `^[a-z0-9]+(-[a-z0-9]+)*`, Chain: nil}}}}); err != nil {
return result, validation.NewError("documentdb.DatabaseAccountsClient", "GetGremlinDatabaseThroughput", err.Error())
}
req, err := client.GetGremlinDatabaseThroughputPreparer(ctx, resourceGroupName, accountName, databaseName)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "GetGremlinDatabaseThroughput", nil, "Failure preparing request")
return
}
resp, err := client.GetGremlinDatabaseThroughputSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "GetGremlinDatabaseThroughput", resp, "Failure sending request")
return
}
result, err = client.GetGremlinDatabaseThroughputResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "GetGremlinDatabaseThroughput", resp, "Failure responding to request")
return
}
return
}
// GetGremlinDatabaseThroughputPreparer prepares the GetGremlinDatabaseThroughput request.
func (client DatabaseAccountsClient) GetGremlinDatabaseThroughputPreparer(ctx context.Context, resourceGroupName string, accountName string, databaseName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"accountName": autorest.Encode("path", accountName),
"databaseName": autorest.Encode("path", databaseName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2015-04-08"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/apis/gremlin/databases/{databaseName}/settings/throughput", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// GetGremlinDatabaseThroughputSender sends the GetGremlinDatabaseThroughput request. The method will close the
// http.Response Body if it receives an error.
func (client DatabaseAccountsClient) GetGremlinDatabaseThroughputSender(req *http.Request) (*http.Response, error) {
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// GetGremlinDatabaseThroughputResponder handles the response to the GetGremlinDatabaseThroughput request. The method always
// closes the http.Response Body.
func (client DatabaseAccountsClient) GetGremlinDatabaseThroughputResponder(resp *http.Response) (result Throughput, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
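// Usage sketch (editorial addition, not part of the generated SDK): a minimal, hypothetical
// example of reading the Gremlin database throughput with this client. The subscription ID,
// resource group, account and database names below are placeholders, and the authorizer
// setup assumes the github.com/Azure/go-autorest/autorest/azure/auth helper package.
//
//	authorizer, err := auth.NewAuthorizerFromEnvironment()
//	if err != nil {
//		log.Fatal(err)
//	}
//	client := NewDatabaseAccountsClient("<subscriptionID>")
//	client.Authorizer = authorizer
//	throughput, err := client.GetGremlinDatabaseThroughput(context.Background(), "myResourceGroup", "mycosmosaccount", "mygremlindb")
//	if err != nil {
//		log.Fatal(err)
//	}
//	fmt.Println("status:", throughput.Response.Response.StatusCode)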
// GetGremlinGraph gets the Gremlin graph under an existing Azure Cosmos DB database account.
// Parameters:
// resourceGroupName - name of an Azure resource group.
// accountName - cosmos DB database account name.
// databaseName - cosmos DB database name.
// graphName - cosmos DB graph name.
func (client DatabaseAccountsClient) GetGremlinGraph(ctx context.Context, resourceGroupName string, accountName string, databaseName string, graphName string) (result GremlinGraph, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/DatabaseAccountsClient.GetGremlinGraph")
defer func() {
sc := -1
if result.Response.Response != nil {
sc = result.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
{TargetValue: accountName,
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 50, Chain: nil},
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
{Target: "accountName", Name: validation.Pattern, Rule: `^[a-z0-9]+(-[a-z0-9]+)*`, Chain: nil}}}}); err != nil {
return result, validation.NewError("documentdb.DatabaseAccountsClient", "GetGremlinGraph", err.Error())
}
req, err := client.GetGremlinGraphPreparer(ctx, resourceGroupName, accountName, databaseName, graphName)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "GetGremlinGraph", nil, "Failure preparing request")
return
}
resp, err := client.GetGremlinGraphSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "GetGremlinGraph", resp, "Failure sending request")
return
}
result, err = client.GetGremlinGraphResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "GetGremlinGraph", resp, "Failure responding to request")
return
}
return
}
// GetGremlinGraphPreparer prepares the GetGremlinGraph request.
func (client DatabaseAccountsClient) GetGremlinGraphPreparer(ctx context.Context, resourceGroupName string, accountName string, databaseName string, graphName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"accountName": autorest.Encode("path", accountName),
"databaseName": autorest.Encode("path", databaseName),
"graphName": autorest.Encode("path", graphName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2015-04-08"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/apis/gremlin/databases/{databaseName}/graphs/{graphName}", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// GetGremlinGraphSender sends the GetGremlinGraph request. The method will close the
// http.Response Body if it receives an error.
func (client DatabaseAccountsClient) GetGremlinGraphSender(req *http.Request) (*http.Response, error) {
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// GetGremlinGraphResponder handles the response to the GetGremlinGraph request. The method always
// closes the http.Response Body.
func (client DatabaseAccountsClient) GetGremlinGraphResponder(resp *http.Response) (result GremlinGraph, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// GetGremlinGraphThroughput gets the Gremlin graph throughput under an existing Azure Cosmos DB database account with
// the provided name.
// Parameters:
// resourceGroupName - name of an Azure resource group.
// accountName - cosmos DB database account name.
// databaseName - cosmos DB database name.
// graphName - cosmos DB graph name.
func (client DatabaseAccountsClient) GetGremlinGraphThroughput(ctx context.Context, resourceGroupName string, accountName string, databaseName string, graphName string) (result Throughput, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/DatabaseAccountsClient.GetGremlinGraphThroughput")
defer func() {
sc := -1
if result.Response.Response != nil {
sc = result.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
{TargetValue: accountName,
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 50, Chain: nil},
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
{Target: "accountName", Name: validation.Pattern, Rule: `^[a-z0-9]+(-[a-z0-9]+)*`, Chain: nil}}}}); err != nil {
return result, validation.NewError("documentdb.DatabaseAccountsClient", "GetGremlinGraphThroughput", err.Error())
}
req, err := client.GetGremlinGraphThroughputPreparer(ctx, resourceGroupName, accountName, databaseName, graphName)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "GetGremlinGraphThroughput", nil, "Failure preparing request")
return
}
resp, err := client.GetGremlinGraphThroughputSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "GetGremlinGraphThroughput", resp, "Failure sending request")
return
}
result, err = client.GetGremlinGraphThroughputResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "GetGremlinGraphThroughput", resp, "Failure responding to request")
return
}
return
}
// GetGremlinGraphThroughputPreparer prepares the GetGremlinGraphThroughput request.
func (client DatabaseAccountsClient) GetGremlinGraphThroughputPreparer(ctx context.Context, resourceGroupName string, accountName string, databaseName string, graphName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"accountName": autorest.Encode("path", accountName),
"databaseName": autorest.Encode("path", databaseName),
"graphName": autorest.Encode("path", graphName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2015-04-08"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/apis/gremlin/databases/{databaseName}/graphs/{graphName}/settings/throughput", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// GetGremlinGraphThroughputSender sends the GetGremlinGraphThroughput request. The method will close the
// http.Response Body if it receives an error.
func (client DatabaseAccountsClient) GetGremlinGraphThroughputSender(req *http.Request) (*http.Response, error) {
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// GetGremlinGraphThroughputResponder handles the response to the GetGremlinGraphThroughput request. The method always
// closes the http.Response Body.
func (client DatabaseAccountsClient) GetGremlinGraphThroughputResponder(resp *http.Response) (result Throughput, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// GetMongoDBCollection gets the MongoDB collection under an existing Azure Cosmos DB database account.
// Parameters:
// resourceGroupName - name of an Azure resource group.
// accountName - cosmos DB database account name.
// databaseName - cosmos DB database name.
// collectionName - cosmos DB collection name.
func (client DatabaseAccountsClient) GetMongoDBCollection(ctx context.Context, resourceGroupName string, accountName string, databaseName string, collectionName string) (result MongoDBCollection, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/DatabaseAccountsClient.GetMongoDBCollection")
defer func() {
sc := -1
if result.Response.Response != nil {
sc = result.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
{TargetValue: accountName,
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 50, Chain: nil},
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
{Target: "accountName", Name: validation.Pattern, Rule: `^[a-z0-9]+(-[a-z0-9]+)*`, Chain: nil}}}}); err != nil {
return result, validation.NewError("documentdb.DatabaseAccountsClient", "GetMongoDBCollection", err.Error())
}
req, err := client.GetMongoDBCollectionPreparer(ctx, resourceGroupName, accountName, databaseName, collectionName)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "GetMongoDBCollection", nil, "Failure preparing request")
return
}
resp, err := client.GetMongoDBCollectionSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "GetMongoDBCollection", resp, "Failure sending request")
return
}
result, err = client.GetMongoDBCollectionResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "GetMongoDBCollection", resp, "Failure responding to request")
return
}
return
}
// GetMongoDBCollectionPreparer prepares the GetMongoDBCollection request.
func (client DatabaseAccountsClient) GetMongoDBCollectionPreparer(ctx context.Context, resourceGroupName string, accountName string, databaseName string, collectionName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"accountName": autorest.Encode("path", accountName),
"collectionName": autorest.Encode("path", collectionName),
"databaseName": autorest.Encode("path", databaseName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2015-04-08"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/apis/mongodb/databases/{databaseName}/collections/{collectionName}", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// GetMongoDBCollectionSender sends the GetMongoDBCollection request. The method will close the
// http.Response Body if it receives an error.
func (client DatabaseAccountsClient) GetMongoDBCollectionSender(req *http.Request) (*http.Response, error) {
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// GetMongoDBCollectionResponder handles the response to the GetMongoDBCollection request. The method always
// closes the http.Response Body.
func (client DatabaseAccountsClient) GetMongoDBCollectionResponder(resp *http.Response) (result MongoDBCollection, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
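// Usage sketch (editorial addition): fetching a single MongoDB collection definition,
// assuming a DatabaseAccountsClient configured as in the sketch above. All resource names
// are placeholders.
//
//	collection, err := client.GetMongoDBCollection(context.Background(), "myResourceGroup", "mycosmosaccount", "mymongodb", "mycollection")
//	if err != nil {
//		log.Fatal(err)
//	}
//	fmt.Println("status:", collection.Response.Response.StatusCode)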
// GetMongoDBCollectionThroughput gets the RUs per second of the MongoDB collection under an existing Azure Cosmos DB
// database account with the provided name.
// Parameters:
// resourceGroupName - name of an Azure resource group.
// accountName - cosmos DB database account name.
// databaseName - cosmos DB database name.
// collectionName - cosmos DB collection name.
func (client DatabaseAccountsClient) GetMongoDBCollectionThroughput(ctx context.Context, resourceGroupName string, accountName string, databaseName string, collectionName string) (result Throughput, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/DatabaseAccountsClient.GetMongoDBCollectionThroughput")
defer func() {
sc := -1
if result.Response.Response != nil {
sc = result.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
{TargetValue: accountName,
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 50, Chain: nil},
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
{Target: "accountName", Name: validation.Pattern, Rule: `^[a-z0-9]+(-[a-z0-9]+)*`, Chain: nil}}}}); err != nil {
return result, validation.NewError("documentdb.DatabaseAccountsClient", "GetMongoDBCollectionThroughput", err.Error())
}
req, err := client.GetMongoDBCollectionThroughputPreparer(ctx, resourceGroupName, accountName, databaseName, collectionName)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "GetMongoDBCollectionThroughput", nil, "Failure preparing request")
return
}
resp, err := client.GetMongoDBCollectionThroughputSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "GetMongoDBCollectionThroughput", resp, "Failure sending request")
return
}
result, err = client.GetMongoDBCollectionThroughputResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "GetMongoDBCollectionThroughput", resp, "Failure responding to request")
return
}
return
}
// GetMongoDBCollectionThroughputPreparer prepares the GetMongoDBCollectionThroughput request.
func (client DatabaseAccountsClient) GetMongoDBCollectionThroughputPreparer(ctx context.Context, resourceGroupName string, accountName string, databaseName string, collectionName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"accountName": autorest.Encode("path", accountName),
"collectionName": autorest.Encode("path", collectionName),
"databaseName": autorest.Encode("path", databaseName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2015-04-08"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/apis/mongodb/databases/{databaseName}/collections/{collectionName}/settings/throughput", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// GetMongoDBCollectionThroughputSender sends the GetMongoDBCollectionThroughput request. The method will close the
// http.Response Body if it receives an error.
func (client DatabaseAccountsClient) GetMongoDBCollectionThroughputSender(req *http.Request) (*http.Response, error) {
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// GetMongoDBCollectionThroughputResponder handles the response to the GetMongoDBCollectionThroughput request. The method always
// closes the http.Response Body.
func (client DatabaseAccountsClient) GetMongoDBCollectionThroughputResponder(resp *http.Response) (result Throughput, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// GetMongoDBDatabase gets the MongoDB database under an existing Azure Cosmos DB database account with the provided
// name.
// Parameters:
// resourceGroupName - name of an Azure resource group.
// accountName - cosmos DB database account name.
// databaseName - cosmos DB database name.
func (client DatabaseAccountsClient) GetMongoDBDatabase(ctx context.Context, resourceGroupName string, accountName string, databaseName string) (result MongoDBDatabase, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/DatabaseAccountsClient.GetMongoDBDatabase")
defer func() {
sc := -1
if result.Response.Response != nil {
sc = result.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
{TargetValue: accountName,
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 50, Chain: nil},
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
{Target: "accountName", Name: validation.Pattern, Rule: `^[a-z0-9]+(-[a-z0-9]+)*`, Chain: nil}}}}); err != nil {
return result, validation.NewError("documentdb.DatabaseAccountsClient", "GetMongoDBDatabase", err.Error())
}
req, err := client.GetMongoDBDatabasePreparer(ctx, resourceGroupName, accountName, databaseName)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "GetMongoDBDatabase", nil, "Failure preparing request")
return
}
resp, err := client.GetMongoDBDatabaseSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "GetMongoDBDatabase", resp, "Failure sending request")
return
}
result, err = client.GetMongoDBDatabaseResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "GetMongoDBDatabase", resp, "Failure responding to request")
return
}
return
}
// GetMongoDBDatabasePreparer prepares the GetMongoDBDatabase request.
func (client DatabaseAccountsClient) GetMongoDBDatabasePreparer(ctx context.Context, resourceGroupName string, accountName string, databaseName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"accountName": autorest.Encode("path", accountName),
"databaseName": autorest.Encode("path", databaseName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2015-04-08"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/apis/mongodb/databases/{databaseName}", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// GetMongoDBDatabaseSender sends the GetMongoDBDatabase request. The method will close the
// http.Response Body if it receives an error.
func (client DatabaseAccountsClient) GetMongoDBDatabaseSender(req *http.Request) (*http.Response, error) {
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// GetMongoDBDatabaseResponder handles the response to the GetMongoDBDatabase request. The method always
// closes the http.Response Body.
func (client DatabaseAccountsClient) GetMongoDBDatabaseResponder(resp *http.Response) (result MongoDBDatabase, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// GetMongoDBDatabaseThroughput gets the RUs per second of the MongoDB database under an existing Azure Cosmos DB
// database account with the provided name.
// Parameters:
// resourceGroupName - name of an Azure resource group.
// accountName - cosmos DB database account name.
// databaseName - cosmos DB database name.
func (client DatabaseAccountsClient) GetMongoDBDatabaseThroughput(ctx context.Context, resourceGroupName string, accountName string, databaseName string) (result Throughput, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/DatabaseAccountsClient.GetMongoDBDatabaseThroughput")
defer func() {
sc := -1
if result.Response.Response != nil {
sc = result.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
{TargetValue: accountName,
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 50, Chain: nil},
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
{Target: "accountName", Name: validation.Pattern, Rule: `^[a-z0-9]+(-[a-z0-9]+)*`, Chain: nil}}}}); err != nil {
return result, validation.NewError("documentdb.DatabaseAccountsClient", "GetMongoDBDatabaseThroughput", err.Error())
}
req, err := client.GetMongoDBDatabaseThroughputPreparer(ctx, resourceGroupName, accountName, databaseName)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "GetMongoDBDatabaseThroughput", nil, "Failure preparing request")
return
}
resp, err := client.GetMongoDBDatabaseThroughputSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "GetMongoDBDatabaseThroughput", resp, "Failure sending request")
return
}
result, err = client.GetMongoDBDatabaseThroughputResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "GetMongoDBDatabaseThroughput", resp, "Failure responding to request")
return
}
return
}
// GetMongoDBDatabaseThroughputPreparer prepares the GetMongoDBDatabaseThroughput request.
func (client DatabaseAccountsClient) GetMongoDBDatabaseThroughputPreparer(ctx context.Context, resourceGroupName string, accountName string, databaseName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"accountName": autorest.Encode("path", accountName),
"databaseName": autorest.Encode("path", databaseName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2015-04-08"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/apis/mongodb/databases/{databaseName}/settings/throughput", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// GetMongoDBDatabaseThroughputSender sends the GetMongoDBDatabaseThroughput request. The method will close the
// http.Response Body if it receives an error.
func (client DatabaseAccountsClient) GetMongoDBDatabaseThroughputSender(req *http.Request) (*http.Response, error) {
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// GetMongoDBDatabaseThroughputResponder handles the response to the GetMongoDBDatabaseThroughput request. The method always
// closes the http.Response Body.
func (client DatabaseAccountsClient) GetMongoDBDatabaseThroughputResponder(resp *http.Response) (result Throughput, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// GetReadOnlyKeys lists the read-only access keys for the specified Azure Cosmos DB database account.
// Parameters:
// resourceGroupName - name of an Azure resource group.
// accountName - cosmos DB database account name.
func (client DatabaseAccountsClient) GetReadOnlyKeys(ctx context.Context, resourceGroupName string, accountName string) (result DatabaseAccountListReadOnlyKeysResult, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/DatabaseAccountsClient.GetReadOnlyKeys")
defer func() {
sc := -1
if result.Response.Response != nil {
sc = result.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
{TargetValue: accountName,
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 50, Chain: nil},
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
{Target: "accountName", Name: validation.Pattern, Rule: `^[a-z0-9]+(-[a-z0-9]+)*`, Chain: nil}}}}); err != nil {
return result, validation.NewError("documentdb.DatabaseAccountsClient", "GetReadOnlyKeys", err.Error())
}
req, err := client.GetReadOnlyKeysPreparer(ctx, resourceGroupName, accountName)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "GetReadOnlyKeys", nil, "Failure preparing request")
return
}
resp, err := client.GetReadOnlyKeysSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "GetReadOnlyKeys", resp, "Failure sending request")
return
}
result, err = client.GetReadOnlyKeysResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "GetReadOnlyKeys", resp, "Failure responding to request")
return
}
return
}
// GetReadOnlyKeysPreparer prepares the GetReadOnlyKeys request.
func (client DatabaseAccountsClient) GetReadOnlyKeysPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"accountName": autorest.Encode("path", accountName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2015-04-08"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/readonlykeys", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// GetReadOnlyKeysSender sends the GetReadOnlyKeys request. The method will close the
// http.Response Body if it receives an error.
func (client DatabaseAccountsClient) GetReadOnlyKeysSender(req *http.Request) (*http.Response, error) {
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// GetReadOnlyKeysResponder handles the response to the GetReadOnlyKeys request. The method always
// closes the http.Response Body.
func (client DatabaseAccountsClient) GetReadOnlyKeysResponder(resp *http.Response) (result DatabaseAccountListReadOnlyKeysResult, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
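// Usage sketch (editorial addition): listing the read-only keys for an account, assuming a
// configured DatabaseAccountsClient as above. The PrimaryReadonlyMasterKey field name is
// assumed from the 2015-04-08 models rather than taken from this file.
//
//	keys, err := client.GetReadOnlyKeys(context.Background(), "myResourceGroup", "mycosmosaccount")
//	if err != nil {
//		log.Fatal(err)
//	}
//	if keys.PrimaryReadonlyMasterKey != nil {
//		fmt.Println("primary read-only key retrieved")
//	}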
// GetSQLContainer gets the SQL container under an existing Azure Cosmos DB database account.
// Parameters:
// resourceGroupName - name of an Azure resource group.
// accountName - cosmos DB database account name.
// databaseName - cosmos DB database name.
// containerName - cosmos DB container name.
func (client DatabaseAccountsClient) GetSQLContainer(ctx context.Context, resourceGroupName string, accountName string, databaseName string, containerName string) (result SQLContainer, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/DatabaseAccountsClient.GetSQLContainer")
defer func() {
sc := -1
if result.Response.Response != nil {
sc = result.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
{TargetValue: accountName,
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 50, Chain: nil},
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
{Target: "accountName", Name: validation.Pattern, Rule: `^[a-z0-9]+(-[a-z0-9]+)*`, Chain: nil}}}}); err != nil {
return result, validation.NewError("documentdb.DatabaseAccountsClient", "GetSQLContainer", err.Error())
}
req, err := client.GetSQLContainerPreparer(ctx, resourceGroupName, accountName, databaseName, containerName)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "GetSQLContainer", nil, "Failure preparing request")
return
}
resp, err := client.GetSQLContainerSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "GetSQLContainer", resp, "Failure sending request")
return
}
result, err = client.GetSQLContainerResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "GetSQLContainer", resp, "Failure responding to request")
return
}
return
}
// GetSQLContainerPreparer prepares the GetSQLContainer request.
func (client DatabaseAccountsClient) GetSQLContainerPreparer(ctx context.Context, resourceGroupName string, accountName string, databaseName string, containerName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"accountName": autorest.Encode("path", accountName),
"containerName": autorest.Encode("path", containerName),
"databaseName": autorest.Encode("path", databaseName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2015-04-08"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/apis/sql/databases/{databaseName}/containers/{containerName}", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// GetSQLContainerSender sends the GetSQLContainer request. The method will close the
// http.Response Body if it receives an error.
func (client DatabaseAccountsClient) GetSQLContainerSender(req *http.Request) (*http.Response, error) {
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// GetSQLContainerResponder handles the response to the GetSQLContainer request. The method always
// closes the http.Response Body.
func (client DatabaseAccountsClient) GetSQLContainerResponder(resp *http.Response) (result SQLContainer, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// GetSQLContainerThroughput gets the RUs per second of the SQL container under an existing Azure Cosmos DB database
// account.
// Parameters:
// resourceGroupName - name of an Azure resource group.
// accountName - cosmos DB database account name.
// databaseName - cosmos DB database name.
// containerName - cosmos DB container name.
func (client DatabaseAccountsClient) GetSQLContainerThroughput(ctx context.Context, resourceGroupName string, accountName string, databaseName string, containerName string) (result Throughput, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/DatabaseAccountsClient.GetSQLContainerThroughput")
defer func() {
sc := -1
if result.Response.Response != nil {
sc = result.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
{TargetValue: accountName,
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 50, Chain: nil},
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
{Target: "accountName", Name: validation.Pattern, Rule: `^[a-z0-9]+(-[a-z0-9]+)*`, Chain: nil}}}}); err != nil {
return result, validation.NewError("documentdb.DatabaseAccountsClient", "GetSQLContainerThroughput", err.Error())
}
req, err := client.GetSQLContainerThroughputPreparer(ctx, resourceGroupName, accountName, databaseName, containerName)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "GetSQLContainerThroughput", nil, "Failure preparing request")
return
}
resp, err := client.GetSQLContainerThroughputSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "GetSQLContainerThroughput", resp, "Failure sending request")
return
}
result, err = client.GetSQLContainerThroughputResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "GetSQLContainerThroughput", resp, "Failure responding to request")
return
}
return
}
// GetSQLContainerThroughputPreparer prepares the GetSQLContainerThroughput request.
func (client DatabaseAccountsClient) GetSQLContainerThroughputPreparer(ctx context.Context, resourceGroupName string, accountName string, databaseName string, containerName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"accountName": autorest.Encode("path", accountName),
"containerName": autorest.Encode("path", containerName),
"databaseName": autorest.Encode("path", databaseName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2015-04-08"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/apis/sql/databases/{databaseName}/containers/{containerName}/settings/throughput", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// GetSQLContainerThroughputSender sends the GetSQLContainerThroughput request. The method will close the
// http.Response Body if it receives an error.
func (client DatabaseAccountsClient) GetSQLContainerThroughputSender(req *http.Request) (*http.Response, error) {
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// GetSQLContainerThroughputResponder handles the response to the GetSQLContainerThroughput request. The method always
// closes the http.Response Body.
func (client DatabaseAccountsClient) GetSQLContainerThroughputResponder(resp *http.Response) (result Throughput, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// GetSQLDatabase gets the SQL database under an existing Azure Cosmos DB database account with the provided name.
// Parameters:
// resourceGroupName - name of an Azure resource group.
// accountName - cosmos DB database account name.
// databaseName - cosmos DB database name.
func (client DatabaseAccountsClient) GetSQLDatabase(ctx context.Context, resourceGroupName string, accountName string, databaseName string) (result SQLDatabase, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/DatabaseAccountsClient.GetSQLDatabase")
defer func() {
sc := -1
if result.Response.Response != nil {
sc = result.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
{TargetValue: accountName,
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 50, Chain: nil},
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
{Target: "accountName", Name: validation.Pattern, Rule: `^[a-z0-9]+(-[a-z0-9]+)*`, Chain: nil}}}}); err != nil {
return result, validation.NewError("documentdb.DatabaseAccountsClient", "GetSQLDatabase", err.Error())
}
req, err := client.GetSQLDatabasePreparer(ctx, resourceGroupName, accountName, databaseName)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "GetSQLDatabase", nil, "Failure preparing request")
return
}
resp, err := client.GetSQLDatabaseSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "GetSQLDatabase", resp, "Failure sending request")
return
}
result, err = client.GetSQLDatabaseResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "GetSQLDatabase", resp, "Failure responding to request")
return
}
return
}
// GetSQLDatabasePreparer prepares the GetSQLDatabase request.
func (client DatabaseAccountsClient) GetSQLDatabasePreparer(ctx context.Context, resourceGroupName string, accountName string, databaseName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"accountName": autorest.Encode("path", accountName),
"databaseName": autorest.Encode("path", databaseName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2015-04-08"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/apis/sql/databases/{databaseName}", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// GetSQLDatabaseSender sends the GetSQLDatabase request. The method will close the
// http.Response Body if it receives an error.
func (client DatabaseAccountsClient) GetSQLDatabaseSender(req *http.Request) (*http.Response, error) {
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// GetSQLDatabaseResponder handles the response to the GetSQLDatabase request. The method always
// closes the http.Response Body.
func (client DatabaseAccountsClient) GetSQLDatabaseResponder(resp *http.Response) (result SQLDatabase, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// GetSQLDatabaseThroughput gets the RUs per second of the SQL database under an existing Azure Cosmos DB database
// account with the provided name.
// Parameters:
// resourceGroupName - name of an Azure resource group.
// accountName - cosmos DB database account name.
// databaseName - cosmos DB database name.
func (client DatabaseAccountsClient) GetSQLDatabaseThroughput(ctx context.Context, resourceGroupName string, accountName string, databaseName string) (result Throughput, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/DatabaseAccountsClient.GetSQLDatabaseThroughput")
defer func() {
sc := -1
if result.Response.Response != nil {
sc = result.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
{TargetValue: accountName,
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 50, Chain: nil},
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
{Target: "accountName", Name: validation.Pattern, Rule: `^[a-z0-9]+(-[a-z0-9]+)*`, Chain: nil}}}}); err != nil {
return result, validation.NewError("documentdb.DatabaseAccountsClient", "GetSQLDatabaseThroughput", err.Error())
}
req, err := client.GetSQLDatabaseThroughputPreparer(ctx, resourceGroupName, accountName, databaseName)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "GetSQLDatabaseThroughput", nil, "Failure preparing request")
return
}
resp, err := client.GetSQLDatabaseThroughputSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "GetSQLDatabaseThroughput", resp, "Failure sending request")
return
}
result, err = client.GetSQLDatabaseThroughputResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "GetSQLDatabaseThroughput", resp, "Failure responding to request")
return
}
return
}
// GetSQLDatabaseThroughputPreparer prepares the GetSQLDatabaseThroughput request.
func (client DatabaseAccountsClient) GetSQLDatabaseThroughputPreparer(ctx context.Context, resourceGroupName string, accountName string, databaseName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"accountName": autorest.Encode("path", accountName),
"databaseName": autorest.Encode("path", databaseName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2015-04-08"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/apis/sql/databases/{databaseName}/settings/throughput", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// GetSQLDatabaseThroughputSender sends the GetSQLDatabaseThroughput request. The method will close the
// http.Response Body if it receives an error.
func (client DatabaseAccountsClient) GetSQLDatabaseThroughputSender(req *http.Request) (*http.Response, error) {
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// GetSQLDatabaseThroughputResponder handles the response to the GetSQLDatabaseThroughput request. The method always
// closes the http.Response Body.
func (client DatabaseAccountsClient) GetSQLDatabaseThroughputResponder(resp *http.Response) (result Throughput, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// GetTable gets the Table under an existing Azure Cosmos DB database account with the provided name.
// Parameters:
// resourceGroupName - name of an Azure resource group.
// accountName - cosmos DB database account name.
// tableName - cosmos DB table name.
func (client DatabaseAccountsClient) GetTable(ctx context.Context, resourceGroupName string, accountName string, tableName string) (result Table, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/DatabaseAccountsClient.GetTable")
defer func() {
sc := -1
if result.Response.Response != nil {
sc = result.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
{TargetValue: accountName,
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 50, Chain: nil},
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
{Target: "accountName", Name: validation.Pattern, Rule: `^[a-z0-9]+(-[a-z0-9]+)*`, Chain: nil}}}}); err != nil {
return result, validation.NewError("documentdb.DatabaseAccountsClient", "GetTable", err.Error())
}
req, err := client.GetTablePreparer(ctx, resourceGroupName, accountName, tableName)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "GetTable", nil, "Failure preparing request")
return
}
resp, err := client.GetTableSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "GetTable", resp, "Failure sending request")
return
}
result, err = client.GetTableResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "GetTable", resp, "Failure responding to request")
return
}
return
}
// GetTablePreparer prepares the GetTable request.
func (client DatabaseAccountsClient) GetTablePreparer(ctx context.Context, resourceGroupName string, accountName string, tableName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"accountName": autorest.Encode("path", accountName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
"tableName": autorest.Encode("path", tableName),
}
const APIVersion = "2015-04-08"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/apis/table/tables/{tableName}", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// GetTableSender sends the GetTable request. The method will close the
// http.Response Body if it receives an error.
func (client DatabaseAccountsClient) GetTableSender(req *http.Request) (*http.Response, error) {
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// GetTableResponder handles the response to the GetTable request. The method always
// closes the http.Response Body.
func (client DatabaseAccountsClient) GetTableResponder(resp *http.Response) (result Table, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// GetTableThroughput gets the RUs per second of the Table under an existing Azure Cosmos DB database account with the
// provided name.
// Parameters:
// resourceGroupName - name of an Azure resource group.
// accountName - cosmos DB database account name.
// tableName - cosmos DB table name.
func (client DatabaseAccountsClient) GetTableThroughput(ctx context.Context, resourceGroupName string, accountName string, tableName string) (result Throughput, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/DatabaseAccountsClient.GetTableThroughput")
defer func() {
sc := -1
if result.Response.Response != nil {
sc = result.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
{TargetValue: accountName,
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 50, Chain: nil},
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
{Target: "accountName", Name: validation.Pattern, Rule: `^[a-z0-9]+(-[a-z0-9]+)*`, Chain: nil}}}}); err != nil {
return result, validation.NewError("documentdb.DatabaseAccountsClient", "GetTableThroughput", err.Error())
}
req, err := client.GetTableThroughputPreparer(ctx, resourceGroupName, accountName, tableName)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "GetTableThroughput", nil, "Failure preparing request")
return
}
resp, err := client.GetTableThroughputSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "GetTableThroughput", resp, "Failure sending request")
return
}
result, err = client.GetTableThroughputResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "GetTableThroughput", resp, "Failure responding to request")
return
}
return
}
// GetTableThroughputPreparer prepares the GetTableThroughput request.
func (client DatabaseAccountsClient) GetTableThroughputPreparer(ctx context.Context, resourceGroupName string, accountName string, tableName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"accountName": autorest.Encode("path", accountName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
"tableName": autorest.Encode("path", tableName),
}
const APIVersion = "2015-04-08"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/apis/table/tables/{tableName}/settings/throughput", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// GetTableThroughputSender sends the GetTableThroughput request. The method will close the
// http.Response Body if it receives an error.
func (client DatabaseAccountsClient) GetTableThroughputSender(req *http.Request) (*http.Response, error) {
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// GetTableThroughputResponder handles the response to the GetTableThroughput request. The method always
// closes the http.Response Body.
func (client DatabaseAccountsClient) GetTableThroughputResponder(resp *http.Response) (result Throughput, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// List lists all the Azure Cosmos DB database accounts available under the subscription.
func (client DatabaseAccountsClient) List(ctx context.Context) (result DatabaseAccountsListResult, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/DatabaseAccountsClient.List")
defer func() {
sc := -1
if result.Response.Response != nil {
sc = result.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
req, err := client.ListPreparer(ctx)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "List", nil, "Failure preparing request")
return
}
resp, err := client.ListSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "List", resp, "Failure sending request")
return
}
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "List", resp, "Failure responding to request")
return
}
return
}
// ListPreparer prepares the List request.
func (client DatabaseAccountsClient) ListPreparer(ctx context.Context) (*http.Request, error) {
pathParameters := map[string]interface{}{
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2015-04-08"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.DocumentDB/databaseAccounts", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// ListSender sends the List request. The method will close the
// http.Response Body if it receives an error.
func (client DatabaseAccountsClient) ListSender(req *http.Request) (*http.Response, error) {
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// ListResponder handles the response to the List request. The method always
// closes the http.Response Body.
func (client DatabaseAccountsClient) ListResponder(resp *http.Response) (result DatabaseAccountsListResult, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
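
// Usage sketch (not part of the generated client): a typical caller constructs the client and calls
// List directly; the authorizer setup below is an assumption about the caller's environment, and the
// Value field follows this package's DatabaseAccountsListResult model.
//
//	client := documentdb.NewDatabaseAccountsClient(subscriptionID)
//	client.Authorizer = authorizer // e.g. obtained from go-autorest's auth helpers
//	accounts, err := client.List(context.Background())
//	if err == nil && accounts.Value != nil {
//		for _, acct := range *accounts.Value {
//			_ = acct // inspect each DatabaseAccount here
//		}
//	}
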
// ListByResourceGroup lists all the Azure Cosmos DB database accounts available under the given resource group.
// Parameters:
// resourceGroupName - name of an Azure resource group.
func (client DatabaseAccountsClient) ListByResourceGroup(ctx context.Context, resourceGroupName string) (result DatabaseAccountsListResult, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/DatabaseAccountsClient.ListByResourceGroup")
defer func() {
sc := -1
if result.Response.Response != nil {
sc = result.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil {
return result, validation.NewError("documentdb.DatabaseAccountsClient", "ListByResourceGroup", err.Error())
}
req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "ListByResourceGroup", nil, "Failure preparing request")
return
}
resp, err := client.ListByResourceGroupSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "ListByResourceGroup", resp, "Failure sending request")
return
}
result, err = client.ListByResourceGroupResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "ListByResourceGroup", resp, "Failure responding to request")
return
}
return
}
// ListByResourceGroupPreparer prepares the ListByResourceGroup request.
func (client DatabaseAccountsClient) ListByResourceGroupPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2015-04-08"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the
// http.Response Body if it receives an error.
func (client DatabaseAccountsClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) {
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always
// closes the http.Response Body.
func (client DatabaseAccountsClient) ListByResourceGroupResponder(resp *http.Response) (result DatabaseAccountsListResult, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// ListCassandraKeyspaces lists the Cassandra keyspaces under an existing Azure Cosmos DB database account.
// Parameters:
// resourceGroupName - name of an Azure resource group.
// accountName - cosmos DB database account name.
func (client DatabaseAccountsClient) ListCassandraKeyspaces(ctx context.Context, resourceGroupName string, accountName string) (result CassandraKeyspaceListResult, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/DatabaseAccountsClient.ListCassandraKeyspaces")
defer func() {
sc := -1
if result.Response.Response != nil {
sc = result.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
{TargetValue: accountName,
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 50, Chain: nil},
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
{Target: "accountName", Name: validation.Pattern, Rule: `^[a-z0-9]+(-[a-z0-9]+)*`, Chain: nil}}}}); err != nil {
return result, validation.NewError("documentdb.DatabaseAccountsClient", "ListCassandraKeyspaces", err.Error())
}
req, err := client.ListCassandraKeyspacesPreparer(ctx, resourceGroupName, accountName)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "ListCassandraKeyspaces", nil, "Failure preparing request")
return
}
resp, err := client.ListCassandraKeyspacesSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "ListCassandraKeyspaces", resp, "Failure sending request")
return
}
result, err = client.ListCassandraKeyspacesResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "ListCassandraKeyspaces", resp, "Failure responding to request")
return
}
return
}
// ListCassandraKeyspacesPreparer prepares the ListCassandraKeyspaces request.
func (client DatabaseAccountsClient) ListCassandraKeyspacesPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"accountName": autorest.Encode("path", accountName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2015-04-08"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/apis/cassandra/keyspaces", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// ListCassandraKeyspacesSender sends the ListCassandraKeyspaces request. The method will close the
// http.Response Body if it receives an error.
func (client DatabaseAccountsClient) ListCassandraKeyspacesSender(req *http.Request) (*http.Response, error) {
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// ListCassandraKeyspacesResponder handles the response to the ListCassandraKeyspaces request. The method always
// closes the http.Response Body.
func (client DatabaseAccountsClient) ListCassandraKeyspacesResponder(resp *http.Response) (result CassandraKeyspaceListResult, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// ListCassandraTables lists the Cassandra tables under an existing Azure Cosmos DB database account.
// Parameters:
// resourceGroupName - name of an Azure resource group.
// accountName - cosmos DB database account name.
// keyspaceName - cosmos DB keyspace name.
func (client DatabaseAccountsClient) ListCassandraTables(ctx context.Context, resourceGroupName string, accountName string, keyspaceName string) (result CassandraTableListResult, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/DatabaseAccountsClient.ListCassandraTables")
defer func() {
sc := -1
if result.Response.Response != nil {
sc = result.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
{TargetValue: accountName,
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 50, Chain: nil},
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
{Target: "accountName", Name: validation.Pattern, Rule: `^[a-z0-9]+(-[a-z0-9]+)*`, Chain: nil}}}}); err != nil {
return result, validation.NewError("documentdb.DatabaseAccountsClient", "ListCassandraTables", err.Error())
}
req, err := client.ListCassandraTablesPreparer(ctx, resourceGroupName, accountName, keyspaceName)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "ListCassandraTables", nil, "Failure preparing request")
return
}
resp, err := client.ListCassandraTablesSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "ListCassandraTables", resp, "Failure sending request")
return
}
result, err = client.ListCassandraTablesResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "ListCassandraTables", resp, "Failure responding to request")
return
}
return
}
// ListCassandraTablesPreparer prepares the ListCassandraTables request.
func (client DatabaseAccountsClient) ListCassandraTablesPreparer(ctx context.Context, resourceGroupName string, accountName string, keyspaceName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"accountName": autorest.Encode("path", accountName),
"keyspaceName": autorest.Encode("path", keyspaceName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2015-04-08"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/apis/cassandra/keyspaces/{keyspaceName}/tables", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// ListCassandraTablesSender sends the ListCassandraTables request. The method will close the
// http.Response Body if it receives an error.
func (client DatabaseAccountsClient) ListCassandraTablesSender(req *http.Request) (*http.Response, error) {
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// ListCassandraTablesResponder handles the response to the ListCassandraTables request. The method always
// closes the http.Response Body.
func (client DatabaseAccountsClient) ListCassandraTablesResponder(resp *http.Response) (result CassandraTableListResult, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// ListConnectionStrings lists the connection strings for the specified Azure Cosmos DB database account.
// Parameters:
// resourceGroupName - name of an Azure resource group.
// accountName - cosmos DB database account name.
func (client DatabaseAccountsClient) ListConnectionStrings(ctx context.Context, resourceGroupName string, accountName string) (result DatabaseAccountListConnectionStringsResult, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/DatabaseAccountsClient.ListConnectionStrings")
defer func() {
sc := -1
if result.Response.Response != nil {
sc = result.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
{TargetValue: accountName,
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 50, Chain: nil},
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
{Target: "accountName", Name: validation.Pattern, Rule: `^[a-z0-9]+(-[a-z0-9]+)*`, Chain: nil}}}}); err != nil {
return result, validation.NewError("documentdb.DatabaseAccountsClient", "ListConnectionStrings", err.Error())
}
req, err := client.ListConnectionStringsPreparer(ctx, resourceGroupName, accountName)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "ListConnectionStrings", nil, "Failure preparing request")
return
}
resp, err := client.ListConnectionStringsSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "ListConnectionStrings", resp, "Failure sending request")
return
}
result, err = client.ListConnectionStringsResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "ListConnectionStrings", resp, "Failure responding to request")
return
}
return
}
// ListConnectionStringsPreparer prepares the ListConnectionStrings request.
func (client DatabaseAccountsClient) ListConnectionStringsPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"accountName": autorest.Encode("path", accountName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2015-04-08"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsPost(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/listConnectionStrings", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// ListConnectionStringsSender sends the ListConnectionStrings request. The method will close the
// http.Response Body if it receives an error.
func (client DatabaseAccountsClient) ListConnectionStringsSender(req *http.Request) (*http.Response, error) {
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// ListConnectionStringsResponder handles the response to the ListConnectionStrings request. The method always
// closes the http.Response Body.
func (client DatabaseAccountsClient) ListConnectionStringsResponder(resp *http.Response) (result DatabaseAccountListConnectionStringsResult, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
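
// Illustrative sketch only: iterating the returned connection strings. The ConnectionStrings field
// name is taken from this package's result model; treat the exact fields as an assumption.
//
//	strs, err := client.ListConnectionStrings(ctx, resourceGroupName, accountName)
//	if err == nil && strs.ConnectionStrings != nil {
//		for _, cs := range *strs.ConnectionStrings {
//			_ = cs // each entry carries a connection string and a description
//		}
//	}
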
// ListGremlinDatabases lists the Gremlin databases under an existing Azure Cosmos DB database account.
// Parameters:
// resourceGroupName - name of an Azure resource group.
// accountName - cosmos DB database account name.
func (client DatabaseAccountsClient) ListGremlinDatabases(ctx context.Context, resourceGroupName string, accountName string) (result GremlinDatabaseListResult, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/DatabaseAccountsClient.ListGremlinDatabases")
defer func() {
sc := -1
if result.Response.Response != nil {
sc = result.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
{TargetValue: accountName,
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 50, Chain: nil},
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
{Target: "accountName", Name: validation.Pattern, Rule: `^[a-z0-9]+(-[a-z0-9]+)*`, Chain: nil}}}}); err != nil {
return result, validation.NewError("documentdb.DatabaseAccountsClient", "ListGremlinDatabases", err.Error())
}
req, err := client.ListGremlinDatabasesPreparer(ctx, resourceGroupName, accountName)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "ListGremlinDatabases", nil, "Failure preparing request")
return
}
resp, err := client.ListGremlinDatabasesSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "ListGremlinDatabases", resp, "Failure sending request")
return
}
result, err = client.ListGremlinDatabasesResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "ListGremlinDatabases", resp, "Failure responding to request")
return
}
return
}
// ListGremlinDatabasesPreparer prepares the ListGremlinDatabases request.
func (client DatabaseAccountsClient) ListGremlinDatabasesPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"accountName": autorest.Encode("path", accountName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2015-04-08"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/apis/gremlin/databases", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// ListGremlinDatabasesSender sends the ListGremlinDatabases request. The method will close the
// http.Response Body if it receives an error.
func (client DatabaseAccountsClient) ListGremlinDatabasesSender(req *http.Request) (*http.Response, error) {
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// ListGremlinDatabasesResponder handles the response to the ListGremlinDatabases request. The method always
// closes the http.Response Body.
func (client DatabaseAccountsClient) ListGremlinDatabasesResponder(resp *http.Response) (result GremlinDatabaseListResult, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// ListGremlinGraphs lists the Gremlin graphs under an existing Azure Cosmos DB database account.
// Parameters:
// resourceGroupName - name of an Azure resource group.
// accountName - cosmos DB database account name.
// databaseName - cosmos DB database name.
func (client DatabaseAccountsClient) ListGremlinGraphs(ctx context.Context, resourceGroupName string, accountName string, databaseName string) (result GremlinGraphListResult, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/DatabaseAccountsClient.ListGremlinGraphs")
defer func() {
sc := -1
if result.Response.Response != nil {
sc = result.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
{TargetValue: accountName,
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 50, Chain: nil},
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
{Target: "accountName", Name: validation.Pattern, Rule: `^[a-z0-9]+(-[a-z0-9]+)*`, Chain: nil}}}}); err != nil {
return result, validation.NewError("documentdb.DatabaseAccountsClient", "ListGremlinGraphs", err.Error())
}
req, err := client.ListGremlinGraphsPreparer(ctx, resourceGroupName, accountName, databaseName)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "ListGremlinGraphs", nil, "Failure preparing request")
return
}
resp, err := client.ListGremlinGraphsSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "ListGremlinGraphs", resp, "Failure sending request")
return
}
result, err = client.ListGremlinGraphsResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "ListGremlinGraphs", resp, "Failure responding to request")
return
}
return
}
// ListGremlinGraphsPreparer prepares the ListGremlinGraphs request.
func (client DatabaseAccountsClient) ListGremlinGraphsPreparer(ctx context.Context, resourceGroupName string, accountName string, databaseName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"accountName": autorest.Encode("path", accountName),
"databaseName": autorest.Encode("path", databaseName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2015-04-08"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/apis/gremlin/databases/{databaseName}/graphs", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// ListGremlinGraphsSender sends the ListGremlinGraphs request. The method will close the
// http.Response Body if it receives an error.
func (client DatabaseAccountsClient) ListGremlinGraphsSender(req *http.Request) (*http.Response, error) {
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// ListGremlinGraphsResponder handles the response to the ListGremlinGraphs request. The method always
// closes the http.Response Body.
func (client DatabaseAccountsClient) ListGremlinGraphsResponder(resp *http.Response) (result GremlinGraphListResult, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// ListKeys lists the access keys for the specified Azure Cosmos DB database account.
// Parameters:
// resourceGroupName - name of an Azure resource group.
// accountName - cosmos DB database account name.
func (client DatabaseAccountsClient) ListKeys(ctx context.Context, resourceGroupName string, accountName string) (result DatabaseAccountListKeysResult, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/DatabaseAccountsClient.ListKeys")
defer func() {
sc := -1
if result.Response.Response != nil {
sc = result.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
{TargetValue: accountName,
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 50, Chain: nil},
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
{Target: "accountName", Name: validation.Pattern, Rule: `^[a-z0-9]+(-[a-z0-9]+)*`, Chain: nil}}}}); err != nil {
return result, validation.NewError("documentdb.DatabaseAccountsClient", "ListKeys", err.Error())
}
req, err := client.ListKeysPreparer(ctx, resourceGroupName, accountName)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "ListKeys", nil, "Failure preparing request")
return
}
resp, err := client.ListKeysSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "ListKeys", resp, "Failure sending request")
return
}
result, err = client.ListKeysResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "ListKeys", resp, "Failure responding to request")
return
}
return
}
// ListKeysPreparer prepares the ListKeys request.
func (client DatabaseAccountsClient) ListKeysPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"accountName": autorest.Encode("path", accountName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2015-04-08"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsPost(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/listKeys", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// ListKeysSender sends the ListKeys request. The method will close the
// http.Response Body if it receives an error.
func (client DatabaseAccountsClient) ListKeysSender(req *http.Request) (*http.Response, error) {
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// ListKeysResponder handles the response to the ListKeys request. The method always
// closes the http.Response Body.
func (client DatabaseAccountsClient) ListKeysResponder(resp *http.Response) (result DatabaseAccountListKeysResult, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
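
// Hedged usage sketch: retrieving the account keys. The PrimaryMasterKey field name is an
// assumption about the DatabaseAccountListKeysResult model in this package.
//
//	keys, err := client.ListKeys(ctx, resourceGroupName, accountName)
//	if err == nil && keys.PrimaryMasterKey != nil {
//		primary := *keys.PrimaryMasterKey // use to build a data-plane connection
//		_ = primary
//	}
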
// ListMetricDefinitions retrieves metric definitions for the given database account.
// Parameters:
// resourceGroupName - name of an Azure resource group.
// accountName - cosmos DB database account name.
func (client DatabaseAccountsClient) ListMetricDefinitions(ctx context.Context, resourceGroupName string, accountName string) (result MetricDefinitionsListResult, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/DatabaseAccountsClient.ListMetricDefinitions")
defer func() {
sc := -1
if result.Response.Response != nil {
sc = result.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
{TargetValue: accountName,
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 50, Chain: nil},
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
{Target: "accountName", Name: validation.Pattern, Rule: `^[a-z0-9]+(-[a-z0-9]+)*`, Chain: nil}}}}); err != nil {
return result, validation.NewError("documentdb.DatabaseAccountsClient", "ListMetricDefinitions", err.Error())
}
req, err := client.ListMetricDefinitionsPreparer(ctx, resourceGroupName, accountName)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "ListMetricDefinitions", nil, "Failure preparing request")
return
}
resp, err := client.ListMetricDefinitionsSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "ListMetricDefinitions", resp, "Failure sending request")
return
}
result, err = client.ListMetricDefinitionsResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "ListMetricDefinitions", resp, "Failure responding to request")
return
}
return
}
// ListMetricDefinitionsPreparer prepares the ListMetricDefinitions request.
func (client DatabaseAccountsClient) ListMetricDefinitionsPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"accountName": autorest.Encode("path", accountName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2015-04-08"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/metricDefinitions", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// ListMetricDefinitionsSender sends the ListMetricDefinitions request. The method will close the
// http.Response Body if it receives an error.
func (client DatabaseAccountsClient) ListMetricDefinitionsSender(req *http.Request) (*http.Response, error) {
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// ListMetricDefinitionsResponder handles the response to the ListMetricDefinitions request. The method always
// closes the http.Response Body.
func (client DatabaseAccountsClient) ListMetricDefinitionsResponder(resp *http.Response) (result MetricDefinitionsListResult, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// ListMetrics retrieves the metrics determined by the given filter for the given database account.
// Parameters:
// resourceGroupName - name of an Azure resource group.
// accountName - cosmos DB database account name.
// filter - an OData filter expression that describes a subset of metrics to return. The parameters that can be
// filtered are name.value (the metric name; multiple names can be combined with or), startTime, endTime, and
// timeGrain. The supported operator is eq.
func (client DatabaseAccountsClient) ListMetrics(ctx context.Context, resourceGroupName string, accountName string, filter string) (result MetricListResult, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/DatabaseAccountsClient.ListMetrics")
defer func() {
sc := -1
if result.Response.Response != nil {
sc = result.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
{TargetValue: accountName,
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 50, Chain: nil},
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
{Target: "accountName", Name: validation.Pattern, Rule: `^[a-z0-9]+(-[a-z0-9]+)*`, Chain: nil}}}}); err != nil {
return result, validation.NewError("documentdb.DatabaseAccountsClient", "ListMetrics", err.Error())
}
req, err := client.ListMetricsPreparer(ctx, resourceGroupName, accountName, filter)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "ListMetrics", nil, "Failure preparing request")
return
}
resp, err := client.ListMetricsSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "ListMetrics", resp, "Failure sending request")
return
}
result, err = client.ListMetricsResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "ListMetrics", resp, "Failure responding to request")
return
}
return
}
// ListMetricsPreparer prepares the ListMetrics request.
func (client DatabaseAccountsClient) ListMetricsPreparer(ctx context.Context, resourceGroupName string, accountName string, filter string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"accountName": autorest.Encode("path", accountName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2015-04-08"
queryParameters := map[string]interface{}{
"$filter": autorest.Encode("query", filter),
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/metrics", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// ListMetricsSender sends the ListMetrics request. The method will close the
// http.Response Body if it receives an error.
func (client DatabaseAccountsClient) ListMetricsSender(req *http.Request) (*http.Response, error) {
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// ListMetricsResponder handles the response to the ListMetrics request. The method always
// closes the http.Response Body.
func (client DatabaseAccountsClient) ListMetricsResponder(resp *http.Response) (result MetricListResult, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
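
// Illustrative filter only: ListMetrics expects an OData expression over name.value, startTime,
// endTime and timeGrain using the eq operator. The exact literal syntax below is an assumption:
//
//	filter := "name.value eq 'Total Requests' and startTime eq '2019-01-01T00:00:00Z'" +
//		" and endTime eq '2019-01-02T00:00:00Z' and timeGrain eq duration'PT5M'"
//	metrics, err := client.ListMetrics(ctx, resourceGroupName, accountName, filter)
//	_, _ = metrics, err
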
// ListMongoDBCollections lists the MongoDB collections under an existing Azure Cosmos DB database account.
// Parameters:
// resourceGroupName - name of an Azure resource group.
// accountName - cosmos DB database account name.
// databaseName - cosmos DB database name.
func (client DatabaseAccountsClient) ListMongoDBCollections(ctx context.Context, resourceGroupName string, accountName string, databaseName string) (result MongoDBCollectionListResult, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/DatabaseAccountsClient.ListMongoDBCollections")
defer func() {
sc := -1
if result.Response.Response != nil {
sc = result.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
{TargetValue: accountName,
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 50, Chain: nil},
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
{Target: "accountName", Name: validation.Pattern, Rule: `^[a-z0-9]+(-[a-z0-9]+)*`, Chain: nil}}}}); err != nil {
return result, validation.NewError("documentdb.DatabaseAccountsClient", "ListMongoDBCollections", err.Error())
}
req, err := client.ListMongoDBCollectionsPreparer(ctx, resourceGroupName, accountName, databaseName)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "ListMongoDBCollections", nil, "Failure preparing request")
return
}
resp, err := client.ListMongoDBCollectionsSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "ListMongoDBCollections", resp, "Failure sending request")
return
}
result, err = client.ListMongoDBCollectionsResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "ListMongoDBCollections", resp, "Failure responding to request")
return
}
return
}
// ListMongoDBCollectionsPreparer prepares the ListMongoDBCollections request.
func (client DatabaseAccountsClient) ListMongoDBCollectionsPreparer(ctx context.Context, resourceGroupName string, accountName string, databaseName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"accountName": autorest.Encode("path", accountName),
"databaseName": autorest.Encode("path", databaseName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2015-04-08"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/apis/mongodb/databases/{databaseName}/collections", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// ListMongoDBCollectionsSender sends the ListMongoDBCollections request. The method will close the
// http.Response Body if it receives an error.
func (client DatabaseAccountsClient) ListMongoDBCollectionsSender(req *http.Request) (*http.Response, error) {
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// ListMongoDBCollectionsResponder handles the response to the ListMongoDBCollections request. The method always
// closes the http.Response Body.
func (client DatabaseAccountsClient) ListMongoDBCollectionsResponder(resp *http.Response) (result MongoDBCollectionListResult, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// ListMongoDBDatabases lists the MongoDB databases under an existing Azure Cosmos DB database account.
// Parameters:
// resourceGroupName - name of an Azure resource group.
// accountName - cosmos DB database account name.
func (client DatabaseAccountsClient) ListMongoDBDatabases(ctx context.Context, resourceGroupName string, accountName string) (result MongoDBDatabaseListResult, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/DatabaseAccountsClient.ListMongoDBDatabases")
defer func() {
sc := -1
if result.Response.Response != nil {
sc = result.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
{TargetValue: accountName,
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 50, Chain: nil},
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
{Target: "accountName", Name: validation.Pattern, Rule: `^[a-z0-9]+(-[a-z0-9]+)*`, Chain: nil}}}}); err != nil {
return result, validation.NewError("documentdb.DatabaseAccountsClient", "ListMongoDBDatabases", err.Error())
}
req, err := client.ListMongoDBDatabasesPreparer(ctx, resourceGroupName, accountName)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "ListMongoDBDatabases", nil, "Failure preparing request")
return
}
resp, err := client.ListMongoDBDatabasesSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "ListMongoDBDatabases", resp, "Failure sending request")
return
}
result, err = client.ListMongoDBDatabasesResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "ListMongoDBDatabases", resp, "Failure responding to request")
return
}
return
}
// ListMongoDBDatabasesPreparer prepares the ListMongoDBDatabases request.
func (client DatabaseAccountsClient) ListMongoDBDatabasesPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"accountName": autorest.Encode("path", accountName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2015-04-08"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/apis/mongodb/databases", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// ListMongoDBDatabasesSender sends the ListMongoDBDatabases request. The method will close the
// http.Response Body if it receives an error.
func (client DatabaseAccountsClient) ListMongoDBDatabasesSender(req *http.Request) (*http.Response, error) {
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// ListMongoDBDatabasesResponder handles the response to the ListMongoDBDatabases request. The method always
// closes the http.Response Body.
func (client DatabaseAccountsClient) ListMongoDBDatabasesResponder(resp *http.Response) (result MongoDBDatabaseListResult, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// ListReadOnlyKeys lists the read-only access keys for the specified Azure Cosmos DB database account.
// Parameters:
// resourceGroupName - name of an Azure resource group.
// accountName - cosmos DB database account name.
func (client DatabaseAccountsClient) ListReadOnlyKeys(ctx context.Context, resourceGroupName string, accountName string) (result DatabaseAccountListReadOnlyKeysResult, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/DatabaseAccountsClient.ListReadOnlyKeys")
defer func() {
sc := -1
if result.Response.Response != nil {
sc = result.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
{TargetValue: accountName,
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 50, Chain: nil},
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
{Target: "accountName", Name: validation.Pattern, Rule: `^[a-z0-9]+(-[a-z0-9]+)*`, Chain: nil}}}}); err != nil {
return result, validation.NewError("documentdb.DatabaseAccountsClient", "ListReadOnlyKeys", err.Error())
}
req, err := client.ListReadOnlyKeysPreparer(ctx, resourceGroupName, accountName)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "ListReadOnlyKeys", nil, "Failure preparing request")
return
}
resp, err := client.ListReadOnlyKeysSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "ListReadOnlyKeys", resp, "Failure sending request")
return
}
result, err = client.ListReadOnlyKeysResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "ListReadOnlyKeys", resp, "Failure responding to request")
return
}
return
}
// ListReadOnlyKeysPreparer prepares the ListReadOnlyKeys request.
func (client DatabaseAccountsClient) ListReadOnlyKeysPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"accountName": autorest.Encode("path", accountName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2015-04-08"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsPost(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/readonlykeys", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// ListReadOnlyKeysSender sends the ListReadOnlyKeys request. The method will close the
// http.Response Body if it receives an error.
func (client DatabaseAccountsClient) ListReadOnlyKeysSender(req *http.Request) (*http.Response, error) {
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// ListReadOnlyKeysResponder handles the response to the ListReadOnlyKeys request. The method always
// closes the http.Response Body.
func (client DatabaseAccountsClient) ListReadOnlyKeysResponder(resp *http.Response) (result DatabaseAccountListReadOnlyKeysResult, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// ListSQLContainers lists the SQL containers under an existing Azure Cosmos DB database account.
// Parameters:
// resourceGroupName - name of an Azure resource group.
// accountName - cosmos DB database account name.
// databaseName - cosmos DB database name.
func (client DatabaseAccountsClient) ListSQLContainers(ctx context.Context, resourceGroupName string, accountName string, databaseName string) (result SQLContainerListResult, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/DatabaseAccountsClient.ListSQLContainers")
defer func() {
sc := -1
if result.Response.Response != nil {
sc = result.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
{TargetValue: accountName,
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 50, Chain: nil},
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
{Target: "accountName", Name: validation.Pattern, Rule: `^[a-z0-9]+(-[a-z0-9]+)*`, Chain: nil}}}}); err != nil {
return result, validation.NewError("documentdb.DatabaseAccountsClient", "ListSQLContainers", err.Error())
}
req, err := client.ListSQLContainersPreparer(ctx, resourceGroupName, accountName, databaseName)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "ListSQLContainers", nil, "Failure preparing request")
return
}
resp, err := client.ListSQLContainersSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "ListSQLContainers", resp, "Failure sending request")
return
}
result, err = client.ListSQLContainersResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "ListSQLContainers", resp, "Failure responding to request")
return
}
return
}
// ListSQLContainersPreparer prepares the ListSQLContainers request.
func (client DatabaseAccountsClient) ListSQLContainersPreparer(ctx context.Context, resourceGroupName string, accountName string, databaseName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"accountName": autorest.Encode("path", accountName),
"databaseName": autorest.Encode("path", databaseName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2015-04-08"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/apis/sql/databases/{databaseName}/containers", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// ListSQLContainersSender sends the ListSQLContainers request. The method will close the
// http.Response Body if it receives an error.
func (client DatabaseAccountsClient) ListSQLContainersSender(req *http.Request) (*http.Response, error) {
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// ListSQLContainersResponder handles the response to the ListSQLContainers request. The method always
// closes the http.Response Body.
func (client DatabaseAccountsClient) ListSQLContainersResponder(resp *http.Response) (result SQLContainerListResult, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// ListSQLDatabases lists the SQL databases under an existing Azure Cosmos DB database account.
// Parameters:
// resourceGroupName - name of an Azure resource group.
// accountName - cosmos DB database account name.
func (client DatabaseAccountsClient) ListSQLDatabases(ctx context.Context, resourceGroupName string, accountName string) (result SQLDatabaseListResult, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/DatabaseAccountsClient.ListSQLDatabases")
defer func() {
sc := -1
if result.Response.Response != nil {
sc = result.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
{TargetValue: accountName,
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 50, Chain: nil},
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
{Target: "accountName", Name: validation.Pattern, Rule: `^[a-z0-9]+(-[a-z0-9]+)*`, Chain: nil}}}}); err != nil {
return result, validation.NewError("documentdb.DatabaseAccountsClient", "ListSQLDatabases", err.Error())
}
req, err := client.ListSQLDatabasesPreparer(ctx, resourceGroupName, accountName)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "ListSQLDatabases", nil, "Failure preparing request")
return
}
resp, err := client.ListSQLDatabasesSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "ListSQLDatabases", resp, "Failure sending request")
return
}
result, err = client.ListSQLDatabasesResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "ListSQLDatabases", resp, "Failure responding to request")
return
}
return
}
// ListSQLDatabasesPreparer prepares the ListSQLDatabases request.
func (client DatabaseAccountsClient) ListSQLDatabasesPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"accountName": autorest.Encode("path", accountName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2015-04-08"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/apis/sql/databases", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// ListSQLDatabasesSender sends the ListSQLDatabases request. The method will close the
// http.Response Body if it receives an error.
func (client DatabaseAccountsClient) ListSQLDatabasesSender(req *http.Request) (*http.Response, error) {
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// ListSQLDatabasesResponder handles the response to the ListSQLDatabases request. The method always
// closes the http.Response Body.
func (client DatabaseAccountsClient) ListSQLDatabasesResponder(resp *http.Response) (result SQLDatabaseListResult, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// ListTables lists the Tables under an existing Azure Cosmos DB database account.
// Parameters:
// resourceGroupName - name of an Azure resource group.
// accountName - cosmos DB database account name.
func (client DatabaseAccountsClient) ListTables(ctx context.Context, resourceGroupName string, accountName string) (result TableListResult, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/DatabaseAccountsClient.ListTables")
defer func() {
sc := -1
if result.Response.Response != nil {
sc = result.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
{TargetValue: accountName,
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 50, Chain: nil},
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
{Target: "accountName", Name: validation.Pattern, Rule: `^[a-z0-9]+(-[a-z0-9]+)*`, Chain: nil}}}}); err != nil {
return result, validation.NewError("documentdb.DatabaseAccountsClient", "ListTables", err.Error())
}
req, err := client.ListTablesPreparer(ctx, resourceGroupName, accountName)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "ListTables", nil, "Failure preparing request")
return
}
resp, err := client.ListTablesSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "ListTables", resp, "Failure sending request")
return
}
result, err = client.ListTablesResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "ListTables", resp, "Failure responding to request")
return
}
return
}
// ListTablesPreparer prepares the ListTables request.
func (client DatabaseAccountsClient) ListTablesPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"accountName": autorest.Encode("path", accountName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2015-04-08"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/apis/table/tables", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// ListTablesSender sends the ListTables request. The method will close the
// http.Response Body if it receives an error.
func (client DatabaseAccountsClient) ListTablesSender(req *http.Request) (*http.Response, error) {
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// ListTablesResponder handles the response to the ListTables request. The method always
// closes the http.Response Body.
func (client DatabaseAccountsClient) ListTablesResponder(resp *http.Response) (result TableListResult, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// ListUsages retrieves the usages (most recent data) for the given database account.
// Parameters:
// resourceGroupName - name of an Azure resource group.
// accountName - cosmos DB database account name.
// filter - an OData filter expression that describes a subset of usages to return. The supported parameter is
// name.value (the name of the metric; multiple names can be combined with 'or').
func (client DatabaseAccountsClient) ListUsages(ctx context.Context, resourceGroupName string, accountName string, filter string) (result UsagesResult, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/DatabaseAccountsClient.ListUsages")
defer func() {
sc := -1
if result.Response.Response != nil {
sc = result.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
{TargetValue: accountName,
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 50, Chain: nil},
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
{Target: "accountName", Name: validation.Pattern, Rule: `^[a-z0-9]+(-[a-z0-9]+)*`, Chain: nil}}}}); err != nil {
return result, validation.NewError("documentdb.DatabaseAccountsClient", "ListUsages", err.Error())
}
req, err := client.ListUsagesPreparer(ctx, resourceGroupName, accountName, filter)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "ListUsages", nil, "Failure preparing request")
return
}
resp, err := client.ListUsagesSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "ListUsages", resp, "Failure sending request")
return
}
result, err = client.ListUsagesResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "ListUsages", resp, "Failure responding to request")
return
}
return
}
// ListUsagesPreparer prepares the ListUsages request.
func (client DatabaseAccountsClient) ListUsagesPreparer(ctx context.Context, resourceGroupName string, accountName string, filter string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"accountName": autorest.Encode("path", accountName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2015-04-08"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
if len(filter) > 0 {
queryParameters["$filter"] = autorest.Encode("query", filter)
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/usages", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// ListUsagesSender sends the ListUsages request. The method will close the
// http.Response Body if it receives an error.
func (client DatabaseAccountsClient) ListUsagesSender(req *http.Request) (*http.Response, error) {
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// ListUsagesResponder handles the response to the ListUsages request. The method always
// closes the http.Response Body.
func (client DatabaseAccountsClient) ListUsagesResponder(resp *http.Response) (result UsagesResult, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
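
// Usage sketch (assumption, not generated code): the filter argument is passed
// through as the $filter query parameter, so a typical call restricts the result
// to a named metric. The metric name below is illustrative only, and client/ctx
// are assumed to be set up as in the earlier sketch.
//
//	usages, err := client.ListUsages(ctx, resourceGroup, account,
//		"name.value eq 'Total Requests'")
//	if err != nil {
//		log.Fatal(err)
//	}
//	if usages.Value != nil {
//		fmt.Printf("returned %d usage entries\n", len(*usages.Value))
//	}
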
// OfflineRegion takes the specified region offline for the specified Azure Cosmos DB database account.
// Parameters:
// resourceGroupName - name of an Azure resource group.
// accountName - cosmos DB database account name.
// regionParameterForOffline - cosmos DB region to offline for the database account.
func (client DatabaseAccountsClient) OfflineRegion(ctx context.Context, resourceGroupName string, accountName string, regionParameterForOffline RegionForOnlineOffline) (result DatabaseAccountsOfflineRegionFuture, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/DatabaseAccountsClient.OfflineRegion")
defer func() {
sc := -1
if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
{TargetValue: accountName,
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 50, Chain: nil},
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
{Target: "accountName", Name: validation.Pattern, Rule: `^[a-z0-9]+(-[a-z0-9]+)*`, Chain: nil}}},
{TargetValue: regionParameterForOffline,
Constraints: []validation.Constraint{{Target: "regionParameterForOffline.Region", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
return result, validation.NewError("documentdb.DatabaseAccountsClient", "OfflineRegion", err.Error())
}
req, err := client.OfflineRegionPreparer(ctx, resourceGroupName, accountName, regionParameterForOffline)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "OfflineRegion", nil, "Failure preparing request")
return
}
result, err = client.OfflineRegionSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "OfflineRegion", nil, "Failure sending request")
return
}
return
}
// OfflineRegionPreparer prepares the OfflineRegion request.
func (client DatabaseAccountsClient) OfflineRegionPreparer(ctx context.Context, resourceGroupName string, accountName string, regionParameterForOffline RegionForOnlineOffline) (*http.Request, error) {
pathParameters := map[string]interface{}{
"accountName": autorest.Encode("path", accountName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2015-04-08"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPost(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/offlineRegion", pathParameters),
autorest.WithJSON(regionParameterForOffline),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// OfflineRegionSender sends the OfflineRegion request. The method will close the
// http.Response Body if it receives an error.
func (client DatabaseAccountsClient) OfflineRegionSender(req *http.Request) (future DatabaseAccountsOfflineRegionFuture, err error) {
var resp *http.Response
resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
var azf azure.Future
azf, err = azure.NewFutureFromResponse(resp)
future.FutureAPI = &azf
future.Result = future.result
return
}
// OfflineRegionResponder handles the response to the OfflineRegion request. The method always
// closes the http.Response Body.
func (client DatabaseAccountsClient) OfflineRegionResponder(resp *http.Response) (result autorest.Response, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
autorest.ByClosing())
result.Response = resp
return
}
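
// Usage sketch (assumption): OfflineRegion is a long-running operation, so the
// returned future is normally waited on before the final response is read via
// its Result field. The region name is a placeholder, and client/ctx are assumed
// to be set up as in the earlier sketch.
//
//	future, err := client.OfflineRegion(ctx, resourceGroup, account,
//		documentdb.RegionForOnlineOffline{Region: to.StringPtr("West US")})
//	if err != nil {
//		log.Fatal(err)
//	}
//	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
//		log.Fatal(err)
//	}
//	resp, err := future.Result(client) // final autorest.Response once the operation completes
//	_ = resp
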
// OnlineRegion brings the specified region online for the specified Azure Cosmos DB database account.
// Parameters:
// resourceGroupName - name of an Azure resource group.
// accountName - cosmos DB database account name.
// regionParameterForOnline - cosmos DB region to online for the database account.
func (client DatabaseAccountsClient) OnlineRegion(ctx context.Context, resourceGroupName string, accountName string, regionParameterForOnline RegionForOnlineOffline) (result DatabaseAccountsOnlineRegionFuture, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/DatabaseAccountsClient.OnlineRegion")
defer func() {
sc := -1
if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
{TargetValue: accountName,
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 50, Chain: nil},
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
{Target: "accountName", Name: validation.Pattern, Rule: `^[a-z0-9]+(-[a-z0-9]+)*`, Chain: nil}}},
{TargetValue: regionParameterForOnline,
Constraints: []validation.Constraint{{Target: "regionParameterForOnline.Region", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
return result, validation.NewError("documentdb.DatabaseAccountsClient", "OnlineRegion", err.Error())
}
req, err := client.OnlineRegionPreparer(ctx, resourceGroupName, accountName, regionParameterForOnline)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "OnlineRegion", nil, "Failure preparing request")
return
}
result, err = client.OnlineRegionSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "OnlineRegion", nil, "Failure sending request")
return
}
return
}
// OnlineRegionPreparer prepares the OnlineRegion request.
func (client DatabaseAccountsClient) OnlineRegionPreparer(ctx context.Context, resourceGroupName string, accountName string, regionParameterForOnline RegionForOnlineOffline) (*http.Request, error) {
pathParameters := map[string]interface{}{
"accountName": autorest.Encode("path", accountName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2015-04-08"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPost(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/onlineRegion", pathParameters),
autorest.WithJSON(regionParameterForOnline),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// OnlineRegionSender sends the OnlineRegion request. The method will close the
// http.Response Body if it receives an error.
func (client DatabaseAccountsClient) OnlineRegionSender(req *http.Request) (future DatabaseAccountsOnlineRegionFuture, err error) {
var resp *http.Response
resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
var azf azure.Future
azf, err = azure.NewFutureFromResponse(resp)
future.FutureAPI = &azf
future.Result = future.result
return
}
// OnlineRegionResponder handles the response to the OnlineRegion request. The method always
// closes the http.Response Body.
func (client DatabaseAccountsClient) OnlineRegionResponder(resp *http.Response) (result autorest.Response, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
autorest.ByClosing())
result.Response = resp
return
}
// Patch patches the properties of an existing Azure Cosmos DB database account.
// Parameters:
// resourceGroupName - name of an Azure resource group.
// accountName - cosmos DB database account name.
// updateParameters - the tags parameter to patch for the current database account.
func (client DatabaseAccountsClient) Patch(ctx context.Context, resourceGroupName string, accountName string, updateParameters DatabaseAccountPatchParameters) (result DatabaseAccountsPatchFuture, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/DatabaseAccountsClient.Patch")
defer func() {
sc := -1
if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
{TargetValue: accountName,
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 50, Chain: nil},
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
{Target: "accountName", Name: validation.Pattern, Rule: `^[a-z0-9]+(-[a-z0-9]+)*`, Chain: nil}}}}); err != nil {
return result, validation.NewError("documentdb.DatabaseAccountsClient", "Patch", err.Error())
}
req, err := client.PatchPreparer(ctx, resourceGroupName, accountName, updateParameters)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "Patch", nil, "Failure preparing request")
return
}
result, err = client.PatchSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "Patch", nil, "Failure sending request")
return
}
return
}
// PatchPreparer prepares the Patch request.
func (client DatabaseAccountsClient) PatchPreparer(ctx context.Context, resourceGroupName string, accountName string, updateParameters DatabaseAccountPatchParameters) (*http.Request, error) {
pathParameters := map[string]interface{}{
"accountName": autorest.Encode("path", accountName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2015-04-08"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPatch(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}", pathParameters),
autorest.WithJSON(updateParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// PatchSender sends the Patch request. The method will close the
// http.Response Body if it receives an error.
func (client DatabaseAccountsClient) PatchSender(req *http.Request) (future DatabaseAccountsPatchFuture, err error) {
var resp *http.Response
resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
var azf azure.Future
azf, err = azure.NewFutureFromResponse(resp)
future.FutureAPI = &azf
future.Result = future.result
return
}
// PatchResponder handles the response to the Patch request. The method always
// closes the http.Response Body.
func (client DatabaseAccountsClient) PatchResponder(resp *http.Response) (result DatabaseAccount, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
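
// Usage sketch (assumption): patching only the tags of an account, since the doc
// comment above describes updateParameters as the tags to patch. The assumption
// that DatabaseAccountPatchParameters exposes a Tags map, the tag values, and
// client/ctx are all placeholders or assumptions, not guaranteed by this file.
//
//	future, err := client.Patch(ctx, resourceGroup, account,
//		documentdb.DatabaseAccountPatchParameters{
//			Tags: map[string]*string{"environment": to.StringPtr("staging")},
//		})
//	if err != nil {
//		log.Fatal(err)
//	}
//	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
//		log.Fatal(err)
//	}
//	updated, err := future.Result(client) // final DatabaseAccount on success
//	_ = updated
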
// RegenerateKey regenerates an access key for the specified Azure Cosmos DB database account.
// Parameters:
// resourceGroupName - name of an Azure resource group.
// accountName - cosmos DB database account name.
// keyToRegenerate - the name of the key to regenerate.
func (client DatabaseAccountsClient) RegenerateKey(ctx context.Context, resourceGroupName string, accountName string, keyToRegenerate DatabaseAccountRegenerateKeyParameters) (result DatabaseAccountsRegenerateKeyFuture, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/DatabaseAccountsClient.RegenerateKey")
defer func() {
sc := -1
if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
{TargetValue: accountName,
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 50, Chain: nil},
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
{Target: "accountName", Name: validation.Pattern, Rule: `^[a-z0-9]+(-[a-z0-9]+)*`, Chain: nil}}}}); err != nil {
return result, validation.NewError("documentdb.DatabaseAccountsClient", "RegenerateKey", err.Error())
}
req, err := client.RegenerateKeyPreparer(ctx, resourceGroupName, accountName, keyToRegenerate)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "RegenerateKey", nil, "Failure preparing request")
return
}
result, err = client.RegenerateKeySender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "RegenerateKey", nil, "Failure sending request")
return
}
return
}
// RegenerateKeyPreparer prepares the RegenerateKey request.
func (client DatabaseAccountsClient) RegenerateKeyPreparer(ctx context.Context, resourceGroupName string, accountName string, keyToRegenerate DatabaseAccountRegenerateKeyParameters) (*http.Request, error) {
pathParameters := map[string]interface{}{
"accountName": autorest.Encode("path", accountName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2015-04-08"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPost(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/regenerateKey", pathParameters),
autorest.WithJSON(keyToRegenerate),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// RegenerateKeySender sends the RegenerateKey request. The method will close the
// http.Response Body if it receives an error.
func (client DatabaseAccountsClient) RegenerateKeySender(req *http.Request) (future DatabaseAccountsRegenerateKeyFuture, err error) {
var resp *http.Response
resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
var azf azure.Future
azf, err = azure.NewFutureFromResponse(resp)
future.FutureAPI = &azf
future.Result = future.result
return
}
// RegenerateKeyResponder handles the response to the RegenerateKey request. The method always
// closes the http.Response Body.
func (client DatabaseAccountsClient) RegenerateKeyResponder(resp *http.Response) (result autorest.Response, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
autorest.ByClosing())
result.Response = resp
return
}
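
// Usage sketch (assumption): the request body names the key to roll over. The
// KeyKind constant used here (documentdb.Primary) is an assumption about the
// generated enum values; client/ctx are assumed from the earlier sketch.
//
//	future, err := client.RegenerateKey(ctx, resourceGroup, account,
//		documentdb.DatabaseAccountRegenerateKeyParameters{KeyKind: documentdb.Primary})
//	if err != nil {
//		log.Fatal(err)
//	}
//	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
//		log.Fatal(err)
//	}
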
// UpdateCassandraKeyspaceThroughput updates the RUs per second of an Azure Cosmos DB Cassandra keyspace.
// Parameters:
// resourceGroupName - name of an Azure resource group.
// accountName - cosmos DB database account name.
// keyspaceName - cosmos DB keyspace name.
// updateThroughputParameters - the RUs per second of the parameters to provide for the current Cassandra
// Keyspace.
func (client DatabaseAccountsClient) UpdateCassandraKeyspaceThroughput(ctx context.Context, resourceGroupName string, accountName string, keyspaceName string, updateThroughputParameters ThroughputUpdateParameters) (result DatabaseAccountsUpdateCassandraKeyspaceThroughputFuture, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/DatabaseAccountsClient.UpdateCassandraKeyspaceThroughput")
defer func() {
sc := -1
if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
{TargetValue: accountName,
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 50, Chain: nil},
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
{Target: "accountName", Name: validation.Pattern, Rule: `^[a-z0-9]+(-[a-z0-9]+)*`, Chain: nil}}},
{TargetValue: updateThroughputParameters,
Constraints: []validation.Constraint{{Target: "updateThroughputParameters.ThroughputUpdateProperties", Name: validation.Null, Rule: true,
Chain: []validation.Constraint{{Target: "updateThroughputParameters.ThroughputUpdateProperties.Resource", Name: validation.Null, Rule: true,
Chain: []validation.Constraint{{Target: "updateThroughputParameters.ThroughputUpdateProperties.Resource.Throughput", Name: validation.Null, Rule: true, Chain: nil}}},
}}}}}); err != nil {
return result, validation.NewError("documentdb.DatabaseAccountsClient", "UpdateCassandraKeyspaceThroughput", err.Error())
}
req, err := client.UpdateCassandraKeyspaceThroughputPreparer(ctx, resourceGroupName, accountName, keyspaceName, updateThroughputParameters)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "UpdateCassandraKeyspaceThroughput", nil, "Failure preparing request")
return
}
result, err = client.UpdateCassandraKeyspaceThroughputSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "UpdateCassandraKeyspaceThroughput", nil, "Failure sending request")
return
}
return
}
// UpdateCassandraKeyspaceThroughputPreparer prepares the UpdateCassandraKeyspaceThroughput request.
func (client DatabaseAccountsClient) UpdateCassandraKeyspaceThroughputPreparer(ctx context.Context, resourceGroupName string, accountName string, keyspaceName string, updateThroughputParameters ThroughputUpdateParameters) (*http.Request, error) {
pathParameters := map[string]interface{}{
"accountName": autorest.Encode("path", accountName),
"keyspaceName": autorest.Encode("path", keyspaceName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2015-04-08"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPut(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/apis/cassandra/keyspaces/{keyspaceName}/settings/throughput", pathParameters),
autorest.WithJSON(updateThroughputParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// UpdateCassandraKeyspaceThroughputSender sends the UpdateCassandraKeyspaceThroughput request. The method will close the
// http.Response Body if it receives an error.
func (client DatabaseAccountsClient) UpdateCassandraKeyspaceThroughputSender(req *http.Request) (future DatabaseAccountsUpdateCassandraKeyspaceThroughputFuture, err error) {
var resp *http.Response
resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
var azf azure.Future
azf, err = azure.NewFutureFromResponse(resp)
future.FutureAPI = &azf
future.Result = future.result
return
}
// UpdateCassandraKeyspaceThroughputResponder handles the response to the UpdateCassandraKeyspaceThroughput request. The method always
// closes the http.Response Body.
func (client DatabaseAccountsClient) UpdateCassandraKeyspaceThroughputResponder(resp *http.Response) (result Throughput, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
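
// Usage sketch (assumption): the validation above requires
// ThroughputUpdateProperties.Resource.Throughput to be non-nil, which suggests a
// request body of the following shape. The ThroughputResource type name and the
// 400 RU/s value are assumptions; the same parameter shape applies to the other
// throughput-update methods below.
//
//	params := documentdb.ThroughputUpdateParameters{
//		ThroughputUpdateProperties: &documentdb.ThroughputUpdateProperties{
//			Resource: &documentdb.ThroughputResource{
//				Throughput: to.Int32Ptr(400),
//			},
//		},
//	}
//	future, err := client.UpdateCassandraKeyspaceThroughput(ctx, resourceGroup, account, keyspace, params)
//	_ = future
//	_ = err
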
// UpdateCassandraTableThroughput updates the RUs per second of an Azure Cosmos DB Cassandra table.
// Parameters:
// resourceGroupName - name of an Azure resource group.
// accountName - cosmos DB database account name.
// keyspaceName - cosmos DB keyspace name.
// tableName - cosmos DB table name.
// updateThroughputParameters - the RUs per second of the parameters to provide for the current Cassandra
// table.
func (client DatabaseAccountsClient) UpdateCassandraTableThroughput(ctx context.Context, resourceGroupName string, accountName string, keyspaceName string, tableName string, updateThroughputParameters ThroughputUpdateParameters) (result DatabaseAccountsUpdateCassandraTableThroughputFuture, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/DatabaseAccountsClient.UpdateCassandraTableThroughput")
defer func() {
sc := -1
if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
{TargetValue: accountName,
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 50, Chain: nil},
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
{Target: "accountName", Name: validation.Pattern, Rule: `^[a-z0-9]+(-[a-z0-9]+)*`, Chain: nil}}},
{TargetValue: updateThroughputParameters,
Constraints: []validation.Constraint{{Target: "updateThroughputParameters.ThroughputUpdateProperties", Name: validation.Null, Rule: true,
Chain: []validation.Constraint{{Target: "updateThroughputParameters.ThroughputUpdateProperties.Resource", Name: validation.Null, Rule: true,
Chain: []validation.Constraint{{Target: "updateThroughputParameters.ThroughputUpdateProperties.Resource.Throughput", Name: validation.Null, Rule: true, Chain: nil}}},
}}}}}); err != nil {
return result, validation.NewError("documentdb.DatabaseAccountsClient", "UpdateCassandraTableThroughput", err.Error())
}
req, err := client.UpdateCassandraTableThroughputPreparer(ctx, resourceGroupName, accountName, keyspaceName, tableName, updateThroughputParameters)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "UpdateCassandraTableThroughput", nil, "Failure preparing request")
return
}
result, err = client.UpdateCassandraTableThroughputSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "UpdateCassandraTableThroughput", nil, "Failure sending request")
return
}
return
}
// UpdateCassandraTableThroughputPreparer prepares the UpdateCassandraTableThroughput request.
func (client DatabaseAccountsClient) UpdateCassandraTableThroughputPreparer(ctx context.Context, resourceGroupName string, accountName string, keyspaceName string, tableName string, updateThroughputParameters ThroughputUpdateParameters) (*http.Request, error) {
pathParameters := map[string]interface{}{
"accountName": autorest.Encode("path", accountName),
"keyspaceName": autorest.Encode("path", keyspaceName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
"tableName": autorest.Encode("path", tableName),
}
const APIVersion = "2015-04-08"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPut(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/apis/cassandra/keyspaces/{keyspaceName}/tables/{tableName}/settings/throughput", pathParameters),
autorest.WithJSON(updateThroughputParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// UpdateCassandraTableThroughputSender sends the UpdateCassandraTableThroughput request. The method will close the
// http.Response Body if it receives an error.
func (client DatabaseAccountsClient) UpdateCassandraTableThroughputSender(req *http.Request) (future DatabaseAccountsUpdateCassandraTableThroughputFuture, err error) {
var resp *http.Response
resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
var azf azure.Future
azf, err = azure.NewFutureFromResponse(resp)
future.FutureAPI = &azf
future.Result = future.result
return
}
// UpdateCassandraTableThroughputResponder handles the response to the UpdateCassandraTableThroughput request. The method always
// closes the http.Response Body.
func (client DatabaseAccountsClient) UpdateCassandraTableThroughputResponder(resp *http.Response) (result Throughput, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// UpdateGremlinDatabaseThroughput updates the RUs per second of an Azure Cosmos DB Gremlin database.
// Parameters:
// resourceGroupName - name of an Azure resource group.
// accountName - cosmos DB database account name.
// databaseName - cosmos DB database name.
// updateThroughputParameters - the RUs per second of the parameters to provide for the current Gremlin
// database.
func (client DatabaseAccountsClient) UpdateGremlinDatabaseThroughput(ctx context.Context, resourceGroupName string, accountName string, databaseName string, updateThroughputParameters ThroughputUpdateParameters) (result DatabaseAccountsUpdateGremlinDatabaseThroughputFuture, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/DatabaseAccountsClient.UpdateGremlinDatabaseThroughput")
defer func() {
sc := -1
if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
{TargetValue: accountName,
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 50, Chain: nil},
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
{Target: "accountName", Name: validation.Pattern, Rule: `^[a-z0-9]+(-[a-z0-9]+)*`, Chain: nil}}},
{TargetValue: updateThroughputParameters,
Constraints: []validation.Constraint{{Target: "updateThroughputParameters.ThroughputUpdateProperties", Name: validation.Null, Rule: true,
Chain: []validation.Constraint{{Target: "updateThroughputParameters.ThroughputUpdateProperties.Resource", Name: validation.Null, Rule: true,
Chain: []validation.Constraint{{Target: "updateThroughputParameters.ThroughputUpdateProperties.Resource.Throughput", Name: validation.Null, Rule: true, Chain: nil}}},
}}}}}); err != nil {
return result, validation.NewError("documentdb.DatabaseAccountsClient", "UpdateGremlinDatabaseThroughput", err.Error())
}
req, err := client.UpdateGremlinDatabaseThroughputPreparer(ctx, resourceGroupName, accountName, databaseName, updateThroughputParameters)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "UpdateGremlinDatabaseThroughput", nil, "Failure preparing request")
return
}
result, err = client.UpdateGremlinDatabaseThroughputSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "UpdateGremlinDatabaseThroughput", nil, "Failure sending request")
return
}
return
}
// UpdateGremlinDatabaseThroughputPreparer prepares the UpdateGremlinDatabaseThroughput request.
func (client DatabaseAccountsClient) UpdateGremlinDatabaseThroughputPreparer(ctx context.Context, resourceGroupName string, accountName string, databaseName string, updateThroughputParameters ThroughputUpdateParameters) (*http.Request, error) {
pathParameters := map[string]interface{}{
"accountName": autorest.Encode("path", accountName),
"databaseName": autorest.Encode("path", databaseName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2015-04-08"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPut(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/apis/gremlin/databases/{databaseName}/settings/throughput", pathParameters),
autorest.WithJSON(updateThroughputParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// UpdateGremlinDatabaseThroughputSender sends the UpdateGremlinDatabaseThroughput request. The method will close the
// http.Response Body if it receives an error.
func (client DatabaseAccountsClient) UpdateGremlinDatabaseThroughputSender(req *http.Request) (future DatabaseAccountsUpdateGremlinDatabaseThroughputFuture, err error) {
var resp *http.Response
resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
var azf azure.Future
azf, err = azure.NewFutureFromResponse(resp)
future.FutureAPI = &azf
future.Result = future.result
return
}
// UpdateGremlinDatabaseThroughputResponder handles the response to the UpdateGremlinDatabaseThroughput request. The method always
// closes the http.Response Body.
func (client DatabaseAccountsClient) UpdateGremlinDatabaseThroughputResponder(resp *http.Response) (result Throughput, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// UpdateGremlinGraphThroughput updates the RUs per second of an Azure Cosmos DB Gremlin graph.
// Parameters:
// resourceGroupName - name of an Azure resource group.
// accountName - cosmos DB database account name.
// databaseName - cosmos DB database name.
// graphName - cosmos DB graph name.
// updateThroughputParameters - the RUs per second of the parameters to provide for the current Gremlin graph.
func (client DatabaseAccountsClient) UpdateGremlinGraphThroughput(ctx context.Context, resourceGroupName string, accountName string, databaseName string, graphName string, updateThroughputParameters ThroughputUpdateParameters) (result DatabaseAccountsUpdateGremlinGraphThroughputFuture, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/DatabaseAccountsClient.UpdateGremlinGraphThroughput")
defer func() {
sc := -1
if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
{TargetValue: accountName,
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 50, Chain: nil},
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
{Target: "accountName", Name: validation.Pattern, Rule: `^[a-z0-9]+(-[a-z0-9]+)*`, Chain: nil}}},
{TargetValue: updateThroughputParameters,
Constraints: []validation.Constraint{{Target: "updateThroughputParameters.ThroughputUpdateProperties", Name: validation.Null, Rule: true,
Chain: []validation.Constraint{{Target: "updateThroughputParameters.ThroughputUpdateProperties.Resource", Name: validation.Null, Rule: true,
Chain: []validation.Constraint{{Target: "updateThroughputParameters.ThroughputUpdateProperties.Resource.Throughput", Name: validation.Null, Rule: true, Chain: nil}}},
}}}}}); err != nil {
return result, validation.NewError("documentdb.DatabaseAccountsClient", "UpdateGremlinGraphThroughput", err.Error())
}
req, err := client.UpdateGremlinGraphThroughputPreparer(ctx, resourceGroupName, accountName, databaseName, graphName, updateThroughputParameters)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "UpdateGremlinGraphThroughput", nil, "Failure preparing request")
return
}
result, err = client.UpdateGremlinGraphThroughputSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "UpdateGremlinGraphThroughput", nil, "Failure sending request")
return
}
return
}
// UpdateGremlinGraphThroughputPreparer prepares the UpdateGremlinGraphThroughput request.
func (client DatabaseAccountsClient) UpdateGremlinGraphThroughputPreparer(ctx context.Context, resourceGroupName string, accountName string, databaseName string, graphName string, updateThroughputParameters ThroughputUpdateParameters) (*http.Request, error) {
pathParameters := map[string]interface{}{
"accountName": autorest.Encode("path", accountName),
"databaseName": autorest.Encode("path", databaseName),
"graphName": autorest.Encode("path", graphName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2015-04-08"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPut(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/apis/gremlin/databases/{databaseName}/graphs/{graphName}/settings/throughput", pathParameters),
autorest.WithJSON(updateThroughputParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// UpdateGremlinGraphThroughputSender sends the UpdateGremlinGraphThroughput request. The method will close the
// http.Response Body if it receives an error.
func (client DatabaseAccountsClient) UpdateGremlinGraphThroughputSender(req *http.Request) (future DatabaseAccountsUpdateGremlinGraphThroughputFuture, err error) {
var resp *http.Response
resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
var azf azure.Future
azf, err = azure.NewFutureFromResponse(resp)
future.FutureAPI = &azf
future.Result = future.result
return
}
// UpdateGremlinGraphThroughputResponder handles the response to the UpdateGremlinGraphThroughput request. The method always
// closes the http.Response Body.
func (client DatabaseAccountsClient) UpdateGremlinGraphThroughputResponder(resp *http.Response) (result Throughput, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// UpdateMongoDBCollectionThroughput updates the RUs per second of an Azure Cosmos DB MongoDB collection.
// Parameters:
// resourceGroupName - name of an Azure resource group.
// accountName - cosmos DB database account name.
// databaseName - cosmos DB database name.
// collectionName - cosmos DB collection name.
// updateThroughputParameters - the RUs per second of the parameters to provide for the current MongoDB
// collection.
func (client DatabaseAccountsClient) UpdateMongoDBCollectionThroughput(ctx context.Context, resourceGroupName string, accountName string, databaseName string, collectionName string, updateThroughputParameters ThroughputUpdateParameters) (result DatabaseAccountsUpdateMongoDBCollectionThroughputFuture, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/DatabaseAccountsClient.UpdateMongoDBCollectionThroughput")
defer func() {
sc := -1
if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
{TargetValue: accountName,
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 50, Chain: nil},
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
{Target: "accountName", Name: validation.Pattern, Rule: `^[a-z0-9]+(-[a-z0-9]+)*`, Chain: nil}}},
{TargetValue: updateThroughputParameters,
Constraints: []validation.Constraint{{Target: "updateThroughputParameters.ThroughputUpdateProperties", Name: validation.Null, Rule: true,
Chain: []validation.Constraint{{Target: "updateThroughputParameters.ThroughputUpdateProperties.Resource", Name: validation.Null, Rule: true,
Chain: []validation.Constraint{{Target: "updateThroughputParameters.ThroughputUpdateProperties.Resource.Throughput", Name: validation.Null, Rule: true, Chain: nil}}},
}}}}}); err != nil {
return result, validation.NewError("documentdb.DatabaseAccountsClient", "UpdateMongoDBCollectionThroughput", err.Error())
}
req, err := client.UpdateMongoDBCollectionThroughputPreparer(ctx, resourceGroupName, accountName, databaseName, collectionName, updateThroughputParameters)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "UpdateMongoDBCollectionThroughput", nil, "Failure preparing request")
return
}
result, err = client.UpdateMongoDBCollectionThroughputSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "UpdateMongoDBCollectionThroughput", nil, "Failure sending request")
return
}
return
}
// UpdateMongoDBCollectionThroughputPreparer prepares the UpdateMongoDBCollectionThroughput request.
func (client DatabaseAccountsClient) UpdateMongoDBCollectionThroughputPreparer(ctx context.Context, resourceGroupName string, accountName string, databaseName string, collectionName string, updateThroughputParameters ThroughputUpdateParameters) (*http.Request, error) {
pathParameters := map[string]interface{}{
"accountName": autorest.Encode("path", accountName),
"collectionName": autorest.Encode("path", collectionName),
"databaseName": autorest.Encode("path", databaseName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2015-04-08"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPut(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/apis/mongodb/databases/{databaseName}/collections/{collectionName}/settings/throughput", pathParameters),
autorest.WithJSON(updateThroughputParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// UpdateMongoDBCollectionThroughputSender sends the UpdateMongoDBCollectionThroughput request. The method will close the
// http.Response Body if it receives an error.
func (client DatabaseAccountsClient) UpdateMongoDBCollectionThroughputSender(req *http.Request) (future DatabaseAccountsUpdateMongoDBCollectionThroughputFuture, err error) {
var resp *http.Response
resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
var azf azure.Future
azf, err = azure.NewFutureFromResponse(resp)
future.FutureAPI = &azf
future.Result = future.result
return
}
// UpdateMongoDBCollectionThroughputResponder handles the response to the UpdateMongoDBCollectionThroughput request. The method always
// closes the http.Response Body.
func (client DatabaseAccountsClient) UpdateMongoDBCollectionThroughputResponder(resp *http.Response) (result Throughput, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// UpdateMongoDBDatabaseThroughput updates the RUs per second of an Azure Cosmos DB MongoDB database.
// Parameters:
// resourceGroupName - name of an Azure resource group.
// accountName - cosmos DB database account name.
// databaseName - cosmos DB database name.
// updateThroughputParameters - the RUs per second of the parameters to provide for the current MongoDB
// database.
func (client DatabaseAccountsClient) UpdateMongoDBDatabaseThroughput(ctx context.Context, resourceGroupName string, accountName string, databaseName string, updateThroughputParameters ThroughputUpdateParameters) (result DatabaseAccountsUpdateMongoDBDatabaseThroughputFuture, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/DatabaseAccountsClient.UpdateMongoDBDatabaseThroughput")
defer func() {
sc := -1
if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
{TargetValue: accountName,
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 50, Chain: nil},
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
{Target: "accountName", Name: validation.Pattern, Rule: `^[a-z0-9]+(-[a-z0-9]+)*`, Chain: nil}}},
{TargetValue: updateThroughputParameters,
Constraints: []validation.Constraint{{Target: "updateThroughputParameters.ThroughputUpdateProperties", Name: validation.Null, Rule: true,
Chain: []validation.Constraint{{Target: "updateThroughputParameters.ThroughputUpdateProperties.Resource", Name: validation.Null, Rule: true,
Chain: []validation.Constraint{{Target: "updateThroughputParameters.ThroughputUpdateProperties.Resource.Throughput", Name: validation.Null, Rule: true, Chain: nil}}},
}}}}}); err != nil {
return result, validation.NewError("documentdb.DatabaseAccountsClient", "UpdateMongoDBDatabaseThroughput", err.Error())
}
req, err := client.UpdateMongoDBDatabaseThroughputPreparer(ctx, resourceGroupName, accountName, databaseName, updateThroughputParameters)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "UpdateMongoDBDatabaseThroughput", nil, "Failure preparing request")
return
}
result, err = client.UpdateMongoDBDatabaseThroughputSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "UpdateMongoDBDatabaseThroughput", nil, "Failure sending request")
return
}
return
}
// UpdateMongoDBDatabaseThroughputPreparer prepares the UpdateMongoDBDatabaseThroughput request.
func (client DatabaseAccountsClient) UpdateMongoDBDatabaseThroughputPreparer(ctx context.Context, resourceGroupName string, accountName string, databaseName string, updateThroughputParameters ThroughputUpdateParameters) (*http.Request, error) {
pathParameters := map[string]interface{}{
"accountName": autorest.Encode("path", accountName),
"databaseName": autorest.Encode("path", databaseName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2015-04-08"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPut(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/apis/mongodb/databases/{databaseName}/settings/throughput", pathParameters),
autorest.WithJSON(updateThroughputParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// UpdateMongoDBDatabaseThroughputSender sends the UpdateMongoDBDatabaseThroughput request. The method will close the
// http.Response Body if it receives an error.
func (client DatabaseAccountsClient) UpdateMongoDBDatabaseThroughputSender(req *http.Request) (future DatabaseAccountsUpdateMongoDBDatabaseThroughputFuture, err error) {
var resp *http.Response
resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
var azf azure.Future
azf, err = azure.NewFutureFromResponse(resp)
future.FutureAPI = &azf
future.Result = future.result
return
}
// UpdateMongoDBDatabaseThroughputResponder handles the response to the UpdateMongoDBDatabaseThroughput request. The method always
// closes the http.Response Body.
func (client DatabaseAccountsClient) UpdateMongoDBDatabaseThroughputResponder(resp *http.Response) (result Throughput, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// UpdateSQLContainerThroughput updates the RUs per second of an Azure Cosmos DB SQL container.
// Parameters:
// resourceGroupName - name of an Azure resource group.
// accountName - cosmos DB database account name.
// databaseName - cosmos DB database name.
// containerName - cosmos DB container name.
// updateThroughputParameters - the parameters to provide for the RUs per second of the current SQL container.
func (client DatabaseAccountsClient) UpdateSQLContainerThroughput(ctx context.Context, resourceGroupName string, accountName string, databaseName string, containerName string, updateThroughputParameters ThroughputUpdateParameters) (result DatabaseAccountsUpdateSQLContainerThroughputFuture, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/DatabaseAccountsClient.UpdateSQLContainerThroughput")
defer func() {
sc := -1
if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
{TargetValue: accountName,
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 50, Chain: nil},
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
{Target: "accountName", Name: validation.Pattern, Rule: `^[a-z0-9]+(-[a-z0-9]+)*`, Chain: nil}}},
{TargetValue: updateThroughputParameters,
Constraints: []validation.Constraint{{Target: "updateThroughputParameters.ThroughputUpdateProperties", Name: validation.Null, Rule: true,
Chain: []validation.Constraint{{Target: "updateThroughputParameters.ThroughputUpdateProperties.Resource", Name: validation.Null, Rule: true,
Chain: []validation.Constraint{{Target: "updateThroughputParameters.ThroughputUpdateProperties.Resource.Throughput", Name: validation.Null, Rule: true, Chain: nil}}},
}}}}}); err != nil {
return result, validation.NewError("documentdb.DatabaseAccountsClient", "UpdateSQLContainerThroughput", err.Error())
}
req, err := client.UpdateSQLContainerThroughputPreparer(ctx, resourceGroupName, accountName, databaseName, containerName, updateThroughputParameters)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "UpdateSQLContainerThroughput", nil, "Failure preparing request")
return
}
result, err = client.UpdateSQLContainerThroughputSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "UpdateSQLContainerThroughput", nil, "Failure sending request")
return
}
return
}
// UpdateSQLContainerThroughputPreparer prepares the UpdateSQLContainerThroughput request.
func (client DatabaseAccountsClient) UpdateSQLContainerThroughputPreparer(ctx context.Context, resourceGroupName string, accountName string, databaseName string, containerName string, updateThroughputParameters ThroughputUpdateParameters) (*http.Request, error) {
pathParameters := map[string]interface{}{
"accountName": autorest.Encode("path", accountName),
"containerName": autorest.Encode("path", containerName),
"databaseName": autorest.Encode("path", databaseName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2015-04-08"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPut(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/apis/sql/databases/{databaseName}/containers/{containerName}/settings/throughput", pathParameters),
autorest.WithJSON(updateThroughputParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// UpdateSQLContainerThroughputSender sends the UpdateSQLContainerThroughput request. The method will close the
// http.Response Body if it receives an error.
func (client DatabaseAccountsClient) UpdateSQLContainerThroughputSender(req *http.Request) (future DatabaseAccountsUpdateSQLContainerThroughputFuture, err error) {
var resp *http.Response
resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
var azf azure.Future
azf, err = azure.NewFutureFromResponse(resp)
future.FutureAPI = &azf
future.Result = future.result
return
}
// UpdateSQLContainerThroughputResponder handles the response to the UpdateSQLContainerThroughput request. The method always
// closes the http.Response Body.
func (client DatabaseAccountsClient) UpdateSQLContainerThroughputResponder(resp *http.Response) (result Throughput, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// UpdateSQLDatabaseThroughput updates RUs per second of an Azure Cosmos DB SQL database
// Parameters:
// resourceGroupName - name of an Azure resource group.
// accountName - cosmos DB database account name.
// databaseName - cosmos DB database name.
// updateThroughputParameters - the parameters to provide for the RUs per second of the current SQL database.
func (client DatabaseAccountsClient) UpdateSQLDatabaseThroughput(ctx context.Context, resourceGroupName string, accountName string, databaseName string, updateThroughputParameters ThroughputUpdateParameters) (result DatabaseAccountsUpdateSQLDatabaseThroughputFuture, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/DatabaseAccountsClient.UpdateSQLDatabaseThroughput")
defer func() {
sc := -1
if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
{TargetValue: accountName,
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 50, Chain: nil},
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
{Target: "accountName", Name: validation.Pattern, Rule: `^[a-z0-9]+(-[a-z0-9]+)*`, Chain: nil}}},
{TargetValue: updateThroughputParameters,
Constraints: []validation.Constraint{{Target: "updateThroughputParameters.ThroughputUpdateProperties", Name: validation.Null, Rule: true,
Chain: []validation.Constraint{{Target: "updateThroughputParameters.ThroughputUpdateProperties.Resource", Name: validation.Null, Rule: true,
Chain: []validation.Constraint{{Target: "updateThroughputParameters.ThroughputUpdateProperties.Resource.Throughput", Name: validation.Null, Rule: true, Chain: nil}}},
}}}}}); err != nil {
return result, validation.NewError("documentdb.DatabaseAccountsClient", "UpdateSQLDatabaseThroughput", err.Error())
}
req, err := client.UpdateSQLDatabaseThroughputPreparer(ctx, resourceGroupName, accountName, databaseName, updateThroughputParameters)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "UpdateSQLDatabaseThroughput", nil, "Failure preparing request")
return
}
result, err = client.UpdateSQLDatabaseThroughputSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "UpdateSQLDatabaseThroughput", nil, "Failure sending request")
return
}
return
}
// UpdateSQLDatabaseThroughputPreparer prepares the UpdateSQLDatabaseThroughput request.
func (client DatabaseAccountsClient) UpdateSQLDatabaseThroughputPreparer(ctx context.Context, resourceGroupName string, accountName string, databaseName string, updateThroughputParameters ThroughputUpdateParameters) (*http.Request, error) {
pathParameters := map[string]interface{}{
"accountName": autorest.Encode("path", accountName),
"databaseName": autorest.Encode("path", databaseName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2015-04-08"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPut(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/apis/sql/databases/{databaseName}/settings/throughput", pathParameters),
autorest.WithJSON(updateThroughputParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// UpdateSQLDatabaseThroughputSender sends the UpdateSQLDatabaseThroughput request. The method will close the
// http.Response Body if it receives an error.
func (client DatabaseAccountsClient) UpdateSQLDatabaseThroughputSender(req *http.Request) (future DatabaseAccountsUpdateSQLDatabaseThroughputFuture, err error) {
var resp *http.Response
resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
var azf azure.Future
azf, err = azure.NewFutureFromResponse(resp)
future.FutureAPI = &azf
future.Result = future.result
return
}
// UpdateSQLDatabaseThroughputResponder handles the response to the UpdateSQLDatabaseThroughput request. The method always
// closes the http.Response Body.
func (client DatabaseAccountsClient) UpdateSQLDatabaseThroughputResponder(resp *http.Response) (result Throughput, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// UpdateTableThroughput updates RUs per second of an Azure Cosmos DB Table
// Parameters:
// resourceGroupName - name of an Azure resource group.
// accountName - cosmos DB database account name.
// tableName - cosmos DB table name.
// updateThroughputParameters - the parameters to provide for the RUs per second of the current Table.
func (client DatabaseAccountsClient) UpdateTableThroughput(ctx context.Context, resourceGroupName string, accountName string, tableName string, updateThroughputParameters ThroughputUpdateParameters) (result DatabaseAccountsUpdateTableThroughputFuture, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/DatabaseAccountsClient.UpdateTableThroughput")
defer func() {
sc := -1
if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
{TargetValue: accountName,
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 50, Chain: nil},
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
{Target: "accountName", Name: validation.Pattern, Rule: `^[a-z0-9]+(-[a-z0-9]+)*`, Chain: nil}}},
{TargetValue: updateThroughputParameters,
Constraints: []validation.Constraint{{Target: "updateThroughputParameters.ThroughputUpdateProperties", Name: validation.Null, Rule: true,
Chain: []validation.Constraint{{Target: "updateThroughputParameters.ThroughputUpdateProperties.Resource", Name: validation.Null, Rule: true,
Chain: []validation.Constraint{{Target: "updateThroughputParameters.ThroughputUpdateProperties.Resource.Throughput", Name: validation.Null, Rule: true, Chain: nil}}},
}}}}}); err != nil {
return result, validation.NewError("documentdb.DatabaseAccountsClient", "UpdateTableThroughput", err.Error())
}
req, err := client.UpdateTableThroughputPreparer(ctx, resourceGroupName, accountName, tableName, updateThroughputParameters)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "UpdateTableThroughput", nil, "Failure preparing request")
return
}
result, err = client.UpdateTableThroughputSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "UpdateTableThroughput", nil, "Failure sending request")
return
}
return
}
// UpdateTableThroughputPreparer prepares the UpdateTableThroughput request.
func (client DatabaseAccountsClient) UpdateTableThroughputPreparer(ctx context.Context, resourceGroupName string, accountName string, tableName string, updateThroughputParameters ThroughputUpdateParameters) (*http.Request, error) {
pathParameters := map[string]interface{}{
"accountName": autorest.Encode("path", accountName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
"tableName": autorest.Encode("path", tableName),
}
const APIVersion = "2015-04-08"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPut(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/apis/table/tables/{tableName}/settings/throughput", pathParameters),
autorest.WithJSON(updateThroughputParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// UpdateTableThroughputSender sends the UpdateTableThroughput request. The method will close the
// http.Response Body if it receives an error.
func (client DatabaseAccountsClient) UpdateTableThroughputSender(req *http.Request) (future DatabaseAccountsUpdateTableThroughputFuture, err error) {
var resp *http.Response
resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
var azf azure.Future
azf, err = azure.NewFutureFromResponse(resp)
future.FutureAPI = &azf
future.Result = future.result
return
}
// UpdateTableThroughputResponder handles the response to the UpdateTableThroughput request. The method always
// closes the http.Response Body.
func (client DatabaseAccountsClient) UpdateTableThroughputResponder(resp *http.Response) (result Throughput, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
| NewDatabaseAccountsClient |
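The Preparer/Sender/Responder triples above all follow the same autorest long-running-operation pattern: the Sender wraps the raw response in an azure.Future, and the Result function decodes the final Throughput once the operation completes. The sketch below shows how a caller would typically drive UpdateTableThroughput end to end; the import path, the auth helper, the resource names, and the ThroughputResource type for the nested payload are assumptions inferred from the validation chain above, not taken from this file.

package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2015-04-08/documentdb" // assumed import path
	"github.com/Azure/go-autorest/autorest/azure/auth"
	"github.com/Azure/go-autorest/autorest/to"
)

func main() {
	// Hypothetical identifiers; replace with real values.
	client := documentdb.NewDatabaseAccountsClient("00000000-0000-0000-0000-000000000000")
	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		log.Fatal(err)
	}
	client.Authorizer = authorizer

	// The nested fields mirror the validation chain in UpdateTableThroughput:
	// ThroughputUpdateProperties -> Resource -> Throughput must all be non-nil.
	params := documentdb.ThroughputUpdateParameters{
		ThroughputUpdateProperties: &documentdb.ThroughputUpdateProperties{
			Resource: &documentdb.ThroughputResource{ // type name assumed
				Throughput: to.Int32Ptr(400), // RU/s
			},
		},
	}

	ctx := context.Background()
	future, err := client.UpdateTableThroughput(ctx, "my-rg", "my-account", "my-table", params)
	if err != nil {
		log.Fatal(err)
	}
	// Block until the long-running operation finishes, then decode the result.
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		log.Fatal(err)
	}
	throughput, err := future.Result(client)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("provisioned throughput: %+v", throughput)
}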
http_app.rs | use std::sync::mpsc::Receiver;
struct Resource {
/// HTTP response
response: ehttp::Response,
text: Option<String>,
/// If set, the response was an image.
image: Option<Image>,
/// If set, the response was text with some supported syntax highlighting (e.g. ".rs" or ".md").
colored_text: Option<ColoredText>,
}
impl Resource {
fn from_response(response: ehttp::Response) -> Self {
let content_type = response.content_type().unwrap_or_default();
let image = if content_type.starts_with("image/") {
Image::decode(&response.bytes)
} else {
None
};
let text = response.text();
let colored_text = text
.as_ref()
.and_then(|text| syntax_highlighting(&response, text));
Self {
response,
text,
image,
colored_text,
}
}
}
#[derive(Debug, PartialEq, Copy, Clone)]
#[cfg_attr(feature = "persistence", derive(serde::Deserialize, serde::Serialize))]
enum Method {
Get,
Post,
}
#[cfg_attr(feature = "persistence", derive(serde::Deserialize, serde::Serialize))]
pub struct HttpApp {
url: String,
method: Method,
request_body: String,
#[cfg_attr(feature = "persistence", serde(skip))]
in_progress: Option<Receiver<Result<ehttp::Response, String>>>,
#[cfg_attr(feature = "persistence", serde(skip))]
result: Option<Result<Resource, String>>,
#[cfg_attr(feature = "persistence", serde(skip))]
tex_mngr: TexMngr,
}
impl Default for HttpApp {
fn default() -> Self {
Self {
url: "https://raw.githubusercontent.com/emilk/egui/master/README.md".to_owned(),
method: Method::Get,
request_body: r#"["posting some json", { "more_json" : true }]"#.to_owned(),
in_progress: Default::default(),
result: Default::default(),
tex_mngr: Default::default(),
}
}
}
impl epi::App for HttpApp {
fn name(&self) -> &str {
"⬇ HTTP"
}
/// Called each time the UI needs repainting, which may be many times per second.
/// Put your widgets into a `SidePanel`, `TopBottomPanel`, `CentralPanel`, `Window` or `Area`.
fn update(&mut self, ctx: &egui::CtxRef, frame: &mut epi::Frame<'_>) {
if let Some(receiver) = &mut self.in_progress {
// Are we there yet?
if let Ok(result) = receiver.try_recv() {
self.in_progress = None;
self.result = Some(result.map(Resource::from_response));
}
}
egui::CentralPanel::default().show(ctx, |ui| {
ui.heading("HTTP Fetch Example");
ui.horizontal(|ui| {
ui.spacing_mut().item_spacing.x = 0.0;
ui.label("HTTP requests made using ");
ui.hyperlink_to("ehttp", "https://www.github.com/emilk/ehttp");
ui.label(".");
});
ui.add(egui::github_link_file!(
"https://github.com/emilk/egui/blob/master/",
"(demo source code)"
));
if let Some(request) = ui_url(
ui,
frame,
&mut self.url,
&mut self.method,
&mut self.request_body,
) {
let repaint_signal = frame.repaint_signal();
let (sender, receiver) = std::sync::mpsc::channel();
self.in_progress = Some(receiver);
ehttp::fetch(request, move |response| {
sender.send(response).ok();
repaint_signal.request_repaint();
});
}
ui.separator();
if self.in_progress.is_some() {
ui.label("Please wait...");
} else if let Some(result) = &self.result {
match result {
Ok(resource) => {
ui_resource(ui, frame, &mut self.tex_mngr, resource);
}
Err(error) => {
// This should only happen if the fetch API isn't available or something similar.
ui.add(
egui::Label::new(if error.is_empty() { "Error" } else { error })
.text_color(egui::Color32::RED),
);
}
}
}
});
}
}
fn ui | ui: &mut egui::Ui,
frame: &mut epi::Frame<'_>,
url: &mut String,
method: &mut Method,
request_body: &mut String,
) -> Option<ehttp::Request> {
let mut trigger_fetch = false;
egui::Grid::new("request_params").show(ui, |ui| {
ui.label("URL:");
ui.horizontal(|ui| {
trigger_fetch |= ui.text_edit_singleline(url).lost_focus();
egui::ComboBox::from_id_source("method")
.selected_text(format!("{:?}", method))
.width(60.0)
.show_ui(ui, |ui| {
ui.selectable_value(method, Method::Get, "GET");
ui.selectable_value(method, Method::Post, "POST");
});
trigger_fetch |= ui.button("▶").clicked();
});
ui.end_row();
if *method == Method::Post {
ui.label("Body:");
ui.add(
egui::TextEdit::multiline(request_body)
.code_editor()
.desired_rows(1),
);
ui.end_row();
}
});
if frame.is_web() {
ui.label("HINT: paste the url of this page into the field above!");
}
ui.horizontal(|ui| {
if ui.button("Source code for this example").clicked() {
*url = format!(
"https://raw.githubusercontent.com/emilk/egui/master/{}",
file!()
);
*method = Method::Get;
trigger_fetch = true;
}
if ui.button("Random image").clicked() {
let seed = ui.input().time;
let width = 640;
let height = 480;
*url = format!("https://picsum.photos/seed/{}/{}/{}", seed, width, height);
*method = Method::Get;
trigger_fetch = true;
}
if ui.button("Post to httpbin.org").clicked() {
*url = "https://httpbin.org/post".to_owned();
*method = Method::Post;
trigger_fetch = true;
}
});
if trigger_fetch {
Some(match *method {
Method::Get => ehttp::Request::get(url),
Method::Post => ehttp::Request::post(url, request_body),
})
} else {
None
}
}
fn ui_resource(
ui: &mut egui::Ui,
frame: &mut epi::Frame<'_>,
tex_mngr: &mut TexMngr,
resource: &Resource,
) {
let Resource {
response,
text,
image,
colored_text,
} = resource;
ui.monospace(format!("url: {}", response.url));
ui.monospace(format!(
"status: {} ({})",
response.status, response.status_text
));
ui.monospace(format!(
"content-type: {}",
response.content_type().unwrap_or_default()
));
ui.monospace(format!(
"size: {:.1} kB",
response.bytes.len() as f32 / 1000.0
));
ui.separator();
egui::CollapsingHeader::new("Response headers")
.default_open(false)
.show(ui, |ui| {
egui::Grid::new("response_headers")
.spacing(egui::vec2(ui.spacing().item_spacing.x * 2.0, 0.0))
.show(ui, |ui| {
for header in &response.headers {
ui.label(header.0);
ui.label(header.1);
ui.end_row();
}
})
});
ui.separator();
if let Some(text) = &text {
let tooltip = "Click to copy the response body";
if ui.button("📋").on_hover_text(tooltip).clicked() {
ui.output().copied_text = text.clone();
}
}
ui.separator();
egui::ScrollArea::vertical().show(ui, |ui| {
if let Some(image) = image {
if let Some(texture_id) = tex_mngr.texture(frame, &response.url, image) {
let size = egui::Vec2::new(image.size.0 as f32, image.size.1 as f32);
ui.image(texture_id, size);
}
} else if let Some(colored_text) = colored_text {
colored_text.ui(ui);
} else if let Some(text) = &text {
ui.monospace(text);
} else {
ui.monospace("[binary]");
}
});
}
// ----------------------------------------------------------------------------
// Syntax highlighting:
#[cfg(feature = "syntect")]
fn syntax_highlighting(response: &ehttp::Response, text: &str) -> Option<ColoredText> {
let extension_and_rest: Vec<&str> = response.url.rsplitn(2, '.').collect();
let extension = extension_and_rest.get(0)?;
ColoredText::text_with_extension(text, extension)
}
/// Lines of text fragments
#[cfg(feature = "syntect")]
struct ColoredText(egui::text::LayoutJob);
#[cfg(feature = "syntect")]
impl ColoredText {
/// e.g. `text_with_extension("fn foo() {}", "rs")`
pub fn text_with_extension(text: &str, extension: &str) -> Option<ColoredText> {
use syntect::easy::HighlightLines;
use syntect::highlighting::FontStyle;
use syntect::highlighting::ThemeSet;
use syntect::parsing::SyntaxSet;
use syntect::util::LinesWithEndings;
let ps = SyntaxSet::load_defaults_newlines(); // should be cached and reused
let ts = ThemeSet::load_defaults(); // should be cached and reused
let syntax = ps.find_syntax_by_extension(extension)?;
let dark_mode = true;
let theme = if dark_mode {
"base16-mocha.dark"
} else {
"base16-ocean.light"
};
let mut h = HighlightLines::new(syntax, &ts.themes[theme]);
use egui::text::{LayoutJob, LayoutSection, TextFormat};
let mut job = LayoutJob {
text: text.into(),
..Default::default()
};
for line in LinesWithEndings::from(text) {
for (style, range) in h.highlight(line, &ps) {
let fg = style.foreground;
let text_color = egui::Color32::from_rgb(fg.r, fg.g, fg.b);
let italics = style.font_style.contains(FontStyle::ITALIC);
let underline = style.font_style.contains(FontStyle::UNDERLINE);
let underline = if underline {
egui::Stroke::new(1.0, text_color)
} else {
egui::Stroke::none()
};
job.sections.push(LayoutSection {
leading_space: 0.0,
byte_range: as_byte_range(text, range),
format: TextFormat {
style: egui::TextStyle::Monospace,
color: text_color,
italics,
underline,
..Default::default()
},
});
}
}
Some(ColoredText(job))
}
pub fn ui(&self, ui: &mut egui::Ui) {
let mut job = self.0.clone();
job.wrap_width = ui.available_width();
let galley = ui.fonts().layout_job(job);
let (response, painter) = ui.allocate_painter(galley.size(), egui::Sense::hover());
painter.add(egui::Shape::galley(response.rect.min, galley));
}
}
#[cfg(feature = "syntect")]
fn as_byte_range(whole: &str, range: &str) -> std::ops::Range<usize> {
let whole_start = whole.as_ptr() as usize;
let range_start = range.as_ptr() as usize;
assert!(whole_start <= range_start);
assert!(range_start + range.len() <= whole_start + whole.len());
let offset = range_start - whole_start;
offset..(offset + range.len())
}
#[cfg(not(feature = "syntect"))]
fn syntax_highlighting(_: &ehttp::Response, _: &str) -> Option<ColoredText> {
None
}
#[cfg(not(feature = "syntect"))]
struct ColoredText();
#[cfg(not(feature = "syntect"))]
impl ColoredText {
pub fn ui(&self, _ui: &mut egui::Ui) {}
}
// ----------------------------------------------------------------------------
// Texture/image handling is very manual at the moment.
/// Immediate mode texture manager that supports at most one texture at a time :)
#[derive(Default)]
struct TexMngr {
loaded_url: String,
texture_id: Option<egui::TextureId>,
}
impl TexMngr {
fn texture(
&mut self,
frame: &mut epi::Frame<'_>,
url: &str,
image: &Image,
) -> Option<egui::TextureId> {
if self.loaded_url != url {
if let Some(texture_id) = self.texture_id.take() {
frame.tex_allocator().free(texture_id);
}
self.texture_id = Some(
frame
.tex_allocator()
.alloc_srgba_premultiplied(image.size, &image.pixels),
);
self.loaded_url = url.to_owned();
}
self.texture_id
}
}
struct Image {
size: (usize, usize),
pixels: Vec<egui::Color32>,
}
impl Image {
fn decode(bytes: &[u8]) -> Option<Image> {
use image::GenericImageView;
let image = image::load_from_memory(bytes).ok()?;
let image_buffer = image.to_rgba8();
let size = (image.width() as usize, image.height() as usize);
let pixels = image_buffer.into_vec();
assert_eq!(size.0 * size.1 * 4, pixels.len());
let pixels = pixels
.chunks(4)
.map(|p| egui::Color32::from_rgba_unmultiplied(p[0], p[1], p[2], p[3]))
.collect();
Some(Image { size, pixels })
}
}
| _url(
|
jwt.strategy.ts | import { ExtractJwt, Strategy } from 'passport-jwt'; // TODO: remove passport as dependency
import { Injectable } from '@nestjs/common';
import { PassportStrategy } from '@nestjs/passport';
import { JWTPayload } from '../interfaces/jwt-payload.interface';
import { AuthService } from '../auth.service';
@Injectable()
export class JwtStrategy extends PassportStrategy(Strategy) {
constructor(private readonly authService: AuthService) {
super({
jwtFromRequest: ExtractJwt.fromAuthHeaderAsBearerToken(),
secretOrKey: process.env.SECRET_KEY_JWT, // TODO: use vars from config module
});
}
async validate(payload: JWTPayload) {
const user = await this.authService.validateUserByJwt(payload); | return user;
}
} | |
0006_auto_20190723_1445.py | # Generated by Django 2.2.3 on 2019-07-23 14:45
from django.db import migrations, models
class | (migrations.Migration):
dependencies = [
('inception', '0005_auto_20190723_0810'),
]
operations = [
migrations.AlterField(
model_name='busstation',
name='cost',
field=models.FloatField(null=True),
),
]
| Migration |
print-file.rs | //! Prints a file given as an argument to stdout.
use std::env::args;
use std::io::Write;
use async_std::fs::File;
use async_std::io;
use async_std::prelude::*;
use async_std::task;
const LEN: usize = 16 * 1024; // 16 KiB
fn | () -> io::Result<()> {
let path = args().nth(1).expect("missing path argument");
task::block_on(async {
let mut file = File::open(&path).await?;
let mut stdout = std::io::stdout();
let mut buf = vec![0u8; LEN];
loop {
// Read a buffer from the file.
let n = file.read(&mut buf).await?;
// If this is the end of file, clean up and return.
if n == 0 {
stdout.flush()?;
return Ok(());
}
// Write the buffer into stdout.
stdout.write_all(&buf[..n])?;
}
})
}
| main |
storage.js | // == BSD2 LICENSE ==
// Copyright (c) 2014, Tidepool Project
//
// This program is free software; you can redistribute it and/or modify it under
// the terms of the associated License, which is identical to the BSD 2-Clause
// License as published by the Open Source Initiative at opensource.org.
//
// This program is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
// FOR A PARTICULAR PURPOSE. See the License for more details.
//
// You should have received a copy of the License along with this program; if
// not, you can obtain one from Tidepool Project at tidepool.org.
// == BSD2 LICENSE ==
import _ from 'lodash';
/**
* Create the store we will be using
*
* @param {Object} options
* @param {Object} options.ourStore the storage system we are using
*/
module.exports = (options) => ({
init: (data, cb) => {
const result = _.reduce(data, (res, defaultValue, key) => {
let value = options.ourStore.getItem(key);
if (value == null) {
value = defaultValue;
} | }, {});
cb(result);
},
getItem: options.ourStore.getItem.bind(options.ourStore),
setItem: options.ourStore.setItem.bind(options.ourStore),
removeItem: options.ourStore.removeItem.bind(options.ourStore),
}); | res[key] = value;
return res; |
main.go | package main
import (
"context"
"errors"
"flag"
"fmt"
"os"
"runtime"
"github.com/spf13/pflag"
// Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.)
_ "k8s.io/client-go/plugin/pkg/client/auth"
"k8s.io/client-go/rest"
monitoringv1 "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1"
"github.com/integr8ly/integreatly-operator/pkg/apis"
integreatlyv1alpha1 "github.com/integr8ly/integreatly-operator/pkg/apis/integreatly/v1alpha1"
"github.com/integr8ly/integreatly-operator/pkg/controller"
integreatlymetrics "github.com/integr8ly/integreatly-operator/pkg/metrics"
"github.com/integr8ly/integreatly-operator/pkg/webhooks"
"github.com/integr8ly/integreatly-operator/version"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
"sigs.k8s.io/controller-runtime/pkg/client/config"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/manager/signals"
customMetrics "sigs.k8s.io/controller-runtime/pkg/metrics"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
"github.com/operator-framework/operator-sdk/pkg/k8sutil"
kubemetrics "github.com/operator-framework/operator-sdk/pkg/kube-metrics"
"github.com/operator-framework/operator-sdk/pkg/leader"
"github.com/operator-framework/operator-sdk/pkg/log/zap"
"github.com/operator-framework/operator-sdk/pkg/metrics"
sdkVersion "github.com/operator-framework/operator-sdk/version"
)
// Change below variables to serve metrics on different host or port.
var (
metricsHost = "0.0.0.0"
metricsPort int32 = 8383
operatorMetricsPort int32 = 8686
)
var log = logf.Log.WithName("cmd")
func | () {
// Register custom metrics with the global prometheus registry
customMetrics.Registry.MustRegister(integreatlymetrics.OperatorVersion)
customMetrics.Registry.MustRegister(integreatlymetrics.RHMIStatusAvailable)
customMetrics.Registry.MustRegister(integreatlymetrics.RHMIInfo)
customMetrics.Registry.MustRegister(integreatlymetrics.RHMIVersion)
customMetrics.Registry.MustRegister(integreatlymetrics.RHMIStatus)
integreatlymetrics.OperatorVersion.Add(1)
}
func printVersion() {
log.Info(fmt.Sprintf("Operator Version: %s", version.GetVersion()))
log.Info(fmt.Sprintf("Go Version: %s", runtime.Version()))
log.Info(fmt.Sprintf("Go OS/Arch: %s/%s", runtime.GOOS, runtime.GOARCH))
log.Info(fmt.Sprintf("Version of operator-sdk: %v", sdkVersion.Version))
}
func main() {
// Add the zap logger flag set to the CLI. The flag set must
// be added before calling pflag.Parse().
pflag.CommandLine.AddFlagSet(zap.FlagSet())
// Add flags registered by imported packages (e.g. glog and
// controller-runtime)
pflag.CommandLine.AddGoFlagSet(flag.CommandLine)
pflag.Parse()
// Use a zap logr.Logger implementation. If none of the zap
// flags are configured (or if the zap flag set is not being
// used), this defaults to a production zap logger.
//
// The logger instantiated here can be changed to any logger
// implementing the logr.Logger interface. This logger will
// be propagated through the whole operator, generating
// uniform and structured logs.
logf.SetLogger(zap.Logger())
printVersion()
namespace, err := k8sutil.GetWatchNamespace()
if err != nil {
log.Error(err, "Failed to get watch namespace")
os.Exit(1)
}
// Get a config to talk to the apiserver
cfg, err := config.GetConfig()
if err != nil {
log.Error(err, "")
os.Exit(1)
}
ctx := context.TODO()
// Become the leader before proceeding
err = leader.Become(ctx, "rhmi-operator-lock")
if err != nil {
log.Error(err, "")
os.Exit(1)
}
// Create a new Cmd to provide shared dependencies and start components
mgr, err := manager.New(cfg, manager.Options{
Namespace: namespace,
MapperProvider: apiutil.NewDiscoveryRESTMapper,
MetricsBindAddress: fmt.Sprintf("%s:%d", metricsHost, metricsPort),
})
if err != nil {
log.Error(err, "")
os.Exit(1)
}
log.Info("Registering Components.")
// Setup Scheme for all resources
if err := apis.AddToScheme(mgr.GetScheme()); err != nil {
log.Error(err, "")
os.Exit(1)
}
// Add monitoring resources
if err := monitoringv1.AddToScheme(mgr.GetScheme()); err != nil {
log.Error(err, "")
os.Exit(1)
}
// Setup all Controllers
if err := controller.AddToManager(mgr); err != nil {
log.Error(err, "")
os.Exit(1)
}
// Add the Metrics Service
addMetrics(ctx, cfg)
// Start up the webhook server
if err := setupWebhooks(mgr); err != nil {
log.Error(err, "Error setting up webhook server")
}
log.Info("Starting the Cmd.")
// Start the Cmd
if err := mgr.Start(signals.SetupSignalHandler()); err != nil {
log.Error(err, "Manager exited non-zero")
os.Exit(1)
}
}
// addMetrics will create the Services and Service Monitors to allow the operator to export the metrics by using
// the Prometheus operator
func addMetrics(ctx context.Context, cfg *rest.Config) {
// Get the namespace the operator is currently deployed in.
operatorNs, err := k8sutil.GetOperatorNamespace()
if err != nil {
if errors.Is(err, k8sutil.ErrRunLocal) {
log.Info("Skipping CR metrics server creation; not running in a cluster.")
return
}
}
if err := serveCRMetrics(cfg, operatorNs); err != nil {
log.Info("Could not generate and serve custom resource metrics", "error", err.Error())
}
// Add to the below struct any other metrics ports you want to expose.
servicePorts := []corev1.ServicePort{
{Port: metricsPort, Name: metrics.OperatorPortName, Protocol: corev1.ProtocolTCP, TargetPort: intstr.IntOrString{Type: intstr.Int, IntVal: metricsPort}},
{Port: operatorMetricsPort, Name: metrics.CRPortName, Protocol: corev1.ProtocolTCP, TargetPort: intstr.IntOrString{Type: intstr.Int, IntVal: operatorMetricsPort}},
}
// Create Service object to expose the metrics port(s).
service, err := metrics.CreateMetricsService(ctx, cfg, servicePorts)
if err != nil {
log.Info("Could not create metrics Service", "error", err.Error())
// If this operator is deployed to a cluster without the prometheus-operator running, it will return
// ErrServiceMonitorNotPresent, which can be used to safely skip ServiceMonitor creation.
if err == metrics.ErrServiceMonitorNotPresent {
log.Info("Install prometheus-operator in your cluster to create ServiceMonitor objects", "error", err.Error())
}
}
// CreateServiceMonitors will automatically create the prometheus-operator ServiceMonitor resources
// necessary to configure Prometheus to scrape metrics from this operator.
services := []*corev1.Service{service}
// The ServiceMonitor is created in the same namespace where the operator is deployed
_, err = metrics.CreateServiceMonitors(cfg, operatorNs, services)
if err != nil {
log.Info("Could not create ServiceMonitor object", "error", err.Error())
// If this operator is deployed to a cluster without the prometheus-operator running, it will return
// ErrServiceMonitorNotPresent, which can be used to safely skip ServiceMonitor creation.
if err == metrics.ErrServiceMonitorNotPresent {
log.Info("Install prometheus-operator in your cluster to create ServiceMonitor objects", "error", err.Error())
}
}
}
// serveCRMetrics gets the Operator/CustomResource GVKs and generates metrics based on those types.
// It serves those metrics on "http://metricsHost:operatorMetricsPort".
func serveCRMetrics(cfg *rest.Config, operatorNs string) error {
// The function below returns a list of filtered operator/CR specific GVKs. For more control, override the GVK list below
// with your own custom logic. Note that if you are adding third party API schemas, you will probably need to
// customize this implementation to avoid permissions issues.
filteredGVK, err := k8sutil.GetGVKsFromAddToScheme(apis.AddToScheme)
if err != nil {
return err
}
// The metrics will be generated from the namespaces which are returned here.
// NOTE that passing nil or an empty list of namespaces in GenerateAndServeCRMetrics will result in an error.
ns, err := kubemetrics.GetNamespacesForMetrics(operatorNs)
if err != nil {
return err
}
// Generate and serve custom resource specific metrics.
err = kubemetrics.GenerateAndServeCRMetrics(cfg, ns, filteredGVK, metricsHost, operatorMetricsPort)
if err != nil {
return err
}
return nil
}
func setupWebhooks(mgr manager.Manager) error {
rhmiConfigRegister, err := webhooks.WebhookRegisterFor(&integreatlyv1alpha1.RHMIConfig{})
if err != nil {
return err
}
webhooks.Config.AddWebhook(webhooks.IntegreatlyWebhook{
Name: "rhmiconfig",
Register: rhmiConfigRegister,
Rule: webhooks.NewRule().
OneResource("integreatly.org", "v1alpha1", "rhmiconfigs").
ForCreate().
ForUpdate().
NamespacedScope(),
})
webhooks.Config.AddWebhook(webhooks.IntegreatlyWebhook{
Name: "rhmiconfig-mutate",
Rule: webhooks.NewRule().
OneResource("integreatly.org", "v1alpha1", "rhmiconfigs").
ForCreate().
ForUpdate().
NamespacedScope(),
Register: webhooks.AdmissionWebhookRegister{
Type: webhooks.MutatingType,
Path: "/mutate-rhmiconfig",
Hook: &admission.Webhook{
Handler: integreatlyv1alpha1.NewRHMIConfigMutatingHandler(),
},
},
})
if err := webhooks.Config.SetupServer(mgr); err != nil {
return err
}
return nil
}
| init |
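A minimal sketch of how an additional operator metric would be wired into the same pipeline described above: init() registers it with the controller-runtime registry, and the Service/ServiceMonitor objects created by addMetrics expose it for Prometheus to scrape. The package name, metric name and help text are illustrative assumptions, not taken from the operator.

package example

import (
	"github.com/prometheus/client_golang/prometheus"
	customMetrics "sigs.k8s.io/controller-runtime/pkg/metrics"
)

// reconcileTotal counts reconcile loops (hypothetical metric for illustration).
var reconcileTotal = prometheus.NewCounter(prometheus.CounterOpts{
	Name: "rhmi_example_reconcile_total",
	Help: "Total number of reconcile loops executed.",
})

func init() {
	// Same registry used in main.go's init(); anything registered here shows up
	// on the metrics endpoint that the generated ServiceMonitor scrapes.
	customMetrics.Registry.MustRegister(reconcileTotal)
}

// Inside a Reconcile() implementation you would then call:
//	reconcileTotal.Inc()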
test_filealchemy.py | from textwrap import dedent
import pytest
from sqlalchemy import Column, ForeignKey, String, Text
from sqlalchemy.orm import relationship
from flask_filealchemy import FileAlchemy, LoadError
def test_directory_does_not_exist(db):
app = db.get_app()
class Author(db.Model):
__tablename__ = 'authors'
slug = Column(String(255), primary_key=True)
name = Column(String(255), nullable=False)
db.app.config['FILEALCHEMY_MODELS'] = (Author,)
db.app.config['FILEALCHEMY_DATA_DIR'] = '/does/not/exist/'
with pytest.raises(LoadError):
FileAlchemy(app, db).load_tables()
def test_invalid_directory(db, tmpdir):
app = db.get_app()
class Author(db.Model):
__tablename__ = 'authors'
slug = Column(String(255), primary_key=True)
name = Column(String(255), nullable=False)
file_ = tmpdir.join('file.yml')
file_.write('does not matter')
db.app.config['FILEALCHEMY_MODELS'] = (Author,)
db.app.config['FILEALCHEMY_DATA_DIR'] = file_.strpath
with pytest.raises(LoadError):
FileAlchemy(app, db).load_tables()
def test_model_not_found(db, tmpdir):
|
def test_load_single_table(db, tmpdir):
app = db.get_app()
class Author(db.Model):
__tablename__ = 'authors'
slug = Column(String(255), primary_key=True)
name = Column(String(255), nullable=False)
data_dir = tmpdir.mkdir('data_dir')
authors_dir = data_dir.mkdir('authors')
max_mustermann = authors_dir.join('max-mustermann.yml')
erika_mustermann = authors_dir.join('erika-mustermann.yml')
max_mustermann.write(
dedent(
'''
slug: max-mustermann
name: Max Mustermann
'''
)
)
erika_mustermann.write(
dedent(
'''
slug: erika-mustermann
name: Erika Mustermann
'''
)
)
db.app.config['FILEALCHEMY_MODELS'] = (Author,)
db.app.config['FILEALCHEMY_DATA_DIR'] = data_dir.strpath
FileAlchemy(app, db).load_tables()
assert Author.query.count() == 2
def test_invalid_data(db, tmpdir):
app = db.get_app()
class Author(db.Model):
__tablename__ = 'authors'
slug = Column(String(255), primary_key=True)
name = Column(String(255), nullable=False)
data_dir = tmpdir.mkdir('data_dir')
authors_dir = data_dir.mkdir('authors')
invalid = authors_dir.join('invalid.yml')
for data in ('invalid', '[1, 2, 3]', 'key: value'):
invalid.write(data)
db.app.config['FILEALCHEMY_MODELS'] = (Author,)
db.app.config['FILEALCHEMY_DATA_DIR'] = data_dir.strpath
with pytest.raises(LoadError):
FileAlchemy(app, db).load_tables()
def test_foreign_keys(db, tmpdir):
app = db.get_app()
class Author(db.Model):
__tablename__ = 'authors'
slug = Column(String(255), primary_key=True)
name = Column(String(255), nullable=False)
class Book(db.Model):
__tablename__ = 'books'
slug = Column(String(255), primary_key=True)
title = Column(String(255), nullable=False)
author_slug = Column(
String(255), ForeignKey('authors.slug'), nullable=False
)
contents = Column(Text, default=None)
author = relationship('Author', backref='books')
data_dir = tmpdir.mkdir('data_dir')
authors_dir = data_dir.mkdir('authors')
books_dir = data_dir.mkdir('books')
author = authors_dir.join('author.yml')
first_book = books_dir.join('first-book.yml')
second_book = books_dir.join('second-book.yml')
author.write(
dedent(
'''
slug: max-mustermann
name: Max Mustermann
'''
)
)
first_book.write(
dedent(
'''
slug: first-book
title: First Book
author_slug: max-mustermann
contents: |
First line.
Second line.
'''
)
)
second_book.write(
dedent(
'''
slug: second-book
title: Second Book
author_slug: max-mustermann
contents: |
First line.
Second line.
'''
)
)
db.app.config['FILEALCHEMY_MODELS'] = (Author, Book)
db.app.config['FILEALCHEMY_DATA_DIR'] = data_dir.strpath
FileAlchemy(app, db).load_tables()
assert Author.query.count() == 1
assert Book.query.count() == 2
def test_load_from_all_file(db, tmpdir):
app = db.get_app()
class Author(db.Model):
__tablename__ = 'authors'
slug = Column(String(255), primary_key=True)
name = Column(String(255), nullable=False)
data_dir = tmpdir.mkdir('data_dir')
authors_dir = data_dir.mkdir('authors')
all_ = authors_dir.join('_all.yml')
all_.write(
dedent(
'''
- slug: max-mustermann
name: Max Mustermann
- slug: erika-mustermann
name: Erika Mustermann
'''
)
)
db.app.config['FILEALCHEMY_MODELS'] = (Author,)
db.app.config['FILEALCHEMY_DATA_DIR'] = data_dir.strpath
FileAlchemy(app, db).load_tables()
assert Author.query.count() == 2
| app = db.get_app()
class Author(db.Model):
__tablename__ = 'authors'
slug = Column(String(32), primary_key=True)
class Book(db.Model):
__tablename__ = 'books'
slug = Column(String(32), primary_key=True)
data_dir = tmpdir.mkdir('data_dir')
authors_dir = data_dir.mkdir('authors')
max_mustermann = authors_dir.join('max-mustermann.yml')
max_mustermann.write('slug: max-mustermann')
books_dir = data_dir.mkdir('books')
muster_book = books_dir.join('muster-book.yml')
muster_book.write('slug: muster-book')
db.app.config['FILEALCHEMY_MODELS'] = (Author,)
db.app.config['FILEALCHEMY_DATA_DIR'] = data_dir.strpath
with pytest.raises(LoadError, match='no model found'):
FileAlchemy(app, db).load_tables() |
box-in-tup.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(managed_boxes)]
use std::gc::{Gc, GC};
pub fn main() | {
let i: (Gc<int>, int) = (box(GC) 10, 10);
let (_a, _) = i;
} |
|
sorter.go | /*
Package idxsort provides types and functions for sorting sequences via separate indexes.
*/
package idxsort
import (
"sort"
)
/*
Sorter wraps a sequence implementing the sort.Interface interface, supports ascending and descending sort, and duplicates index swaps to other sequences via a swap function.
Methods of Sorter are not safe for concurrent use. The provided index and/or swap functions may also require explicit locking to prevent concurrent access.
Sorter implements the sort.Interface interface.
*/
type Sorter struct {
index sort.Interface
swapFunc func(i, j int)
ascending bool
}
/*
NewSorter instantiates a new Sorter with the provided index and swap function.
*/
func NewSorter(index sort.Interface, swapFunc func(i, j int)) (s *Sorter) {
s = &Sorter{
index: index,
swapFunc: swapFunc,
ascending: true,
}
return
}
/*
Sort sorts the index in ascending or descending order.
*/
func (s *Sorter) Sort(ascending bool) {
s.ascending = ascending
sort.Sort(s)
}
/*
SortAsc sorts the index in ascending order.
*/
func (s *Sorter) SortAsc() {
s.Sort(true)
}
/*
SortDesc sorts the index in descending order.
*/
func (s *Sorter) SortDesc() {
s.Sort(false)
}
func (s *Sorter) Len() int {
return s.index.Len()
}
func (s *Sorter) Swap(i, j int) {
s.index.Swap(i, j)
s.swapFunc(i, j)
}
func (s *Sorter) Less(i, j int) bool {
if s.ascending {
return s.index.Less(i, j)
} else {
return s.index.Less(j, i)
}
}
/*
Sort sorts an index in the provided order, and duplicates index swaps to swapFunc.
*/
func Sort(index sort.Interface, ascending bool, swapFunc func(i, j int)) {
sorter := NewSorter(index, swapFunc)
sorter.Sort(ascending)
}
/*
SortAsc sorts an index in ascending order, and duplicates index swaps to swapFunc.
*/
func | (index sort.Interface, swapFunc func(i, j int)) {
Sort(index, true, swapFunc)
}
/*
Sorts an index in descending order, and duplicates index swaps to swapFunc.
*/
func SortDesc(index sort.Interface, swapFunc func(i, j int)) {
Sort(index, false, swapFunc)
}
| SortAsc |
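A usage sketch for the package above: sort a key slice in ascending order while keeping a parallel value slice aligned, which is the situation the swap function exists for. The import path is an assumption; only SortAsc and the swap-function signature come from the source.

package main

import (
	"fmt"
	"sort"

	"example.com/idxsort" // assumed import path
)

func main() {
	keys := []int{3, 1, 2}
	values := []string{"three", "one", "two"}

	// Every swap applied to the key index is mirrored onto the value slice.
	idxsort.SortAsc(sort.IntSlice(keys), func(i, j int) {
		values[i], values[j] = values[j], values[i]
	})

	fmt.Println(keys)   // [1 2 3]
	fmt.Println(values) // [one two three]
}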
driver_monitor.py | from math import atan2, sqrt
from cereal import car
from common.numpy_fast import interp
from common.realtime import DT_DMON
from selfdrive.hardware import TICI
from common.filter_simple import FirstOrderFilter
from common.stat_live import RunningStatFilter
EventName = car.CarEvent.EventName
# ******************************************************************************************
# NOTE: To fork maintainers.
# Disabling or nerfing safety features may get you and your users banned from our servers.
# We recommend that you do not change these numbers from the defaults.
# ******************************************************************************************
class DRIVER_MONITOR_SETTINGS():
def __init__(self, TICI=TICI, DT_DMON=DT_DMON):
self._DT_DMON = DT_DMON
self._AWARENESS_TIME = 35. # passive wheeltouch total timeout
self._AWARENESS_PRE_TIME_TILL_TERMINAL = 12.
self._AWARENESS_PROMPT_TIME_TILL_TERMINAL = 6.
self._DISTRACTED_TIME = 11. # active monitoring total timeout
self._DISTRACTED_PRE_TIME_TILL_TERMINAL = 8.
self._DISTRACTED_PROMPT_TIME_TILL_TERMINAL = 6.
self._FACE_THRESHOLD = 0.5
self._PARTIAL_FACE_THRESHOLD = 0.75 if TICI else 0.5
self._EYE_THRESHOLD = 0.5
self._SG_THRESHOLD = 0.5
self._BLINK_THRESHOLD = 0.88 if TICI else 0.5
self._BLINK_THRESHOLD_SLACK = 0.98 if TICI else 0.65
self._BLINK_THRESHOLD_STRICT = 0.88 if TICI else 0.5
self._PITCH_WEIGHT = 1.175 if TICI else 1.35 # pitch matters a lot more
self._POSESTD_THRESHOLD = 0.318 if TICI else 0.14
self._E2E_POSE_THRESHOLD = 0.95 if TICI else 0.9
self._E2E_EYES_THRESHOLD = 0.75
self._METRIC_THRESHOLD = 0.5 if TICI else 0.4
self._METRIC_THRESHOLD_SLACK = 0.6875 if TICI else 0.55
self._METRIC_THRESHOLD_STRICT = 0.5 if TICI else 0.4
self._PITCH_POS_ALLOWANCE = 0.12 # rad, to not be too sensitive on positive pitch
self._PITCH_NATURAL_OFFSET = 0.02 # people don't seem to look straight when they drive relaxed, rather a bit up
self._YAW_NATURAL_OFFSET = 0.08 # people don't seem to look straight when they drive relaxed, rather a bit to the right (center of car)
self._HI_STD_FALLBACK_TIME = int(10 / self._DT_DMON) # fall back to wheel touch if model is uncertain for 10s
self._DISTRACTED_FILTER_TS = 0.25 # 0.6Hz
self._POSE_CALIB_MIN_SPEED = 13 # 30 mph
self._POSE_OFFSET_MIN_COUNT = int(60 / self._DT_DMON) # valid data counts before calibration completes, 1min cumulative
self._POSE_OFFSET_MAX_COUNT = int(360 / self._DT_DMON) # stop deweighting new data after 6 min, aka "short term memory"
self._RECOVERY_FACTOR_MAX = 5. # relative to minus step change
self._RECOVERY_FACTOR_MIN = 1.25 # relative to minus step change
self._MAX_TERMINAL_ALERTS = 3 # not allowed to engage after 3 terminal alerts
self._MAX_TERMINAL_DURATION = int(30 / self._DT_DMON) # not allowed to engage after 30s of terminal alerts
# model output refers to center of cropped image, so need to apply the x displacement offset
RESIZED_FOCAL = 320.0
H, W, FULL_W = 320, 160, 426
class DistractedType:
NOT_DISTRACTED = 0
BAD_POSE = 1
BAD_BLINK = 2
def face_orientation_from_net(angles_desc, pos_desc, rpy_calib, is_rhd):
# the output of these angles are in device frame
# so from driver's perspective, pitch is up and yaw is right
pitch_net, yaw_net, roll_net = angles_desc
face_pixel_position = ((pos_desc[0] + .5)*W - W + FULL_W, (pos_desc[1]+.5)*H)
yaw_focal_angle = atan2(face_pixel_position[0] - FULL_W//2, RESIZED_FOCAL)
pitch_focal_angle = atan2(face_pixel_position[1] - H//2, RESIZED_FOCAL)
pitch = pitch_net + pitch_focal_angle
yaw = -yaw_net + yaw_focal_angle
# no calib for roll
pitch -= rpy_calib[1]
yaw -= rpy_calib[2] * (1 - 2 * int(is_rhd)) # lhd -> -=, rhd -> +=
return roll_net, pitch, yaw
class DriverPose():
def __init__(self, max_trackable):
self.yaw = 0.
self.pitch = 0.
self.roll = 0.
self.yaw_std = 0.
self.pitch_std = 0.
self.roll_std = 0.
self.pitch_offseter = RunningStatFilter(max_trackable=max_trackable)
self.yaw_offseter = RunningStatFilter(max_trackable=max_trackable)
self.low_std = True
self.cfactor = 1.
class DriverBlink():
def __init__(self):
self.left_blink = 0.
self.right_blink = 0.
self.cfactor = 1.
class DriverStatus():
def __init__(self, rhd=False, settings=DRIVER_MONITOR_SETTINGS()):
# init policy settings
self.settings = settings
# init driver status
self.is_rhd_region = rhd
self.pose = DriverPose(self.settings._POSE_OFFSET_MAX_COUNT)
self.pose_calibrated = False
self.blink = DriverBlink()
self.awareness = 1.
self.awareness_active = 1.
self.awareness_passive = 1.
self.driver_distracted = False
self.driver_distraction_filter = FirstOrderFilter(0., self.settings._DISTRACTED_FILTER_TS, self.settings._DT_DMON)
self.face_detected = False
self.face_partial = False
self.terminal_alert_cnt = 0
self.terminal_time = 0
self.step_change = 0.
self.active_monitoring_mode = True
self.is_model_uncertain = False
self.hi_stds = 0
self.threshold_pre = self.settings._DISTRACTED_PRE_TIME_TILL_TERMINAL / self.settings._DISTRACTED_TIME
self.threshold_prompt = self.settings._DISTRACTED_PROMPT_TIME_TILL_TERMINAL / self.settings._DISTRACTED_TIME
self._set_timers(active_monitoring=True)
def _set_timers(self, active_monitoring):
if self.active_monitoring_mode and self.awareness <= self.threshold_prompt:
if active_monitoring:
self.step_change = self.settings._DT_DMON / self.settings._DISTRACTED_TIME
else:
self.step_change = 0.
return # no exploit after orange alert
elif self.awareness <= 0.:
return
if active_monitoring:
# when falling back from passive mode to active mode, reset awareness to avoid false alert
if not self.active_monitoring_mode:
self.awareness_passive = self.awareness
self.awareness = self.awareness_active
self.threshold_pre = self.settings._DISTRACTED_PRE_TIME_TILL_TERMINAL / self.settings._DISTRACTED_TIME
self.threshold_prompt = self.settings._DISTRACTED_PROMPT_TIME_TILL_TERMINAL / self.settings._DISTRACTED_TIME
self.step_change = self.settings._DT_DMON / self.settings._DISTRACTED_TIME
self.active_monitoring_mode = True
else:
if self.active_monitoring_mode:
self.awareness_active = self.awareness
self.awareness = self.awareness_passive
self.threshold_pre = self.settings._AWARENESS_PRE_TIME_TILL_TERMINAL / self.settings._AWARENESS_TIME
self.threshold_prompt = self.settings._AWARENESS_PROMPT_TIME_TILL_TERMINAL / self.settings._AWARENESS_TIME
self.step_change = self.settings._DT_DMON / self.settings._AWARENESS_TIME
self.active_monitoring_mode = False
def _is_driver_distracted(self, pose, blink):
if not self.pose_calibrated:
pitch_error = pose.pitch - self.settings._PITCH_NATURAL_OFFSET
yaw_error = pose.yaw - self.settings._YAW_NATURAL_OFFSET
else:
pitch_error = pose.pitch - self.pose.pitch_offseter.filtered_stat.mean()
yaw_error = pose.yaw - self.pose.yaw_offseter.filtered_stat.mean()
# positive pitch allowance
if pitch_error > 0.:
pitch_error = max(pitch_error - self.settings._PITCH_POS_ALLOWANCE, 0.)
pitch_error *= self.settings._PITCH_WEIGHT
pose_metric = sqrt(yaw_error**2 + pitch_error**2)
if pose_metric > self.settings._METRIC_THRESHOLD*pose.cfactor:
return DistractedType.BAD_POSE
elif (blink.left_blink + blink.right_blink)*0.5 > self.settings._BLINK_THRESHOLD*blink.cfactor:
return DistractedType.BAD_BLINK
else:
return DistractedType.NOT_DISTRACTED
def | (self, model_data):
ep = min(model_data.meta.engagedProb, 0.8) / 0.8
self.pose.cfactor = interp(ep, [0, 0.5, 1],
[self.settings._METRIC_THRESHOLD_STRICT,
self.settings. _METRIC_THRESHOLD,
self.settings._METRIC_THRESHOLD_SLACK]) / self.settings._METRIC_THRESHOLD
self.blink.cfactor = interp(ep, [0, 0.5, 1],
[self.settings._BLINK_THRESHOLD_STRICT,
self.settings._BLINK_THRESHOLD,
self.settings._BLINK_THRESHOLD_SLACK]) / self.settings._BLINK_THRESHOLD
def get_pose(self, driver_state, cal_rpy, car_speed, op_engaged):
if not all(len(x) > 0 for x in [driver_state.faceOrientation, driver_state.facePosition,
driver_state.faceOrientationStd, driver_state.facePositionStd]):
return
self.face_partial = driver_state.partialFace > self.settings._PARTIAL_FACE_THRESHOLD
self.face_detected = driver_state.faceProb > self.settings._FACE_THRESHOLD or self.face_partial
self.pose.roll, self.pose.pitch, self.pose.yaw = face_orientation_from_net(driver_state.faceOrientation, driver_state.facePosition, cal_rpy, self.is_rhd_region)
self.pose.pitch_std = driver_state.faceOrientationStd[0]
self.pose.yaw_std = driver_state.faceOrientationStd[1]
# self.pose.roll_std = driver_state.faceOrientationStd[2]
model_std_max = max(self.pose.pitch_std, self.pose.yaw_std)
self.pose.low_std = model_std_max < self.settings._POSESTD_THRESHOLD and not self.face_partial
self.blink.left_blink = driver_state.leftBlinkProb * (driver_state.leftEyeProb > self.settings._EYE_THRESHOLD) * (driver_state.sunglassesProb < self.settings._SG_THRESHOLD)
self.blink.right_blink = driver_state.rightBlinkProb * (driver_state.rightEyeProb > self.settings._EYE_THRESHOLD) * (driver_state.sunglassesProb < self.settings._SG_THRESHOLD)
distracted_normal = self._is_driver_distracted(self.pose, self.blink) > 0 and \
driver_state.faceProb > self.settings._FACE_THRESHOLD and self.pose.low_std
distracted_E2E = (driver_state.distractedPose > self.settings._E2E_POSE_THRESHOLD or driver_state.distractedEyes > self.settings._E2E_EYES_THRESHOLD) and \
(self.face_detected and not self.face_partial)
self.driver_distracted = distracted_normal or distracted_E2E
self.driver_distraction_filter.update(self.driver_distracted)
# update offseter
# only update when driver is actively driving the car above a certain speed
if self.face_detected and car_speed > self.settings._POSE_CALIB_MIN_SPEED and self.pose.low_std and (not op_engaged or not self.driver_distracted):
self.pose.pitch_offseter.push_and_update(self.pose.pitch)
self.pose.yaw_offseter.push_and_update(self.pose.yaw)
self.pose_calibrated = self.pose.pitch_offseter.filtered_stat.n > self.settings._POSE_OFFSET_MIN_COUNT and \
self.pose.yaw_offseter.filtered_stat.n > self.settings._POSE_OFFSET_MIN_COUNT
self.is_model_uncertain = self.hi_stds > self.settings._HI_STD_FALLBACK_TIME
self._set_timers(self.face_detected and not self.is_model_uncertain)
if self.face_detected and not self.pose.low_std and not self.driver_distracted:
self.hi_stds += 1
elif self.face_detected and self.pose.low_std:
self.hi_stds = 0
def update(self, events, driver_engaged, ctrl_active, standstill):
if (driver_engaged and self.awareness > 0) or not ctrl_active:
# reset only when on disengagement if red reached
self.awareness = 1.
self.awareness_active = 1.
self.awareness_passive = 1.
return
driver_attentive = self.driver_distraction_filter.x < 0.37
awareness_prev = self.awareness
if (driver_attentive and self.face_detected and self.pose.low_std and self.awareness > 0):
# only restore awareness when paying attention and alert is not red
self.awareness = min(self.awareness + ((self.settings._RECOVERY_FACTOR_MAX-self.settings._RECOVERY_FACTOR_MIN)*(1.-self.awareness)+self.settings._RECOVERY_FACTOR_MIN)*self.step_change, 1.)
if self.awareness == 1.:
self.awareness_passive = min(self.awareness_passive + self.step_change, 1.)
# don't display alert banner when awareness is recovering and has cleared orange
if self.awareness > self.threshold_prompt:
return
standstill_exemption = standstill and self.awareness - self.step_change <= self.threshold_prompt
certainly_distracted = self.driver_distraction_filter.x > 0.63 and self.driver_distracted and self.face_detected
maybe_distracted = self.hi_stds > self.settings._HI_STD_FALLBACK_TIME or not self.face_detected
if certainly_distracted or maybe_distracted:
# should always be counting if distracted unless at standstill and reaching orange
if not standstill_exemption:
self.awareness = max(self.awareness - self.step_change, -0.1)
alert = None
if self.awareness <= 0.:
# terminal red alert: disengagement required
alert = EventName.driverDistracted if self.active_monitoring_mode else EventName.driverUnresponsive
self.terminal_time += 1
if awareness_prev > 0.:
self.terminal_alert_cnt += 1
elif self.awareness <= self.threshold_prompt:
# prompt orange alert
alert = EventName.promptDriverDistracted if self.active_monitoring_mode else EventName.promptDriverUnresponsive
elif self.awareness <= self.threshold_pre:
# pre green alert
alert = EventName.preDriverDistracted if self.active_monitoring_mode else EventName.preDriverUnresponsive
if alert is not None:
events.add(alert)
| set_policy |
extractor.py |
#Copyright (c) 2017 Andre Santos
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
###############################################################################
# Imports
###############################################################################
from __future__ import unicode_literals
from future import standard_library
standard_library.install_aliases()
from builtins import next
from builtins import str
from builtins import range
from past.builtins import basestring
from builtins import object
from fnmatch import fnmatch
import itertools
import logging
from operator import attrgetter
import os
import re
import subprocess
from urllib.request import urlopen
from urllib.error import URLError
import xml.etree.ElementTree as ET
import yaml
from bonsai.model import (
CodeGlobalScope, CodeReference, CodeFunctionCall, pretty_str
)
from bonsai.cpp.model import (
CppEntity, CppFunctionCall, CppDefaultArgument, CppOperator, CppReference
)
from bonsai.analysis import (
CodeQuery, resolve_reference, resolve_expression, get_control_depth,
get_conditions, get_condition_paths, is_under_loop
)
try:
from bonsai.cpp.clang_parser import CppAstParser
except ImportError:
CppAstParser = None
from bonsai.py.py_parser import PyAstParser
from rospkg import RosPack, RosStack, ResourceNotFound
from xml.etree.cElementTree import ElementTree
from distutils.spawn import find_executable
from .cmake_parser import RosCMakeParser
from .launch_parser import LaunchParser, LaunchParserError
from .metamodel import (
Project, Repository, Package, SourceFile, Node, Person, SourceCondition,
AdvertiseCall, SubscribeCall, AdvertiseServiceCall,
ServiceClientCall, Location, GetParamCall, SetParamCall
)
from .util import cwd
###############################################################################
# Utility
###############################################################################
class LoggingObject(object):
log = logging.getLogger(__name__)
def findRosPackages(paths = None, as_stack = False):
"""
Find ROS packages inside folders.
:param paths: [list] of [str] File system path to search, [None] to use the ROS default search paths.
:param as_stack: [bool] Whether the paths point to stacks.
:returns: [dict] Dictionary of [str]package_name -> [str]package_path.
"""
ros_version = os.environ.get("ROS_VERSION")
if ros_version != "1":
# try ROS2 crawling with colcon if possible
        # (In ambiguous cases, we give preference to trying the ROS2 method
        # first, because ROS1 rospkg only produces misleading or incorrect
        # information when used in ROS2/mixed workspaces.)
colcon = find_executable('colcon')
        if colcon is not None:
            cmd = [colcon, 'list']
            if paths is not None:
cmd.extend(['--base-paths'])
cmd.extend(paths)
try:
                pkglist = subprocess.check_output(cmd)
                # output format is <pkg_name>\t<pkg_path>\t<build_system>\n
                # decode first, since check_output returns bytes on Python 3
                pkglist = pkglist.decode().split('\n')
pkgs = {}
for pkginfo in pkglist:
pkginfo_parts = pkginfo.split('\t')
if len(pkginfo_parts) < 2:
continue
if pkginfo_parts[0] in pkgs:
continue
pkgs[pkginfo_parts[0]] = pkginfo_parts[1]
return pkgs
            except Exception:
pass
# ^ if colcon != None
# ^ if ros_version != "1"
# else: try the ROS1 way
ros = None
if as_stack:
ros = RosStack.get_instance(paths)
else:
ros = RosPack.get_instance(paths)
pkg_names = ros.list()
pkgs = {}
for pkg_name in pkg_names:
if pkg_name in pkgs:
continue
pkgs[pkg_name] = ros.get_path(pkg_name)
return pkgs
# ^ findRosPackages(paths)
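# Example usage (an illustrative sketch; the workspace path below is
# hypothetical):
#
#   pkgs = findRosPackages(paths=["/path/to/catkin_ws/src"])
#   for pkg_name, pkg_path in pkgs.items():
#       print(pkg_name, "->", pkg_path)
#
# With paths=None the ROS default search paths are used; with as_stack=True
# the same lookup is performed for stacks instead of packages.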
_EMPTY_DICT = {}
_EMPTY_LIST = ()
###############################################################################
# Source Extractor
###############################################################################
class ProjectExtractor(LoggingObject):
def __init__(self, index_file, env = None, pkg_cache = None,
repo_cache = None, repo_path = None, distro_url = None,
require_repos = False, parse_nodes = False, node_cache = None):
self.log.debug("ProjectExtractor(%s, %s, %s)",
index_file, repo_path, distro_url)
self.index_file = index_file
self.repo_path = repo_path
self.distribution = distro_url
self.require_repos = require_repos
self.parse_nodes = parse_nodes
        self.environment = env if env is not None else {}
        self.package_cache = pkg_cache if pkg_cache is not None else {}
        self.repo_cache = repo_cache if repo_cache is not None else {}
        self.node_cache = node_cache if node_cache is not None else {}
self.project = None
self.packages = None
self.missing = None
self.repositories = None
self.configurations = None
self.node_specs = None
self.rules = None
self.analysis = None
self._extra_packages = set()
def index_source(self, settings=None):
self.log.debug("ProjectExtractor.index_source()")
self._setup()
settings.update_analysis_preferences(self.analysis)
self._load_user_repositories()
self._find_local_packages()
if self.missing and self.distribution:
self._load_distro_repositories()
self._find_local_packages()
self._topological_sort()
for name in self.missing:
self.log.warning("Could not find package " + name)
self._populate_packages_and_dependencies(settings=settings)
self._update_node_cache()
self._find_nodes(settings)
self._update_nodes_from_specs()
def _setup(self):
try:
with open(self.index_file, "r") as handle:
data = yaml.safe_load(handle)
except IOError as e:
data = {}
self.project = Project(data.get("project", "default"))
self.repositories = data.get("repositories", {})
self.packages = set(data.get("packages")
or list(findRosPackages(["."])))
self.missing = set(self.packages)
self.configurations = data.get("configurations", {})
self.node_specs = data.get("nodes", {})
self.project.node_specs = self.node_specs
self.rules = data.get("rules", {})
self.analysis = data.get("analysis", {})
for node_name in self.node_specs:
if not "/" in node_name:
raise ValueError("expected '<pkg>/<node>' in node specs")
pkg, exe = node_name.split("/")
self._extra_packages.add(pkg)
self.missing.update(self._extra_packages)
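    # Sketch of an index file that _setup() can consume (illustrative only;
    # names and URLs are made up, and only the keys read by _setup() and
    # RepositoryExtractor.load_from_user() are shown):
    #
    #   project: my_project
    #   packages:
    #       - my_pkg
    #   repositories:
    #       my_repo:
    #           type: git
    #           url: https://example.com/my_repo.git
    #           version: master
    #           packages:
    #               - my_pkg
    #   nodes:
    #       my_pkg/my_node: {}
    #   configurations: {}
    #   rules: {}
    #   analysis: {}
    #
    # (Node spec values are handled by NodeHints2 and are not detailed here.)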
def _load_user_repositories(self):
self.log.info("Looking up user provided repositories.")
extractor = RepositoryExtractor()
for name, data in self.repositories.items():
repo = self.repo_cache.get(name)
if repo:
self.project.repositories.append(repo)
else:
extractor.load_from_user(name, data, project = self.project)
if self.repo_path:
try:
extractor.download(self.repo_path)
except RepositoryCloneError as e:
if self.require_repos:
raise e
else:
self.log.warning("Could not download all repositories.")
def _find_local_packages(self):
self.log.info("Looking for packages locally.")
cdir = os.path.abspath(".")
alt_paths = [self.repo_path, cdir] if self.repo_path else [cdir]
extractor = PackageExtractor(alt_paths = alt_paths)
extractor.refresh_package_cache()
found = []
for name in self.missing:
analyse = name in self.packages
pkg = self.package_cache.get(name)
if pkg:
self.project.packages.append(pkg)
found.append(name)
pkg._analyse = analyse
else:
pkg = extractor.find_package(name, project=self.project)
if pkg:
found.append(name)
pkg._analyse = analyse
self.missing.difference_update(found)
def _load_distro_repositories(self):
self.log.info("Looking up repositories from official distribution.")
try:
data = yaml.safe_load(urlopen(self.distribution).read())["repositories"]
except URLError as e:
self.log.warning("Could not download distribution data.")
return
extractor = RepositoryExtractor()
extractor.load_needed_from_distro(data, self.missing, self.project)
if self.repo_path:
try:
extractor.download(self.repo_path)
except RepositoryCloneError as e:
if self.require_repos:
raise e
else:
self.log.warning("Could not download all repositories.")
def _topological_sort(self):
dependencies = {}
pending = list(self.project.packages)
for pkg in self.project.packages:
pkg.topological_tier = -1
dependencies[pkg.id] = set(p for p in pkg.dependencies.packages
if p in self.packages)
tier = 1
emitted = []
while pending:
next_pending = []
next_emitted = []
for pkg in pending:
deps = dependencies[pkg.id]
deps.difference_update(emitted)
if deps:
next_pending.append(pkg)
else:
pkg.topological_tier = tier
next_emitted.append(pkg.name)
if not next_emitted:
# cyclic dependencies detected
self.log.warning("Cyclic dependencies: %s", next_pending)
for pkg in next_pending:
pkg.topological_tier = tier
next_pending = None
pending = next_pending
emitted = next_emitted
tier += 1
self.project.packages.sort(key = attrgetter("topological_tier", "id"))
def _populate_packages_and_dependencies(self, settings=None):
found = set()
extractor = PackageExtractor()
extractor.packages = self.project.packages
for pkg in self.project.packages:
found.add(pkg.name)
            analysis_ignore = extractor._populate_package(
                pkg, ignored_globs=(
                    settings.ignored_globs if settings is not None else None))
            if settings is not None:
                settings.ignored_lines.update(analysis_ignore)
deps = extractor._extra
extractor._extra = []
while deps:
pkg = deps.pop()
assert pkg.name not in found
pkg._analyse = False
found.add(pkg.name)
self.project.packages.append(pkg)
            analysis_ignore = extractor._populate_package(
                pkg, ignored_globs=(
                    settings.ignored_globs if settings is not None else None))
            if settings is not None:
                settings.ignored_lines.update(analysis_ignore)
deps.extend(extractor._extra)
extractor._extra = []
def _find_nodes(self, settings):
pkgs = {pkg.name: pkg for pkg in self.project.packages if pkg._analyse}
ws = settings.workspace
if not ws:
ws = settings.find_ros_workspace()
ws = os.path.abspath(ws)
if CppAstParser is None:
self.log.warning("C++ AST parser not found.")
extractor = NodeExtractor(pkgs, self.environment, ws = ws,
node_cache = self.node_cache,
parse_nodes = self.parse_nodes)
if self.parse_nodes and CppAstParser is not None:
if settings is None:
CppAstParser.set_library_path()
db_dir = os.path.join(extractor.workspace, "build")
if os.path.isfile(
os.path.join(db_dir, "compile_commands.json")):
CppAstParser.set_database(db_dir)
else:
#library file if given explicitly, otherwise path
if settings.cpp_parser_lib_file:
CppAstParser.set_library_file(settings.cpp_parser_lib_file)
else:
CppAstParser.set_library_path(settings.cpp_parser_lib)
CppAstParser.set_standard_includes(settings.cpp_includes)
db_dir = settings.cpp_compile_db
if db_dir and os.path.isfile(
os.path.join(db_dir, "compile_commands.json")):
CppAstParser.set_database(settings.cpp_compile_db)
for pkg in self.project.packages:
if pkg._analyse and pkg.name not in self.package_cache:
extractor.find_nodes(pkg)
def _update_node_cache(self):
self.log.debug("Importing cached Nodes.")
data = [datum for datum in self.node_cache.values()]
self.node_cache = {}
empty_dict = {}
empty_list = ()
for datum in data:
try:
pkg = self._get_package(datum["package"])
source_files = self._get_files(pkg, datum["files"])
except ValueError as e:
# either a package or a file is no longer part of the analysis
self.log.debug("Cached node %s: %s", datum["name"], e)
continue
mtime = datum["timestamp"]
for sf in source_files:
if sf.timestamp > mtime:
# a file was modified, needs to be parsed again
break
else:
node = Node(datum["name"], pkg, rosname = datum["rosname"],
nodelet = datum["nodelet"])
node.source_files = source_files
for p in datum["advertise"]:
node.advertise.append(self._pub_from_JSON(p))
for p in datum["subscribe"]:
node.subscribe.append(self._sub_from_JSON(p))
for p in datum["service"]:
node.service.append(self._srv_from_JSON(p))
for p in datum["client"]:
node.client.append(self._client_from_JSON(p))
for p in datum["readParam"]:
node.read_param.append(self._read_from_JSON(p))
for p in datum["writeParam"]:
node.write_param.append(self._write_from_JSON(p))
hpl = datum.get("hpl", empty_dict)
for p in hpl.get("properties", empty_list):
node.hpl_properties.append(p)
for a in hpl.get("assumptions", empty_list):
node.hpl_assumptions.append(a)
self.node_cache[node.node_name] = node
def _update_nodes_from_specs(self):
self.log.debug("Loading Nodes from specs.")
pkg_finder = PackageExtractor()
pkg_finder.packages.extend(self.project.packages)
nhm = NodeHints2(self.node_specs, pkg_finder=pkg_finder)
# nodes = dict(self.node_cache)
for pkg in self.project.packages:
for node in pkg.nodes:
node_type = node.node_name
if node_type not in self.node_cache:
self.log.debug(
"WARNING node %s is not in node cache!", node_type)
self.node_cache[node_type] = node
new_nodes = nhm.apply_to(self.node_cache, create=True)
for node in new_nodes:
assert node.node_name not in self.node_cache
self.node_cache[node.node_name] = node
node.package.nodes.append(node)
def _get_package(self, name):
for pkg in self.project.packages:
if pkg.name == name:
return pkg
raise ValueError("cannot find package: " + name)
def _get_files(self, pkg, filenames):
files = []
for filename in filenames:
found = False
for sf in pkg.source_files:
if sf.full_name == filename:
found = True
files.append(sf)
break
if not found:
raise ValueError("cannot find file: " + filename)
return files
def _pub_from_JSON(self, datum):
l = self._location_from_JSON
cs = [SourceCondition(c["condition"], location=l(c["location"]),
statement=c["statement"])
for c in datum["conditions"]]
return AdvertiseCall(datum["name"], datum["namespace"], datum["type"],
datum["queue"], latched=datum.get("latched", False),
control_depth = datum["depth"],
repeats = datum["repeats"],
conditions = cs, location = l(datum["location"]))
def _sub_from_JSON(self, datum):
l = self._location_from_JSON
cs = [SourceCondition(c["condition"], location=l(c["location"]),
statement=c["statement"])
for c in datum["conditions"]]
return SubscribeCall(datum["name"], datum["namespace"], datum["type"],
datum["queue"], control_depth = datum["depth"],
repeats = datum["repeats"],
conditions = cs, location = l(datum["location"]))
def _srv_from_JSON(self, datum):
l = self._location_from_JSON
cs = [SourceCondition(c["condition"], location=l(c["location"]),
statement=c["statement"])
for c in datum["conditions"]]
return AdvertiseServiceCall(datum["name"], datum["namespace"],
datum["type"], control_depth = datum["depth"],
repeats = datum["repeats"],
conditions = cs,
location = l(datum["location"]))
def _client_from_JSON(self, datum):
l = self._location_from_JSON
cs = [SourceCondition(c["condition"], location=l(c["location"]),
statement=c["statement"])
for c in datum["conditions"]]
return ServiceClientCall(datum["name"], datum["namespace"],
datum["type"], control_depth = datum["depth"],
repeats = datum["repeats"],
conditions = cs,
location = l(datum["location"]))
def _read_from_JSON(self, datum):
l = self._location_from_JSON
cs = [SourceCondition(c["condition"], location=l(c["location"]),
statement=c["statement"])
for c in datum["conditions"]]
return GetParamCall(datum["name"], datum["namespace"],
datum["type"], default_value=datum["default_value"],
control_depth=datum["depth"], repeats=datum["repeats"],
conditions=cs, location=l(datum["location"]))
def _write_from_JSON(self, datum):
l = self._location_from_JSON
cs = [SourceCondition(c["condition"], location=l(c["location"]),
statement=c["statement"])
for c in datum["conditions"]]
return SetParamCall(datum["name"], datum["namespace"],
datum["type"], value=datum["value"],
control_depth=datum["depth"], repeats=datum["repeats"],
conditions=cs, location=l(datum["location"]))
def _location_from_JSON(self, datum):
if datum is None:
return None
try:
pkg = self._get_package(datum["package"])
sf = None
filename = datum["file"]
if filename:
sf = self._get_files(pkg, [filename])[0]
except ValueError:
return None
return Location(pkg, file=sf, line=datum["line"], col=datum["column"],
fun=datum["function"], cls=datum["class"])
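# Typical driving code for this class looks roughly like the sketch below
# (assuming the surrounding framework provides a suitable `settings` object;
# the index file name is hypothetical):
#
#   extractor = ProjectExtractor("index.yaml", env=dict(os.environ),
#                                parse_nodes=True)
#   extractor.index_source(settings=settings)
#   project = extractor.project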
###############################################################################
# Repository Extractor
###############################################################################
class RepositoryCloneError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class RepositoryExtractor(LoggingObject):
def __init__(self):
self.repositories = []
self.declared_packages = set()
def load_from_user(self, name, data, project = None):
self.log.debug("RepositoryExtractor.from_user(%s, %s)", name, data)
repo = Repository(name, proj = project)
repo.status = "private"
repo.vcs = data["type"]
repo.url = data["url"]
repo.version = data["version"]
repo.declared_packages = data["packages"]
self.repositories.append(repo)
self.declared_packages.update(repo.declared_packages)
if project:
project.repositories.append(repo)
return repo
def load_from_distro(self, name, data, project = None):
self.log.debug("RepositoryExtractor.from_distro(%s, %s)", name, data)
if not "source" in data:
self.log.debug("There is no source in provided data.")
return
repo = Repository(name, proj = project)
repo.status = data.get("status")
src = data["source"]
repo.vcs = src["type"]
repo.url = src["url"]
repo.version = src["version"]
if "release" in data:
repo.declared_packages = data["release"].get("packages", [name])
self.repositories.append(repo)
self.declared_packages.update(repo.declared_packages)
if project:
project.repositories.append(repo)
return repo
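    # For reference, a repository entry in the distribution data passed to
    # load_from_distro() roughly looks like this sketch (illustrative values;
    # only the keys read above are shown):
    #
    #   my_repo:
    #       status: maintained
    #       source:
    #           type: git
    #           url: https://example.com/my_repo.git
    #           version: noetic-devel
    #       release:
    #           packages:
    #               - my_pkg
    #               - my_other_pkg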
def load_needed_from_distro(self, data, pkgs, project = None):
if not pkgs:
return True
remaining = set(pkgs)
for name, info in data.items():
if not "release" in info:
continue
for pkg in info["release"].get("packages", [name]):
try:
remaining.remove(pkg)
self.load_from_distro(name, info, project = project)
except KeyError as e:
pass
if not remaining:
break
return not remaining
def download(self, repo_path):
self.log.debug("RepositoryExtractor.download(%s)", repo_path)
for repo in self.repositories:
if not repo.url:
self.log.debug("%s has no URL to download from.", repo.id)
continue
path = os.path.join(repo_path, repo.name)
clone = False
if not os.path.exists(path):
os.makedirs(path)
clone = True
with cwd(path):
if repo.vcs == "git":
self._download_git(repo, path, clone)
elif repo.vcs == "hg":
self._download_hg(repo, path, clone)
elif repo.vcs == "svn":
self._download_svn(repo, path, clone)
return True
GIT_INIT = ("git", "init")
GIT_PULL = ("git", "pull")
GIT_COUNT = ("git", "rev-list", "HEAD", "--count")
def _download_git(self, repo, path, clone = False):
self.log.debug("RepositoryExtractor._download_git(%s)", path)
try:
if clone:
subprocess.check_call(self.GIT_INIT)
subprocess.check_call(["git", "remote",
"add", "-t", repo.version,
"-f", "origin", repo.url])
subprocess.check_call(["git", "checkout", repo.version])
else:
subprocess.check_call(self.GIT_PULL)
repo.path = path
repo.commits = int(subprocess.check_output(self.GIT_COUNT).rstrip())
except subprocess.CalledProcessError as e:
raise RepositoryCloneError("git error: " + str(e))
HG_PULL = ("hg", "pull")
HG_COUNT = ("hg", "id", "--num", "--rev", "tip")
def _download_hg(self, repo, path, clone = False):
self.log.debug("RepositoryExtractor._download_hg(%s)", path)
try:
if clone:
subprocess.check_call(["hg", "clone", repo.url,
"-r", repo.version])
else:
subprocess.check_call(self.HG_PULL)
repo.path = path
repo.commits = int(subprocess.check_output(self.HG_COUNT).rstrip())
except subprocess.CalledProcessError as e:
raise RepositoryCloneError("hg error: " + str(e))
SVN_FETCH = ("git", "svn", "fetch")
def _download_svn(self, repo, path, clone = False):
self.log.debug("RepositoryExtractor._download_svn(%s)", path)
try:
if clone:
if repo.version == "trunk":
version = repo.version
else:
version = "branches/" + repo.version
subprocess.check_call(["git", "svn", "clone",
"-T", version, repo.url])
else:
subprocess.check_call(self.SVN_FETCH)
self.path = path
self.commits = int(subprocess.check_output(self.GIT_COUNT).rstrip())
except subprocess.CalledProcessError as e:
raise RepositoryCloneError("git-svn error: " + str(e))
###############################################################################
# Package Extractor
###############################################################################
class PackageExtractor(LoggingObject):
def __init__(self, alt_paths = None):
self.packages = []
self.rospack_pkgs = None
self.rosstack_pkgs = None
self.alt_paths = alt_paths
self.altpack_pkgs = None
self.altstack_pkgs = None
self._pkg_cache = {}
self._extra = []
def refresh_package_cache(self):
self.rospack_pkgs = None
self.rosstack_pkgs = None
self.altpack_pkgs = None
self.altstack_pkgs = None
# To use with LaunchParser.
def get(self, pkg_id, populate=True):
self.log.debug("%s.get('%s')", type(self).__name__, pkg_id)
if pkg_id in self._pkg_cache:
return self._pkg_cache[pkg_id]
for pkg in self.packages:
if pkg.id == pkg_id:
self._pkg_cache[pkg_id] = pkg
return pkg
try:
assert pkg_id.startswith("package:")
pkg = self._find(pkg_id[8:], None)
self._pkg_cache[pkg_id] = pkg
self._extra.append(pkg)
pkg._analyse = False
if populate:
self._populate_package(pkg)
except (IOError, ET.ParseError, ResourceNotFound):
return None
return pkg
def find_package(self, name, project=None, analyse=True):
try:
pkg = self._find(name, project)
pkg._analyse = analyse
self.packages.append(pkg)
if project:
project.packages.append(pkg)
for repo in project.repositories:
if name in repo.declared_packages:
pkg.repository = repo
repo.packages.append(pkg)
break
# self._populate_package(pkg)
except (IOError, ET.ParseError, KeyError):
return None
return pkg
def find_package_at(self, dirpath, populate=True):
try:
manifest = os.path.join(dirpath, "package.xml")
pkg = PackageParser.parse(manifest)
if pkg.id in self._pkg_cache:
return self._pkg_cache[pkg.id]
else:
self._pkg_cache[pkg.id] = pkg
if pkg not in self._extra:
self._extra.append(pkg)
pkg._analyse = False
if populate:
self._populate_package(pkg)
except (IOError, ET.ParseError, KeyError):
return None
return pkg
def _find(self, name, project):
path = None
if self.alt_paths:
            if self.altpack_pkgs is None:
                self.altpack_pkgs = findRosPackages(paths=self.alt_paths, as_stack=False)
            path = self.altpack_pkgs.get(name, None)
            if path is None:
                if self.altstack_pkgs is None:
                    self.altstack_pkgs = findRosPackages(paths=self.alt_paths, as_stack=True)
                path = self.altstack_pkgs.get(name, None)
        if path is None:
            if self.rospack_pkgs is None:
                self.rospack_pkgs = findRosPackages(as_stack=False)
            path = self.rospack_pkgs.get(name, None)
        if path is None:
            if self.rosstack_pkgs is None:
                self.rosstack_pkgs = findRosPackages(as_stack=True)
            path = self.rosstack_pkgs.get(name, None)
if path is None:
raise KeyError(name)
return PackageParser.parse(os.path.join(path, "package.xml"),
project = project)
EXCLUDED = (".git", "doc", "cmake", ".eggs", "__pycache__")
_START_GLOB = (os.path.sep, '*', '?', '[')
_BYTE_CODE = (".pyc", ".pyd", ".pyo")
def _populate_package(self, pkg, ignored_globs=None):
self.log.debug("PackageExtractor.populate(%s, %s)", pkg, ignored_globs)
if not pkg.path:
self.log.debug("Package %s has no path", pkg.name)
return
self.log.info("Indexing source files for package %s", pkg.name)
analysis_ignore = {}
#pkgs = {pkg.id: pkg for pkg in self.packages}
launch_parser = LaunchParser(pkgs=self)
prefix = len(pkg.path) + len(os.path.sep)
if ignored_globs is None:
ignored_globs = ()
else:
ignored_globs = list(ignored_globs)
for i in range(len(ignored_globs)):
c = ignored_globs[i][0]
if not c in self._START_GLOB:
ignored_globs[i] = '*/' + ignored_globs[i]
for root, subdirs, files in os.walk(pkg.path, topdown=True):
if 'COLCON_IGNORE' in files or 'AMENT_IGNORE' in files or 'CATKIN_IGNORE' in files:
del subdirs[:] # don't traverse into subdirectories
continue # skip
subdirs[:] = [d for d in subdirs if d not in self.EXCLUDED]
path = root[prefix:]
for filename in files:
self.log.debug("Found file %s at %s", filename, path)
source = SourceFile(filename, path, pkg)
self.log.debug("File language: %s", source.language)
sfn = os.path.join(pkg.name, source.full_name)
if source.language == "unknown":
if filename.endswith(self._BYTE_CODE):
self.log.debug("Python bytecode file %s was ignored",
sfn)
continue # skip this file
if any(fnmatch(sfn, pattern)
for pattern in ignored_globs):
self.log.debug(
"File %s was ignored due to glob pattern", sfn)
continue # skip this file
ignore = source.set_file_stats()
if any(v for v in ignore.values()):
analysis_ignore[source.id] = ignore
if pkg._analyse and source.language == "launch":
self.log.info("Parsing launch file: " + source.path)
try:
source.tree = launch_parser.parse(source.path)
except LaunchParserError as e:
self.log.warning("Parsing error in %s:\n%s",
source.path, str(e))
pkg.source_files.append(source)
pkg.size += source.size
pkg.lines += source.lines
pkg.sloc += source.sloc
return analysis_ignore
###############################################################################
# Package Parser
###############################################################################
class PackageParser(LoggingObject):
@staticmethod
def parse(pkg_file, project = None):
PackageParser.log.debug("PkgParser.parse(%s, %s)", pkg_file, project)
with open(pkg_file, "r") as handle:
root = ET.parse(handle).getroot()
name = root.find("name").text.strip()
package = Package(name, proj = project)
package.path = os.path.dirname(pkg_file)
PackageParser.log.info("Found package %s at %s", package, package.path)
PackageParser._parse_metadata(root, package)
PackageParser._parse_export(root, package)
PackageParser._parse_dependencies(root, package)
return package
@staticmethod
def _parse_metadata(xml, package):
package.description = (xml.find("description").text or "").strip()
for el in xml.findall("maintainer"):
name = (el.text or "?").strip()
email = el.get("email") or "[email protected]"
package.maintainers.add(Person(name, email))
for el in xml.findall("author"):
name = (el.text or "?").strip()
email = el.get("email") or "[email protected]"
package.authors.add(Person(name, email))
for el in xml.findall("license"):
package.licenses.add((el.text or "?").strip())
for el in xml.findall("url"):
value = el.get("type")
if value is None or value == "website":
if el.text:
package.website = el.text.strip()
elif value == "repository":
if el.text:
package.vcs_url = el.text.strip()
elif value == "bugtracker":
if el.text:
package.bug_url = el.text.strip()
el = xml.find("version")
if el is not None:
package.version = (el.text or "?").strip()
@staticmethod
def _parse_export(xml, package):
el = xml.find("export")
        if el is not None:
            package.is_metapackage = el.find("metapackage") is not None
            if el.find("nodelet") is not None:
nodelets = el.find("nodelet").get("plugin")
nodelets = nodelets.replace("${prefix}", package.path)
with open(nodelets, "r") as handle:
xmltext = "<export>{}</export>".format(handle.read())
root = ET.fromstring(xmltext)
PackageParser.log.info("Found nodelets at %s", nodelets)
libs = []
for child in root:
if child.tag == "library":
libs.append(child)
else:
libs.extend(child.findall("library"))
for el in libs:
libname = el.get("path").rsplit(os.sep)[-1]
for cl in el.findall("class"):
nodelet = cl.get("type").split("::")[-1]
node = Node(libname, package, nodelet = nodelet)
package.nodes.append(node)
@staticmethod
def _parse_dependencies(xml, package):
sources = ["build_depend"]
if xml.get("format") == "2":
sources.extend(("depend", "build_export_depend", "exec_depend"))
else:
sources.append("run_depend")
for src in sources:
for el in xml.findall(src):
name = el.text.strip()
if name:
package.dependencies.packages.add(name)
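# A small package.xml sketch covering the elements PackageParser reads
# (values are illustrative, not taken from a real package):
#
#   <package format="2">
#     <name>my_pkg</name>
#     <version>0.1.0</version>
#     <description>Example package.</description>
#     <maintainer email="[email protected]">Maintainer Name</maintainer>
#     <license>MIT</license>
#     <url type="repository">https://example.com/my_pkg</url>
#     <depend>roscpp</depend>
#     <exec_depend>rospy</exec_depend>
#     <export>
#       <nodelet plugin="${prefix}/nodelet_plugins.xml"/>
#     </export>
#   </package>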
###############################################################################
# Hard-coded Node Parser
###############################################################################
class HardcodedNodeParser(LoggingObject):
model_dir = None
distro = None
_cache = {}
@classmethod
def get(cls, pkg, node_type):
cls.log.debug("Fetching hard-coded node: (%s, %s, %s)",
pkg, node_type, cls.distro)
node_id = "node:" + pkg + "/" + node_type
if node_id in cls._cache:
cls.log.debug("Node already in cache.")
return cls._cache[node_id]
filename = os.path.join(cls.model_dir, pkg + ".yaml")
try:
with open(filename) as handle:
data = yaml.safe_load(handle)
except IOError as e:
cls.log.debug("YAML file not found: %s", filename)
return None
        if cls.distro not in data:
            cls.log.debug("Package has no data for ROS %s.", cls.distro)
            return None
        if node_type not in data[cls.distro]:
cls.log.debug("Node does not exist for ROS %s.", cls.distro)
return None
cls.log.debug("Building node from YAML data.")
pkg = Package(pkg)
pkg.path = "/tmp/" + pkg.name
node = cls._build_node(node_type, cls.distro, pkg, data)
cls._cache[node_id] = node
return node
@classmethod
    def _build_node(cls, node_type, distro, pkg, data):
node_data = data[distro][node_type]
base = node_data.get("base")
if base:
node = cls._build_node(node_type, base, pkg, data)
else:
node = Node(node_type, pkg, rosname = node_data.get("rosname"),
nodelet = node_type if node_data["nodelet"] else None)
for datum in node_data.get("advertise", ()):
loc = cls._loc(pkg, datum)
pub = AdvertiseCall(datum["name"], datum["namespace"],
datum["type"], datum["queue"],
latched=datum.get("latched", False),
control_depth=datum["depth"],
repeats=datum["repeats"],
conditions=[SourceCondition(c["condition"],
statement=c["statement"])
for c in datum["conditions"]],
location=loc)
node.advertise.append(pub)
for datum in node_data.get("subscribe", ()):
loc = cls._loc(pkg, datum)
sub = SubscribeCall(datum["name"], datum["namespace"],
datum["type"], datum["queue"],
control_depth = datum["depth"],
repeats = datum["repeats"],
conditions = [SourceCondition(c["condition"],
statement=c["statement"])
for c in datum["conditions"]],
location=loc)
node.subscribe.append(sub)
for datum in node_data.get("service", ()):
loc = cls._loc(pkg, datum)
srv = AdvertiseServiceCall(datum["name"], datum["namespace"],
datum["type"], control_depth = datum["depth"],
repeats = datum["repeats"],
conditions = [SourceCondition(c["condition"],
statement=c["statement"])
for c in datum["conditions"]],
location=loc)
node.service.append(srv)
for datum in node_data.get("client", ()):
loc = cls._loc(pkg, datum)
cli = ServiceClientCall(datum["name"], datum["namespace"],
datum["type"], control_depth = datum["depth"],
repeats = datum["repeats"],
conditions = [SourceCondition(c["condition"],
statement=c["statement"])
for c in datum["conditions"]],
location=loc)
node.client.append(cli)
for datum in node_data.get("readParam", ()):
loc = cls._loc(pkg, datum)
par = GetParamCall(datum["name"], datum["namespace"],
datum["type"], default_value=datum.get("default"),
control_depth=datum["depth"], repeats=datum["repeats"],
conditions=[SourceCondition(c["condition"],
statement=c["statement"])
for c in datum["conditions"]],
location=loc)
node.read_param.append(par)
for datum in node_data.get("writeParam", ()):
loc = cls._loc(pkg, datum)
par = SetParamCall(datum["name"], datum["namespace"],
datum["type"], value=datum.get("value"),
control_depth=datum["depth"], repeats=datum["repeats"],
conditions=[SourceCondition(c["condition"],
statement=c["statement"])
for c in datum["conditions"]],
location=loc)
node.write_param.append(par)
cls.log.debug("Hard-coded Node: " + str(node.to_JSON_object()))
return node
@classmethod
def _loc(cls, pkg, data):
loc = data.get("location")
if loc is None:
return None
p = loc.get("package")
if p is None or p != pkg.name:
return None
f = loc["file"]
for sf in pkg.source_files:
if sf.full_name == f:
f = sf
break
else:
parts = loc["file"].rsplit("/", 1)
if len(parts) == 1:
directory = ""
name = parts[0]
else:
assert len(parts) == 2
directory, name = parts
f = SourceFile(name, directory, pkg)
pkg.source_files.append(f)
return Location(pkg, file=f, line=loc["line"], col=loc["column"],
fun=loc.get("function"), cls=loc.get("class"))
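# Sketch of the YAML layout HardcodedNodeParser expects in
# <model_dir>/<package>.yaml (illustrative; only fields consumed by
# _build_node() are shown and all values are made up):
#
#   noetic:
#       my_node:
#           rosname: my_node
#           nodelet: false
#           advertise:
#               - name: chatter
#                 namespace: ''
#                 type: std_msgs/String
#                 queue: 10
#                 depth: 0
#                 repeats: false
#                 conditions: []
#           subscribe: []
#           service: []
#           client: []
#           readParam: []
#           writeParam: []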
###############################################################################
# Node Extractor
###############################################################################
class NodeExtractor(LoggingObject):
def __init__(self, pkgs, env, ws=None, node_cache=None, parse_nodes=False):
self.package = None
self.packages = pkgs
self.environment = env
self.workspace = ws
self.node_cache = node_cache
self.parse_nodes = parse_nodes
self.nodes = []
self.roscpp_extractor = None
self.rospy_extractor = None
def find_nodes(self, pkg):
self.log.debug("NodeExtractor.find_nodes(%s)", pkg)
self.package = pkg
srcdir = self.package.path[len(self.workspace):]
srcdir = os.path.join(self.workspace, srcdir.split(os.sep, 1)[0])
bindir = os.path.join(self.workspace, "build")
cmake_path = os.path.join(self.package.path, "CMakeLists.txt")
if os.path.isfile(cmake_path):
parser = RosCMakeParser(srcdir, bindir, pkgs = self.packages,
env = self.environment,
vars = self._default_variables())
parser.parse(cmake_path)
self._update_nodelets(parser.libraries)
self._register_nodes(parser.executables)
else:
# It may be normal for pure Python projects not to have a CMakeLists.txt
# Instead, search for python files with "def main():"
            pattern = re.compile(r'^def\s+main\s*\(.*\)\s*:')
for file in pkg.source_files:
if file.language != 'python':
continue # continue with next file
entry_point_found = False
with open(file.path) as f:
for line in f:
match = pattern.match(line)
if match is not None:
entry_point_found = True
break
                if not entry_point_found:
continue # continue with next file
# else: this is a python file with a 'main' function,
# so we consider it a node.
node = Node(file.full_name, pkg)
node.source_files.append(file)
self.nodes.append(node)
self.package.nodes.append(node)
if self.parse_nodes:
self._extract_primitives()
def _default_variables(self):
# TODO: clean up these hardcoded values
v = {}
v["catkin_INCLUDE_DIRS"] = os.path.join(self.workspace,
"devel/include")
v["Boost_INCLUDE_DIRS"] = "/usr/include/"
v["Eigen_INCLUDE_DIRS"] = "/usr/include/eigen3"
v["ImageMagick_INCLUDE_DIRS"] = "/usr/include/ImageMagick"
v["PROJECT_SOURCE_DIR"] = self.package.path
return v
def _get_file(self, path):
for sf in self.package.source_files:
if sf.path == path:
return sf
return None
def _update_nodelets(self, libraries):
lib_files = {}
for target in libraries.values():
files = []
for path in target.files:
sf = self._get_file(path)
if sf:
files.append(sf)
for link in target.links:
for path in link.files:
sf = self._get_file(path)
if sf:
files.append(sf)
lib_files[target.prefixed_name] = files
for nodelet in self.package.nodes:
if not nodelet.is_nodelet:
continue
if nodelet.name in lib_files:
nodelet.source_files = lib_files[nodelet.name]
def _register_nodes(self, executables):
for target in executables.values():
node = Node(target.output_name, self.package)
for path in target.files:
sf = self._get_file(path)
if sf:
node.source_files.append(sf)
for link in target.links:
for path in link.files:
sf = self._get_file(path)
if sf:
node.source_files.append(sf)
lang = node.language
if lang == "cpp" or lang == "python":
self.log.debug("register %s node: %s", lang, node.node_name)
self.nodes.append(node)
self.package.nodes.append(node)
else:
self.log.debug("CMake target is not a node: %s (%s) %s",
node.node_name, lang, node.source_files)
def _extract_primitives(self, force_when_cached=False):
self.roscpp_extractor = RoscppExtractor(self.package, self.workspace)
self.rospy_extractor = RospyExtractor(self.package, self.workspace)
for i in range(len(self.package.nodes)):
node = self.package.nodes[i]
self.log.debug("Extracting primitives for node %s", node.id)
if node.source_tree is not None:
self.log.debug("Node already has a source tree. Skipped.")
continue
if (node.node_name in self.node_cache) and not force_when_cached:
self.log.debug("Using Node %s from cache.", node.node_name)
node = self.node_cache[node.node_name]
assert node.package is self.package
self.package.nodes[i] = node
continue
node.source_tree = CodeGlobalScope()
node.advertise = []
node.subscribe = []
node.service = []
node.client = []
node.read_param = []
node.write_param = []
if not node.source_files:
self.log.warning("no source files for node " + node.id)
if node.language == "cpp" and CppAstParser is not None:
self.roscpp_extractor.extract(node)
elif node.language == "python":
self.rospy_extractor.extract(node)
else:
self.log.debug("Node written in %s.", node.language)
self.log.debug("Skipping parsing and primitive extraction.")
###############################################################################
# C++ Primitive Extractor
###############################################################################
class RoscppExtractor(LoggingObject):
def __init__(self, package, workspace):
self.package = package
self.workspace = workspace
def extract(self, node):
self.log.debug("Parsing C++ files for node %s", node.id)
parser = CppAstParser(workspace=self.workspace, logger=__name__)
for sf in node.source_files:
self.log.debug("Parsing C++ file %s", sf.path)
if parser.parse(sf.path) is None:
self.log.warning("no compile commands for " + sf.path)
node.source_tree = parser.global_scope
# ----- queries after parsing, since global scope is reused -----------
self._query_comm_primitives(node, parser.global_scope)
self._query_nh_param_primitives(node, parser.global_scope)
self._query_param_primitives(node, parser.global_scope)
def _query_comm_primitives(self, node, gs):
for call in CodeQuery(gs).all_calls.where_name("advertise").get():
if call.canonical_type != "ros::Publisher":
continue
self._on_publication(node,
self._resolve_node_handle(call.method_of), call)
for call in CodeQuery(gs).all_calls.where_name("subscribe").get():
if call.canonical_type != "ros::Subscriber":
continue
self._on_subscription(node,
self._resolve_node_handle(call.method_of), call)
for call in CodeQuery(gs).all_calls.where_name("advertiseService").get():
if call.canonical_type != "ros::ServiceServer":
continue
self._on_service(node,
self._resolve_node_handle(call.method_of), call)
for call in CodeQuery(gs).all_calls.where_name("serviceClient").get():
if call.canonical_type != "ros::ServiceClient":
continue
self._on_client(node,
self._resolve_node_handle(call.method_of), call)
self.log.debug("Looking for image_transport::SubscriberFilter calls.")
for call in CodeQuery(gs).all_calls.where_name("SubscriberFilter").get():
self.log.debug("Found: %s", call.pretty_str())
self.log.debug("%s", type(call))
self.log.debug("%s", call.__dict__)
if isinstance(call.reference, str):
if not call.reference.startswith("c:@N@image_transport@S@SubscriberFilter"):
continue
if not "image_transport::SubscriberFilter" in call.canonical_type:
continue
n = call.arguments[0] if call.arguments else None
self._on_subscription(node, self._resolve_it_node_handle(n),
call, topic_pos = 1, queue_pos = 2,
msg_type = "sensor_msgs/Image")
self.log.debug("Looking for message_filters::Subscriber calls.")
for call in CodeQuery(gs).all_calls.where_name("Subscriber").get():
self.log.debug("Found: %s", call.pretty_str())
self.log.debug("%s", type(call))
self.log.debug("%s", call.__dict__)
if isinstance(call.reference, str):
if not call.reference.startswith("c:@N@message_filters@S@Subscriber"):
continue
if not "message_filters::Subscriber" in call.canonical_type:
continue
n = call.arguments[0] if call.arguments else None
self._on_subscription(node, self._resolve_node_handle(n),
call, topic_pos = 1, queue_pos = 2)
self.log.debug("Looking for image_transport::Subscriber calls.")
for call in CodeQuery(gs).all_calls.where_name("subscribe").get():
if call.canonical_type != "image_transport::Subscriber":
continue
self.log.debug("Found: %s", call.pretty_str())
self.log.debug("%s", type(call))
self.log.debug("%s", call.__dict__)
n = call.method_of if call.method_of else None
self._on_subscription(node, self._resolve_it_node_handle(n),
call, msg_type = "sensor_msgs/Image")
self.log.debug("Looking for image_transport::Publisher.")
for call in CodeQuery(gs).all_calls.where_name("advertise").get():
if call.canonical_type != "image_transport::Publisher":
continue
self.log.debug("Found: %s", call.pretty_str())
self.log.debug("%s", type(call))
self.log.debug("%s", call.__dict__)
n = call.method_of if call.method_of else None
self._on_publication(node, self._resolve_it_node_handle(n),
call, msg_type = "sensor_msgs/Image")
def _query_nh_param_primitives(self, node, gs):
nh_prefix = "c:@N@ros@S@NodeHandle@"
gets = ("getParam", "getParamCached", "param")
reads = gets + ("hasParam", "searchParam")
for call in CodeQuery(gs).all_calls.where_name(reads).get():
if (call.full_name.startswith("ros::NodeHandle")
or (isinstance(call.reference, str)
and call.reference.startswith(nh_prefix))):
param_type = default_value = None
if call.name in gets:
param_type = self._extract_param_type(call.arguments[1])
if call.name == "param":
if len(call.arguments) > 2:
default_value = self._extract_param_value(
call, arg_pos=2)
elif len(call.arguments) == 2:
default_value = self._extract_param_value(
call, arg_pos=1)
self._on_read_param(node, self._resolve_node_handle(call),
call, param_type, default_value)
sets = ("setParam",)
writes = sets + ("deleteParam",)
for call in CodeQuery(gs).all_calls.where_name(writes).get():
if (call.full_name.startswith("ros::NodeHandle")
or (isinstance(call.reference, str)
and call.reference.startswith(nh_prefix))):
param_type = value = None
if len(call.arguments) >= 2 and call.name in sets:
param_type = self._extract_param_type(call.arguments[1])
value = self._extract_param_value(call, arg_pos=1)
self._on_write_param(node, self._resolve_node_handle(call),
call, param_type, value)
def _query_param_primitives(self, node, gs):
ros_prefix = "c:@N@ros@N@param@"
gets = ("get", "getCached", "param")
reads = gets + ("has",)
for call in CodeQuery(gs).all_calls.where_name(reads).get():
if (call.full_name.startswith("ros::param")
or (isinstance(call.reference, str)
and call.reference.startswith(ros_prefix))):
param_type = default_value = None
if call.name in gets:
param_type = self._extract_param_type(call.arguments[1])
if call.name == "param":
if len(call.arguments) > 2:
default_value = self._extract_param_value(
call, arg_pos=2)
elif len(call.arguments) == 2:
default_value = self._extract_param_value(
call, arg_pos=1)
self._on_read_param(node, "", call, param_type, default_value)
for call in (CodeQuery(gs).all_calls.where_name("search")
.where_result("bool").get()):
if (call.full_name.startswith("ros::param")
or (isinstance(call.reference, str)
and call.reference.startswith(ros_prefix))):
if len(call.arguments) > 2:
ns = resolve_expression(call.arguments[0])
if not isinstance(ns, basestring):
ns = "?"
else:
ns = "~"
self._on_read_param(node, ns, call, None, None)
sets = ("set",)
writes = sets + ("del",)
for call in CodeQuery(gs).all_calls.where_name(writes).get():
if (call.full_name.startswith("ros::param")
or (isinstance(call.reference, str)
and call.reference.startswith(ros_prefix))):
param_type = value = None
if len(call.arguments) >= 2 and call.name in sets:
param_type = self._extract_param_type(call.arguments[1])
value = self._extract_param_value(call, arg_pos=1)
self._on_write_param(node, "", call, param_type, value)
def _on_publication(self, node, ns, call, topic_pos=0, queue_pos=1,
msg_type=None, latch_pos=-1):
if len(call.arguments) <= 1:
return
name = self._extract_topic(call, topic_pos=topic_pos)
msg_type = msg_type or self._extract_message_type(call)
queue_size = self._extract_queue_size(call, queue_pos=queue_pos)
latched = False
if len(call.arguments) >= 3 and len(call.arguments) > latch_pos:
latched = self._extract_latch(call, latch_pos)
depth = get_control_depth(call, recursive=True)
location = self._call_location(call)
conditions = []
for path in get_condition_paths(call):
for c in path:
conditions.append(SourceCondition(pretty_str(c.value),
location=self._condition_location(c, location.file),
statement=c.statement))
break # FIXME
pub = AdvertiseCall(name, ns, msg_type, queue_size, latched=latched,
location=location, control_depth=depth, conditions=conditions,
repeats=is_under_loop(call, recursive=True))
node.advertise.append(pub)
self.log.debug("Found AdvertiseCall on %s/%s (%s)", ns, name, msg_type)
def _on_subscription(self, node, ns, call, topic_pos=0, queue_pos=1,
msg_type=None):
if len(call.arguments) <= 1:
return
name = self._extract_topic(call, topic_pos=topic_pos)
msg_type = msg_type or self._extract_message_type(call)
queue_size = self._extract_queue_size(call, queue_pos=queue_pos)
depth = get_control_depth(call, recursive=True)
location = self._call_location(call)
conditions = []
for path in get_condition_paths(call):
for c in path:
conditions.append(SourceCondition(pretty_str(c.value),
location=self._condition_location(c, location.file),
statement=c.statement))
break # FIXME
sub = SubscribeCall(name, ns, msg_type, queue_size, location=location,
control_depth=depth, conditions=conditions,
repeats=is_under_loop(call, recursive=True))
node.subscribe.append(sub)
self.log.debug("Found SubscribeCall on %s/%s (%s)", ns, name, msg_type)
def _on_service(self, node, ns, call):
if len(call.arguments) <= 1:
return
name = self._extract_topic(call)
msg_type = self._extract_message_type(call)
depth = get_control_depth(call, recursive=True)
location = self._call_location(call)
conditions = []
for path in get_condition_paths(call):
for c in path:
conditions.append(SourceCondition(pretty_str(c.value),
location=self._condition_location(c, location.file),
statement=c.statement))
break # FIXME
srv = AdvertiseServiceCall(name, ns, msg_type, location=location,
control_depth=depth, conditions=conditions,
repeats=is_under_loop(call, recursive=True))
node.service.append(srv)
self.log.debug("Found Service on %s/%s (%s)", ns, name, msg_type)
def _on_client(self, node, ns, call):
if len(call.arguments) <= 1:
return
name = self._extract_topic(call)
msg_type = self._extract_message_type(call)
depth = get_control_depth(call, recursive=True)
location = self._call_location(call)
conditions = []
for path in get_condition_paths(call):
for c in path:
conditions.append(SourceCondition(pretty_str(c.value),
location=self._condition_location(c, location.file),
statement=c.statement))
break # FIXME
cli = ServiceClientCall(name, ns, msg_type, location=location,
control_depth=depth, conditions=conditions,
repeats=is_under_loop(call, recursive=True))
node.client.append(cli)
self.log.debug("Found Client on %s/%s (%s)", ns, name, msg_type)
def _on_read_param(self, node, ns, call, param_type, default_value):
if len(call.arguments) < 1:
return
name = self._extract_topic(call)
depth = get_control_depth(call, recursive=True)
location = self._call_location(call)
conditions = []
for path in get_condition_paths(call):
for c in path:
conditions.append(SourceCondition(pretty_str(c.value),
location=self._condition_location(c, location.file),
statement=c.statement))
break # FIXME
read = GetParamCall(name, ns, param_type,
default_value=default_value, location=location,
control_depth=depth, conditions=conditions,
repeats=is_under_loop(call, recursive = True))
node.read_param.append(read)
self.log.debug("Found Read on %s/%s (%s) (%s)",
ns, name, param_type, default_value)
def _on_write_param(self, node, ns, call, param_type, value):
if len(call.arguments) < 1:
return
name = self._extract_topic(call)
depth = get_control_depth(call, recursive=True)
location = self._call_location(call)
conditions = []
for path in get_condition_paths(call):
for c in path:
conditions.append(SourceCondition(pretty_str(c.value),
location=self._condition_location(c, location.file),
statement=c.statement))
break # FIXME
wrt = SetParamCall(name, ns, param_type, value=value,
location=location, control_depth=depth, conditions=conditions,
repeats=is_under_loop(call, recursive = True))
node.write_param.append(wrt)
self.log.debug("Found Write on %s/%s (%s) (%s)",
ns, name, param_type, value)
def _condition_location(self, condition_obj, sf):
if sf is not None:
if sf.path != condition_obj.file:
self.log.debug(("condition Location: files do not match: "
"'%s', '%s'"), sf.path, condition_obj.file)
if condition_obj.file.startswith(self.package.path):
for sf2 in self.package.source_files:
if sf2.path == condition_obj.file:
sf = sf2
break
self.log.debug("Location: found correct file")
return Location(self.package, file=sf, line=condition_obj.line,
col=condition_obj.column, fun=condition_obj.function.name)
def _call_location(self, call):
try:
source_file = next(
sf
for sf in self.package.source_files
if sf.path == call.file)
except StopIteration:
source_file = None
function = call.function
if function:
function = function.name
return Location(self.package, file=source_file,
line=call.line, col=call.column, fun=function)
def _resolve_it_node_handle(self, value):
value = resolve_expression(value)
if (isinstance(value, CppFunctionCall)
and value.name == "ImageTransport"):
return self._resolve_node_handle(value.arguments[0])
return "?"
def _resolve_node_handle(self, call):
ns = "?"
node_handle = getattr(call, 'method_of', None) or call
if getattr(node_handle, 'name', None) == 'operator->':
node_handle = node_handle.arguments[0]
node_handle_def = None
if isinstance(node_handle, CppReference):
node_handle_def = resolve_reference(node_handle)
elif isinstance(node_handle, CppDefaultArgument):
return ''
# A function needs to be called to create a NodeHandle (constructors
# are functions)
if isinstance(node_handle_def, CppFunctionCall):
# node_handle_def is a call to the constructor
if node_handle_def.name == 'NodeHandle':
args = node_handle_def.arguments
# Copy constructor
if len(args) == 1:
parent = args[0]
if isinstance(parent, CppFunctionCall):
if parent.name == 'getNodeHandle':
return ''
elif parent.name == 'getPrivateNodeHandle':
return '~'
return self._resolve_node_handle(parent)
                # All other constructors have at least two arguments; the
                # third is never meaningful.
                # If a parent NodeHandle is passed, it is the first argument.
                # If a namespace argument is passed, it is either the first or
                # second parameter. Only the first has an empty default value.
prefix = ''
if isinstance(args[0], basestring):
ns = args[0]
elif isinstance(args[0], CppDefaultArgument):
ns = ''
elif isinstance(args[1], basestring):
prefix = self._resolve_node_handle(args[0])
ns = args[1]
else:
ns = "?"
if prefix:
ns = prefix + "/" + ns
elif node_handle_def.name == 'getNodeHandle':
ns = ''
elif node_handle_def.name == 'getPrivateNodeHandle':
ns = '~'
elif isinstance(node_handle_def, CppDefaultArgument):
ns = ''
return ns
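    # Illustrative C++ snippets and the namespace this resolution yields
    # (a sketch; not taken from any real project):
    #
    #   ros::NodeHandle nh;              ->  ''   (public handle)
    #   ros::NodeHandle pnh("~");        ->  '~'  (private handle)
    #   ros::NodeHandle sub(nh, "sub");  ->  'sub' (prefixed with nh's
    #                                         namespace when it has one)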
def _extract_topic(self, call, topic_pos=0):
name = resolve_expression(call.arguments[topic_pos])
if not isinstance(name, basestring):
name = "?"
return name or "?"
def _extract_message_type(self, call):
if call.template:
template = call.template[0]
std_alloc = re.search("_<std::allocator<void>", template)
if std_alloc is not None:
template = template[:std_alloc.start()]
#assert re.match(r"\w+::\w+$", template)
if not re.match(r"\w+::\w+$", template):
self.log.debug("Weird message type: " + repr(template))
return template.replace("::", "/")
if (call.name not in ("subscribe", "advertiseService")
and 'NodeHandle' not in call.full_name):
return "?"
callback = (call.arguments[2]
if call.name == "subscribe"
else call.arguments[1])
while isinstance(callback, CppOperator):
callback = callback.arguments[0]
type_string = callback.result
try:
type_string = type_string.split(None, 1)[1]
except IndexError:
type_string = type_string.strip()
if type_string.startswith("(*)"):
type_string = type_string[3:]
if type_string[0] == "(" and type_string[-1] == ")":
type_string = type_string[1:-1]
if call.name == "advertiseService":
type_string = type_string.split(", ")[0]
is_const = type_string.startswith("const ")
if is_const:
type_string = type_string[6:]
is_ref = type_string.endswith(" &")
if is_ref:
type_string = type_string[:-2]
is_ptr = type_string.endswith("::ConstPtr")
if is_ptr:
type_string = type_string[:-10]
else:
is_ptr = type_string.endswith("ConstPtr")
if is_ptr:
type_string = type_string[:-8]
if type_string.endswith("::Request"):
type_string = type_string[:-9]
if type_string.startswith("boost::function"):
type_string = type_string[52:-25]
type_string = type_string.replace("::", "/")
if re.match(r"\w+/\w+$", type_string):
return type_string
return "?"
def _extract_action(self, call):
name = "?"
if "SimpleActionServer" in call.canonical_type and len(call.arguments) > 2:
arg = call.arguments[1]
if not isinstance(arg, basestring):
arg = resolve_expression(arg)
if isinstance(arg, basestring):
name = arg.split()[-1].replace("'", "")
elif "SimpleActionClient" in call.canonical_type and len(call.arguments) > 1:
if isinstance(call.arguments[0], basestring):
name = call.arguments[0]
return name
def _extract_action_type(self, call):
type_string = call.template[0]
return type_string.replace("::", "/")
def _extract_queue_size(self, call, queue_pos=1):
queue_size = resolve_expression(call.arguments[queue_pos])
if isinstance(queue_size, (int, float)):
return queue_size
return None
def _extract_latch(self, call, latch_pos):
expr = call.arguments[latch_pos]
self.log.debug("extract latched publisher from {!r}".format(expr))
if isinstance(expr, CppDefaultArgument):
self.log.debug("latch is default: false")
return False
latch = resolve_expression(expr)
self.log.debug("resolve latch expr returns {!r}".format(latch))
if not isinstance(latch, bool):
return None
return latch
def _extract_param_type(self, value):
self.log.debug("extract param type from {}".format(repr(value)))
if value is True or value is False:
return "bool"
if isinstance(value, int):
return "int"
if isinstance(value, float):
return "double"
if isinstance(value, basestring):
return "str"
cpp_type = getattr(value, "result", None)
if cpp_type:
self.log.debug("param type from C++ type {}".format(repr(cpp_type)))
if cpp_type == "std::string" or cpp_type == "char *":
return "str"
if cpp_type == "int":
return "int"
if cpp_type == "double":
return "double"
if cpp_type == "bool":
return "bool"
return "yaml" if cpp_type else None
def _extract_param_value(self, call, arg_pos=1):
self.log.debug("extract_param_value({!r}, pos={})".format(
call.arguments, arg_pos))
if len(call.arguments) <= arg_pos:
self.log.debug("Failed to extract param value: not enough arguments")
return None
value = resolve_expression(call.arguments[arg_pos])
if isinstance(value, CppEntity):
self.log.debug("Failed to extract param value: " + repr(value))
return None
return value
###############################################################################
# Python Primitive Extractor
###############################################################################
class RospyExtractor(LoggingObject):
queue_size_pos = {
'publisher': 6,
'subscriber': 4,
}
rospy_names = {
'publication': ('Publisher',),
'subscription': ('Subscriber',),
'service-def': ('Service',),
'service-call': ('ServiceProxy',),
}
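    # These names and positions correspond to rospy calls such as (sketch):
    #
    #   rospy.Publisher('chatter', String, queue_size=10)
    #   rospy.Subscriber('chatter', String, callback, queue_size=10)
    #   rospy.Service('add_two_ints', AddTwoInts, handler)
    #   rospy.ServiceProxy('add_two_ints', AddTwoInts)
    #
    # queue_size_pos records the positional index of queue_size when it is
    # not given as a keyword argument.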
@classmethod
def all_rospy_names(cls, type):
names = cls.rospy_names[type]
return tuple('rospy.' + name for name in names) + names
@staticmethod
def get_arg(call, pos, name):
try:
return next(
keyword.value
for keyword in call.named_args
if keyword.name == name)
except StopIteration:
try:
return call.arguments[pos]
except IndexError:
return None
@staticmethod
def invalid_call(call, n=1):
return (len(call.arguments) + len(call.named_args)
+ bool(call.star_args) + bool(call.kw_args)) <= n
@staticmethod
def split_ns_name(full_name):
if '/' in full_name:
ns, _, name = full_name.rpartition('/')
else:
ns, name = '', full_name
return ns, name
def _call_location(self, call):
try:
source_file = next(
sf
for sf in self.package.source_files
if sf.path == call.file)
except StopIteration:
            source_file = None
function = call.function
if function:
function = function.name
return Location(self.package, file=source_file, line=call.line,
fun=function)
@classmethod
def _extract_queue_size(cls, call):
pos = cls.queue_size_pos[call.name.lower()]
queue_size_arg = cls.get_arg(call, pos, 'queue_size')
try:
queue_size = resolve_expression(queue_size_arg)
assert(isinstance(queue_size, (int, float)))
return queue_size
except AssertionError:
return None
@classmethod
def _extract_message_type(cls, call, arg_name, msgs_imports, pkgs_imports, arg_pos=1):
        msg_type = cls.get_arg(call, arg_pos, arg_name)
# Very common case of calling type() on a message class
if isinstance(msg_type, CodeFunctionCall) and msg_type.name == 'type':
msg_type = msg_type.arguments[0].name
if isinstance(msg_type, CodeReference):
msg_type = resolve_reference(msg_type) or msg_type
if isinstance(msg_type, CodeReference):
if msg_type.field_of is None:
for pkg_name, msg_name in msgs_imports:
if msg_name == msg_type.name:
return pkg_name + "/" + msg_name
else:
maybe_pkg = msg_type.field_of
if isinstance(maybe_pkg, CodeReference):
pkg_name = maybe_pkg.name
if pkg_name in pkgs_imports:
return pkg_name + "/" + msg_type.name
return "?"
@classmethod
def _extract_topic(cls, call):
name = resolve_expression(cls.get_arg(call, 0, 'name'))
if not isinstance(name, basestring):
name = '?'
return cls.split_ns_name(name)
def _on_client(self, node, call):
if self.invalid_call(call):
return
ns, name = self._extract_topic(call)
msg_type = self._extract_message_type(call, 'service_class', self.msgs_list, self.pkgs_list)
depth = get_control_depth(call, recursive=True)
location = self._call_location(call)
conditions = [SourceCondition(pretty_str(c), location=location)
for c in get_conditions(call, recursive=True)]
cli = ServiceClientCall(name, ns, msg_type, location=location,
control_depth=depth, conditions=conditions,
repeats=is_under_loop(call, recursive=True))
node.client.append(cli)
self.log.debug("Found Client on %s/%s (%s)", ns, name, msg_type)
def _on_publication(self, node, call):
if self.invalid_call(call):
return
ns, name = self._extract_topic(call)
msg_type = self._extract_message_type(call, 'data_class', self.msgs_list, self.pkgs_list)
queue_size = self._extract_queue_size(call)
depth = get_control_depth(call, recursive=True)
location = self._call_location(call)
conditions = [SourceCondition(pretty_str(c), location=location)
for c in get_conditions(call, recursive=True)]
pub = AdvertiseCall(name, ns, msg_type, queue_size, location=location,
control_depth=depth, conditions=conditions,
repeats=is_under_loop(call, recursive=True))
node.advertise.append(pub)
self.log.debug("Found AdvertiseCall on %s/%s (%s)", ns, name, msg_type)
def _on_service(self, node, call):
if self.invalid_call(call):
return
ns, name = self._extract_topic(call)
msg_type = self._extract_message_type(call, 'service_class', self.msgs_list, self.pkgs_list)
depth = get_control_depth(call, recursive=True)
location = self._call_location(call)
conditions = [SourceCondition(pretty_str(c), location=location)
for c in get_conditions(call, recursive=True)]
srv = AdvertiseServiceCall(name, ns, msg_type, location=location,
control_depth=depth, conditions=conditions,
repeats=is_under_loop(call, recursive=True))
node.service.append(srv)
self.log.debug("Found Service on %s/%s (%s)", ns, name, msg_type)
def _on_subscription(self, node, call):
if self.invalid_call(call):
return
ns, name = self._extract_topic(call)
msg_type = self._extract_message_type(call, 'data_class', self.msgs_list, self.pkgs_list)
queue_size = self._extract_queue_size(call)
depth = get_control_depth(call, recursive=True)
location = self._call_location(call)
conditions = [SourceCondition(pretty_str(c), location=location)
for c in get_conditions(call, recursive=True)]
sub = SubscribeCall(name, ns, msg_type, queue_size, location=location,
control_depth=depth, conditions=conditions,
repeats=is_under_loop(call, recursive=True))
node.subscribe.append(sub)
self.log.debug("Found SubscribeCall on %s/%s (%s)", ns, name, msg_type)
def _query_comm_primitives(self, node, gs):
##################################
# Topics
##################################
publications = (CodeQuery(gs).all_calls
.where_name(('Publisher', 'rospy.Publisher'))
.get())
subscriptions = (CodeQuery(gs).all_calls
.where_name(('Subscriber', 'rospy.Subscriber'))
.get())
for call in publications:
self._on_publication(node, call)
for call in subscriptions:
self._on_subscription(node, call)
##################################
# Services
##################################
service_defs = (CodeQuery(gs).all_calls
.where_name(self.all_rospy_names('service-def'))
.get())
service_calls = (CodeQuery(gs).all_calls
.where_name(self.all_rospy_names('service-call'))
.get())
for call in service_defs:
self._on_service(node, call)
for call in service_calls:
self._on_client(node, call)
def _on_param_getter(self, node, call):
if self.invalid_call(call, n=0):
return
name = resolve_expression(self.get_arg(call, 0, 'param_name'))
if not isinstance(name, basestring):
name = '?'
ns, name = self.split_ns_name(name)
param_type = None
default_value = self.get_arg(call, 1, 'default')
if default_value is not None:
default_value = resolve_expression(default_value)
depth = get_control_depth(call, recursive=True)
location = self._call_location(call)
conditions = [SourceCondition(pretty_str(c), location=location)
for c in get_conditions(call, recursive=True)]
getter = GetParamCall(name, ns, param_type,
default_value=default_value, location=location,
control_depth=depth, conditions=conditions,
repeats=is_under_loop(call, recursive=True))
node.read_param.append(getter)
self.log.debug("Found GetParamCall on %s/%s", ns, name)
def _on_param_setter(self, node, call):
if self.invalid_call(call):
return
name = resolve_expression(self.get_arg(call, 0, 'param_name'))
if not isinstance(name, basestring):
name = '?'
ns, name = self.split_ns_name(name)
param_type = None
value = resolve_expression(self.get_arg(call, 1, 'param_value'))
depth = get_control_depth(call, recursive=True)
location = self._call_location(call)
conditions = [SourceCondition(pretty_str(c), location=location)
for c in get_conditions(call, recursive=True)]
setter = SetParamCall(name, ns, param_type, value=value,
location=location, control_depth=depth, conditions=conditions,
repeats=is_under_loop(call, recursive=True))
node.write_param.append(setter)
self.log.debug("Found SetParamCall on %s/%s", ns, name)
def _query_param_primitives(self, node, gs):
getters = (CodeQuery(gs).all_calls
.where_name(('get_param', 'rospy.get_param'))
.get())
setters = (CodeQuery(gs).all_calls
.where_name(('set_param', 'rospy.set_param'))
.get())
for call in getters:
self._on_param_getter(node, call)
for call in setters:
self._on_param_setter(node, call)
# FIXME: missing:
# rospy.has_param(param_name)
# rospy.delete_param(param_name)
def _setup_path(self):
setup_file = os.path.join(self.package.path, 'setup.py')
if not os.path.isfile(setup_file):
return []
parser = PyAstParser(workspace=self.package.path)
setup = parser.parse(setup_file)
setup_call = (CodeQuery(setup).all_calls
.where_name('generate_distutils_setup')
.get()
or
CodeQuery(setup).all_calls
.where_name('setup')
.get())[0]
package_dir = self.get_arg(setup_call, 0, 'package_dir')
if hasattr(package_dir, 'value'):
package_dir = {
keyword.name: keyword.value
for keyword in self.get_arg(setup_call, 0, 'package_dir').value
}
else:
src_path = os.path.join(self.package.path, 'src')
package_dir = {'': 'src'} if os.path.exists(src_path) else {}
root = package_dir.get('', '')
return [os.path.join(self.package.path, root)]
def __init__(self, package, workspace):
self.package = package
self.workspace = workspace
self.pythonpath = self._setup_path()
def extract(self, node):
self.log.debug("Parsing Python files for node %s", node.id)
self.log.debug("PyAstParser(pythonpath={!r}, workspace={!r})".format(
self.pythonpath, self.workspace))
parser = PyAstParser(pythonpath=self.pythonpath,
workspace=self.workspace)
for sf in node.source_files:
self.log.debug("Parsing Python file %s", sf.path)
if parser.parse(sf.path) is None:
self.log.warning("no compile commands for " + sf.path)
node.source_tree = parser.global_scope
# In theory the imported names list should not be needed here; this is a
# workaround to locate the full description of ROS msg types (i.e. PkgName/MsgName).
self.msgs_list = []
self.pkgs_list = []
for imp_name in parser.imported_names_list:
s = str(imp_name)
if "msg" in s or "srv" in s:
ss = s.split(".")
if len(ss) < 2:
continue
if ss[-1] == "msg" or ss[-1] == "srv":
self.pkgs_list.append(ss[0])
elif ss[1] == "msg" or ss[1] == "srv":
self.msgs_list.append((ss[0], ss[2]))
else:
self.log.debug(("Python import with 'msg' or 'srv', "
"but unable to process it: ")
+ s)
# ----- queries after parsing, since global scope is reused -----------
self._query_comm_primitives(node, parser.global_scope)
self._query_param_primitives(node, parser.global_scope)
###############################################################################
# Node Hints
###############################################################################
class NodeHints2(LoggingObject):
# pkg/node:
# fix: (fix variables)
# advertise@1: name
# getParam@1: true
# advertise: (always adds)
# - full JSON spec
# - full JSON spec
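# A minimal sketch of the expected structure, with hypothetical package/node
# names and values (the exact fields of each "full JSON spec" entry depend on
# the corresponding *Call.from_JSON_specs implementation):
#
#   hints = {
#       "my_pkg/my_node": {
#           "fix": {"advertise@1": "/chatter", "getParam@1": True},
#           "advertise": [{...}],  # one full JSON spec per advertised topic
#           "hpl": {"properties": [], "assumptions": []},
#       }
#   }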
def __init__(self, hints, pkg_finder=None):
if not isinstance(hints, dict):
raise ValueError("expected dict of hints, got " + repr(hints))
for key, value in hints.items():
if not isinstance(key, basestring) or key.count("/") != 1:
raise ValueError("expected 'pkg/node' key, found " + repr(key))
if not isinstance(value, dict):
raise ValueError("expected dict value, found " + repr(value))
self.hints = hints
self.pkg_finder = pkg_finder
def apply_to(self, nodes, create=False):
if not self.hints:
return []
nodes = self._list_to_dict(nodes)
if create and not self.pkg_finder:
raise ValueError("received create=True but no pkg_finder")
new_nodes = []
for node_type, node_hints in self.hints.items():
node = nodes.get(node_type)
if node is not None:
fix_hints = node_hints.get("fix", _EMPTY_DICT)
if not isinstance(fix_hints, dict):
raise ValueError("expected dict in {}:fix; got {!r}".format(
node_type, fix_hints))
self.log.info("Merging extracted Node with hints: " + node_type)
self.log.debug("node specs %s %s", node, node_hints)
node.resolve_variables(fix_hints)
elif create:
self.log.info("Creating new Node from hints: " + node_type)
self.log.debug("node specs %s %s", node_type, node_hints)
node = self._create(node_type, node_hints)
if node is not None:
new_nodes.append(node)
if node is not None:
self._add_primitives(node, node_hints)
hpl = node_hints.get("hpl", _EMPTY_DICT)
node.hpl_properties = list(hpl.get("properties", _EMPTY_LIST))
node.hpl_assumptions = list(hpl.get("assumptions", _EMPTY_LIST))
return new_nodes
def _create(self, node_type, hints):
pkg_name, exe = node_type.split("/")
pkg = self.pkg_finder.get("package:" + pkg_name)
if pkg is None:
self.log.error("Unable to find package: " + repr(pkg_name))
return None
rosname = hints.get("rosname")
nodelet_cls = hints.get("nodelet")
node = Node(exe, pkg, rosname=rosname, nodelet=nodelet_cls)
return node
def _add_primitives(self, node, hints):
for key, attr, cls in self._PRIMITIVES:
calls = getattr(node, attr)
for datum in hints.get(key, _EMPTY_LIST):
call = cls.from_JSON_specs(datum)
call.location = self._location_from_JSON(datum.get("location"))
calls.append(call)
_PRIMITIVES = (
("advertise", "advertise", AdvertiseCall),
("subscribe", "subscribe", SubscribeCall),
("advertiseService", "service", AdvertiseServiceCall),
("serviceClient", "client", ServiceClientCall),
("getParam", "read_param", GetParamCall),
("setParam", "write_param", SetParamCall)
)
def _list_to_dict(self, nodes):
if isinstance(nodes, dict):
return nodes
return {node.node_name: node for node in nodes}
# FIXME code duplication
def _location_from_JSON(self, datum):
if datum is None:
return None
pkg = self.pkg_finder.get("package:" + datum["package"])
if pkg is None:
self.log.error("Unable to find package: " + repr(datum["package"]))
return None
source_file = None
filename = datum["file"]
if filename:
try:
source_file = next(sf for sf in pkg.source_files
if sf.full_name == filename)
except StopIteration:
self.log.error("Unable to find file: '{}/{}'".format(
datum["package"], filename))
return Location(pkg, file=source_file,
line=datum.get("line", 1), col=datum.get("column", 1),
fun=datum.get("function"), cls=datum.get("class"))
| _build_node |
token.py | from .iterator import Span, RawIterator
| self.end = end.copy()
@property
def raw(self):
return str(Span(RawIterator(self.start), RawIterator(self.end)))
def __str__(self):
return str(Span(self.start, self.end))
class EmptyToken(Token):
def __init__(self):
super().__init__(RawIterator(''), RawIterator(''))
print(self)
class StartToken(Token):
pass
class EndToken(Token):
pass | class Token:
def __init__(self, start, end):
self.start = start.copy() |
package.py | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Cblas(Package):
"""The BLAS (Basic Linear Algebra Subprograms) are routines that
provide standard building blocks for performing basic vector and
matrix operations."""
homepage = "http://www.netlib.org/blas/_cblas/"
# tarball has no version, but on the date below, this checksum was correct.
version('2015-06-06', sha256='0f6354fd67fabd909baf57ced2ef84e962db58fae126e4f41b21dd4fec60a2a3',
url='http://www.netlib.org/blas/blast-forum/cblas.tgz')
depends_on('blas')
parallel = False
def patch(self):
mf = FileFilter('Makefile.in')
mf.filter('^BLLIB =.*', 'BLLIB = {0}'.format(
' '.join(self.spec['blas'].libs.libraries)))
mf.filter('^CC =.*', 'CC = cc')
mf.filter('^FC =.*', 'FC = fc')
def install(self, spec, prefix):
| make('all')
mkdirp(prefix.lib)
mkdirp(prefix.include)
# Rename the generated lib file to libcblas.a
install('lib/cblas_LINUX.a', prefix.lib.join('libcblas.a'))
install('include/cblas.h', prefix.include)
install('include/cblas_f77.h', prefix.include) |
|
link_linux.go | package tenus
import (
"fmt"
"net"
"os"
"syscall"
"github.com/docker/libcontainer/netlink"
"github.com/docker/libcontainer/system"
)
// LinkOptions allows you to specify network link options.
type LinkOptions struct {
// MAC address
MacAddr string
// Maximum Transmission Unit
MTU int
// Link network flags i.e. FlagUp, FlagLoopback, FlagMulticast
Flags net.Flags
// Network namespace in which the network link should be created
Ns int
}
// Linker is a generic Linux network link
type Linker interface {
// NetInterface returns the link's logical network interface
NetInterface() *net.Interface
// DeleteLink deletes the link from Linux host
DeleteLink() error
// SetLinkMTU sets the link's MTU.
SetLinkMTU(int) error
// SetLinkMacAddress sets the link's MAC address.
SetLinkMacAddress(string) error
// SetLinkUp brings the link up
SetLinkUp() error
// SetLinkDown brings the link down
SetLinkDown() error
// SetLinkIp configures the link's IP address
SetLinkIp(net.IP, *net.IPNet) error
// UnsetLinkIp removes an IP address from the link
UnsetLinkIp(net.IP, *net.IPNet) error
// SetLinkDefaultGw configures the link's default gateway
SetLinkDefaultGw(*net.IP) error
// SetLinkNetNsPid moves the link to network namespace specified by PID
SetLinkNetNsPid(int) error
// SetLinkNetInNs configures network settings of the link in network namespace
SetLinkNetInNs(int, net.IP, *net.IPNet, *net.IP) error
}
// Link has a logical network interface
type Link struct {
ifc *net.Interface
}
// NewLink creates new network link on Linux host.
//
// It is equivalent of running: ip link add name ${ifcName} type dummy
// NewLink returns Linker which is initialized to a pointer of type Link if the
// link was created successfully on the Linux host.
// It returns error if the network link could not be created on Linux host.
func NewLink(ifcName string) (Linker, error) {
if ok, err := NetInterfaceNameValid(ifcName); !ok {
return nil, err
}
if _, err := net.InterfaceByName(ifcName); err == nil {
return nil, fmt.Errorf("Interface name %s already assigned on the host", ifcName)
}
if err := netlink.NetworkLinkAdd(ifcName, "dummy"); err != nil {
return nil, fmt.Errorf("Could not create new link %s: %s", ifcName, err)
}
newIfc, err := net.InterfaceByName(ifcName)
if err != nil {
return nil, fmt.Errorf("Could not find the new interface: %s", err)
}
return &Link{
ifc: newIfc,
}, nil
}
// NewLinkFrom creates new tenus link on Linux host from an existing interface of given name
func NewLinkFrom(ifcName string) (Linker, error) { | if ok, err := NetInterfaceNameValid(ifcName); !ok {
return nil, err
}
newIfc, err := net.InterfaceByName(ifcName)
if err != nil {
return nil, fmt.Errorf("Could not find the new interface: %s", err)
}
return &Link{
ifc: newIfc,
}, nil
}
// NewLinkWithOptions creates new network link on Linux host and sets some of its network
// parameters passed in as LinkOptions
//
// Calling NewLinkWithOptions is equivalent of running following commands one after another if
// particular option is passed in as a parameter:
// ip link add name ${ifcName} type dummy
// ip link set dev ${ifcName} address ${MAC address}
// ip link set dev ${ifcName} mtu ${MTU value}
// ip link set dev ${ifcName} up
// NewLinkWithOptions returns Linker which is initialized to a pointer of type Link if the network
// link with given LinkOptions was created successfully on the Linux host.
// It attempts to delete the link if any of the LinkOptions are incorrect or if setting the options
// failed and returns error.
func NewLinkWithOptions(ifcName string, opts LinkOptions) (Linker, error) {
if ok, err := NetInterfaceNameValid(ifcName); !ok {
return nil, err
}
if _, err := net.InterfaceByName(ifcName); err == nil {
return nil, fmt.Errorf("Interface name %s already assigned on the host", ifcName)
}
if err := netlink.NetworkLinkAdd(ifcName, "dummy"); err != nil {
return nil, fmt.Errorf("Could not create new link %s: %s", ifcName, err)
}
newIfc, err := net.InterfaceByName(ifcName)
if err != nil {
return nil, fmt.Errorf("Could not find the new interface: %s", err)
}
if (opts != LinkOptions{}) {
errOpts := setLinkOptions(newIfc, opts)
if errOpts != nil {
if errDel := DeleteLink(newIfc.Name); errDel != nil {
return nil, fmt.Errorf("Incorrect options specified: %s. Attempt to delete the link failed: %s",
errOpts, errDel)
}
return nil, fmt.Errorf("Could not set link options: %s", errOpts)
}
}
return &Link{
ifc: newIfc,
}, nil
}
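// Illustrative usage only (not part of the library API documented above);
// the interface name, MAC address and MTU values here are hypothetical:
//
//	lnk, err := NewLinkWithOptions("dummy0", LinkOptions{
//		MacAddr: "aa:bb:cc:dd:ee:ff",
//		MTU:     1400,
//		Flags:   net.FlagUp,
//	})
//	if err != nil {
//		// handle error
//	}
//	defer lnk.DeleteLink()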
// DeleteLink deletes a network link from the Linux host.
// It is equivalent of running: ip link delete dev ${name}
func DeleteLink(name string) error {
return netlink.NetworkLinkDel(name)
}
// NetInterface returns link's logical network interface.
func (l *Link) NetInterface() *net.Interface {
return l.ifc
}
// DeleteLink deletes link interface on Linux host.
// It is equivalent of running: ip link delete dev ${interface name}
func (l *Link) DeleteLink() error {
return netlink.NetworkLinkDel(l.NetInterface().Name)
}
// SetLinkMTU sets link's MTU.
// It is equivalent of running: ip link set dev ${interface name} mtu ${MTU value}
func (l *Link) SetLinkMTU(mtu int) error {
return netlink.NetworkSetMTU(l.NetInterface(), mtu)
}
// SetLinkMacAddress sets link's MAC address.
// It is equivalent of running: ip link set dev ${interface name} address ${address}
func (l *Link) SetLinkMacAddress(macaddr string) error {
return netlink.NetworkSetMacAddress(l.NetInterface(), macaddr)
}
// SetLinkUp brings the link up.
// It is equivalent of running: ip link set dev ${interface name} up
func (l *Link) SetLinkUp() error {
return netlink.NetworkLinkUp(l.NetInterface())
}
// SetLinkDown brings the link down.
// It is equivalent of running: ip link set dev ${interface name} down
func (l *Link) SetLinkDown() error {
return netlink.NetworkLinkDown(l.NetInterface())
}
// SetLinkIp configures the link's IP address.
// It is equivalent of running: ip address add ${address}/${mask} dev ${interface name}
func (l *Link) SetLinkIp(ip net.IP, network *net.IPNet) error {
return netlink.NetworkLinkAddIp(l.NetInterface(), ip, network)
}
// UnsetLinkIp removes the link's IP address.
// It is equivalent of running: ip address del ${address}/${mask} dev ${interface name}
func (l *Link) UnsetLinkIp(ip net.IP, network *net.IPNet) error {
return netlink.NetworkLinkDelIp(l.NetInterface(), ip, network)
}
// SetLinkDefaultGw configures the link's default Gateway.
// It is equivalent of running: ip route add default via ${ip address}
func (l *Link) SetLinkDefaultGw(gw *net.IP) error {
return netlink.AddDefaultGw(gw.String(), l.NetInterface().Name)
}
// SetLinkNetNsPid moves the link to Network namespace specified by PID.
func (l *Link) SetLinkNetNsPid(nspid int) error {
return netlink.NetworkSetNsPid(l.NetInterface(), nspid)
}
// SetLinkNetInNs configures network settings of the link in network namespace specified by PID.
func (l *Link) SetLinkNetInNs(nspid int, ip net.IP, network *net.IPNet, gw *net.IP) error {
origNs, _ := NetNsHandle(os.Getpid())
defer syscall.Close(int(origNs))
defer system.Setns(origNs, syscall.CLONE_NEWNET)
if err := SetNetNsToPid(nspid); err != nil {
return fmt.Errorf("Setting network namespace failed: %s", err)
}
if err := netlink.NetworkLinkAddIp(l.NetInterface(), ip, network); err != nil {
return fmt.Errorf("Unable to set IP: %s in pid: %d network namespace", ip.String(), nspid)
}
if err := netlink.NetworkLinkUp(l.ifc); err != nil {
return fmt.Errorf("Unable to bring %s interface UP: %d", l.ifc.Name, nspid)
}
if gw != nil {
if err := netlink.AddDefaultGw(gw.String(), l.NetInterface().Name); err != nil {
return fmt.Errorf("Unable to set Default gateway: %s in pid: %d network namespace", gw.String(), nspid)
}
}
return nil
}
// SetLinkNsFd sets the link's Linux namespace to the one specified by filesystem path.
func (l *Link) SetLinkNsFd(nspath string) error {
fd, err := syscall.Open(nspath, syscall.O_RDONLY, 0)
if err != nil {
return fmt.Errorf("Could not attach to Network namespace: %s", err)
}
return netlink.NetworkSetNsFd(l.NetInterface(), fd)
}
// SetLinkNsToDocker sets the link's Linux namespace to a running Docker one specified by Docker name.
func (l *Link) SetLinkNsToDocker(name string, dockerHost string) error {
pid, err := DockerPidByName(name, dockerHost)
if err != nil {
return fmt.Errorf("Failed to find docker %s : %s", name, err)
}
return l.SetLinkNetNsPid(pid)
}
// RenameInterfaceByName renames an interface of given name.
func RenameInterfaceByName(old string, newName string) error {
iface, err := net.InterfaceByName(old)
if err != nil {
return err
}
return netlink.NetworkChangeName(iface, newName)
}
// setLinkOptions validates and sets link's various options passed in as LinkOptions.
func setLinkOptions(ifc *net.Interface, opts LinkOptions) error {
macaddr, mtu, flags, ns := opts.MacAddr, opts.MTU, opts.Flags, opts.Ns
// if MTU is passed in LinkOptions
if mtu != 0 {
if err := validMtu(mtu); err != nil {
return err
}
if err := netlink.NetworkSetMTU(ifc, mtu); err != nil {
return fmt.Errorf("Unable to set MTU: %s", err)
}
}
// if MacAddress is passed in LinkOptions
if macaddr != "" {
if err := validMacAddress(macaddr); err != nil {
return err
}
if err := netlink.NetworkSetMacAddress(ifc, macaddr); err != nil {
return fmt.Errorf("Unable to set MAC Address: %s", err)
}
}
// if ns is passed in LinkOptions
if ns != 0 {
if err := validNs(ns); err != nil {
return err
}
if err := netlink.NetworkSetNsPid(ifc, ns); err != nil {
return fmt.Errorf("Unable to set Network namespace: %s", err)
}
}
// if flags is passed in LinkOptions
if flags != 0 {
if err := validFlags(flags); err != nil {
return err
}
if ns != 0 && ns != 1 && ns != os.Getpid() {
if (flags & syscall.IFF_UP) == syscall.IFF_UP {
origNs, _ := NetNsHandle(os.Getpid())
defer syscall.Close(int(origNs))
defer system.Setns(origNs, syscall.CLONE_NEWNET)
if err := SetNetNsToPid(ns); err != nil {
return fmt.Errorf("Switching to %d network namespace failed: %s", ns, err)
}
if err := netlink.NetworkLinkUp(ifc); err != nil {
return fmt.Errorf("Unable to bring %s interface UP: %d", ifc.Name, ns)
}
}
} else {
if err := netlink.NetworkLinkUp(ifc); err != nil {
return fmt.Errorf("Could not bring up network link %s: %s", ifc.Name, err)
}
}
}
return nil
} | |
test_tx.rs | use solana_sdk::{
hash::Hash,
instruction::CompiledInstruction,
signature::{Keypair, Signer},
stake,
system_instruction::SystemInstruction,
system_program, system_transaction,
transaction::Transaction,
};
use solana_vote_program::vote_transaction;
pub fn test_tx() -> Transaction {
let keypair1 = Keypair::new();
let pubkey1 = keypair1.pubkey();
let zero = Hash::default();
system_transaction::transfer(&keypair1, &pubkey1, 42, zero)
}
pub fn test_multisig_tx() -> Transaction {
let keypair0 = Keypair::new();
let keypair1 = Keypair::new();
let keypairs = vec![&keypair0, &keypair1];
let lamports = 5;
let blockhash = Hash::default();
let transfer_instruction = SystemInstruction::Transfer { lamports };
let program_ids = vec![system_program::id(), stake::program::id()];
let instructions = vec![CompiledInstruction::new(
0,
&transfer_instruction,
vec![0, 1],
)];
Transaction::new_with_compiled_instructions(
&keypairs,
&[],
blockhash,
program_ids,
instructions,
)
}
pub fn vote_tx() -> Transaction | {
let keypair = Keypair::new();
vote_transaction::new_vote_transaction(
vec![2],
Hash::default(),
Hash::default(),
&keypair,
&keypair,
&keypair,
None,
)
} |
|
getResultData.ts | import { denormalize } from 'normalizr';
import { State } from '../reducer';
import { EndpointParams } from '../types';
import { getRequest } from './getRequest';
/**
* Get the de-normalized result data of an endpoint, or undefined if not (yet) available. This value is automatically
* bound when using {@link withApiData}.
*/
export const getResultData = (
state: State,
endpointKey: string,
params?: EndpointParams,
instanceId: string = ''
): any | any[] | void => {
const config = state.endpointConfig[endpointKey];
if (!config) {
if (process.env.NODE_ENV === 'development') {
console.warn(`apiData.getResult: configuration of endpoint ${endpointKey} not found.`);
}
return;
}
const request = getRequest(state, endpointKey, params, instanceId);
if (!request) {
return;
}
return request.networkStatus === 'failed'
? undefined
: request.result && (
config.responseSchema
? denormalize(request.result, config.responseSchema, state.entities) | }; | : request.result
); |
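// Illustrative only: a hypothetical selector built on top of getResultData.
// The endpoint key ('getUser'), its params and the store shape are assumptions
// made for this example, not part of this module.
//
// const selectUser = (state: { apiData: State }, id: string) =>
//     getResultData(state.apiData, 'getUser', { id });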
insert.rs | use super::Table;
use quote::quote_spanned;
/// INSERT INTO tablename (name1, name2...) VALUES (?1, ?2...)
pub(super) fn | (table: &Table) -> proc_macro2::TokenStream {
let sql = makesql_insert(&table);
super::validate_sql_or_abort(&sql);
// let idents = table.columns.iter().map(|c| &c.ident).collect::<Vec<_>>();
let columns = table
.columns
.iter()
.map(|c| {
let ident = &c.ident;
quote_spanned!(c.span=> &self.#ident as &dyn ::turbosql::ToSql)
})
.collect::<Vec<_>>();
quote_spanned! { table.span =>
#[allow(dead_code)]
pub fn insert(&self) -> ::turbosql::Result<i64> {
// #table::__turbosql_ensure_table_created();
assert!(self.rowid.is_none());
let db = ::turbosql::__TURBOSQL_DB.lock().unwrap(); // todo: use tokio's lock?
let mut stmt = db.prepare_cached(#sql)?;
stmt.insert(&[#(#columns),*] as &[&dyn ::turbosql::ToSql])
}
#[allow(dead_code)]
pub fn insert_batch(rows: &[#table]) {
for row in rows {
row.insert().unwrap();
}
}
}
}
fn makesql_insert(table: &Table) -> String {
let mut sql = format!("INSERT INTO {} (", table.name);
sql += table.columns.iter().map(|c| c.name.as_str()).collect::<Vec<_>>().join(", ").as_str();
sql += ") VALUES (";
sql += table.columns.iter().map(|_| "?").collect::<Vec<_>>().join(", ").as_str();
sql += ")";
sql
}
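// Illustrative only: for a hypothetical table `person` with columns
// `rowid`, `name` and `age`, makesql_insert would produce
// "INSERT INTO person (rowid, name, age) VALUES (?, ?, ?)".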
| insert |
test_unit_issuance_date.py | import unittest
from cert_issuer.models import validate_issuance_date
class | (unittest.TestCase):
def test_validate_issuance_date_invalid_RFC3339 (self):
candidate = '20200202'
try:
validate_issuance_date(candidate)
except:
assert True
return
assert False
def test_validate_issuance_date_valid_RFC3339 (self):
candidate = '2020-02-02T00:00:00Z'
try:
validate_issuance_date(candidate)
except:
assert False
return
assert True
if __name__ == '__main__':
unittest.main()
| UnitValidationV3 |
cifar100.py | import numpy as np
import matplotlib.pyplot as plt
import os
from tensorflow.keras.models import load_model
from tensorflow.keras.backend import clear_session
from keras.utils import to_categorical
import tensorflow.keras as keras
from .common import save_pickle, load_pickle
from tqdm import tqdm
# utils/data_iterators/cifar100_ResNet44v1_model.171.h5 => flatten
# utils/data_iterators/cifar100_ResNet44v1_model.171.h5 => activation_42
class CIFAR100_DataIterator:
| def __init__(self, train_data, test_data, batch_size = 32,
randomize = True, task_labels = None,
embedding_save_file = 'utils/data_iterators/cifar100_embedding.pkl',
embedding_model_file = 'utils/data_iterators/cifar100_ResNet44v1_model.171.h5',
embedding_model_layer = 'activation_42'): # 'flatten'):
assert(task_labels is not None)
self.train_x, self.train_y = train_data
self.n = len(self.train_y)
print('Training examples = %d' % self.n)
self.test_x, self.test_y = test_data
self.tn = len(self.test_y)
print('Test examples = %d' % self.tn)
self.i = 0
self.batch_size = batch_size
print('Batch size = %d' % self.batch_size)
self.randomize = randomize
if randomize:
idx = np.random.permutation(self.n)
self.train_x = self.train_x[idx]
self.train_y = self.train_y[idx]
print('Shuffled training data')
self.orig_data = (np.copy(self.train_x), np.copy(self.train_y),
np.copy(self.test_x), np.copy(self.test_y))
self.embedding_save_file = embedding_save_file
self.embedding_model_file = embedding_model_file
self.embedding_model_layer = embedding_model_layer
self.reshape_dims = (64*8*8,) # (64,)
self.convert_to_embeddings()
self.n_tasks = len(task_labels)
self.task_labels = task_labels
self.n_labels_per_task = len(task_labels[0])
for t in self.task_labels: assert(len(t) == self.n_labels_per_task)
self.get_taskwise_data()
self.switch_task(0)
def img_fn_cifar100(img):
image = np.zeros((32,32,3), dtype=np.uint8)
image[...,0] = np.reshape(img[:1024], (32,32)) # Red channel
image[...,1] = np.reshape(img[1024:2048], (32,32)) # Green channel
image[...,2] = np.reshape(img[2048:], (32,32)) # Blue channel
return image
self.img_fn = img_fn_cifar100
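# Note: task_labels is expected to be a list of equally sized label groups,
# one per task. A hypothetical split-CIFAR-100 setup might look like
# task_labels = [[0, 1, ..., 9], [10, 11, ..., 19], ...], i.e. ten tasks of
# ten classes each; the exact grouping is up to the caller.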
def iterative_fn(self, fn, dataset, batches = 100):
ret = []
n = dataset.shape[0]
per_batch_size = n // batches
for i in tqdm(range(batches)):
if i+1 != batches:
ret += [fn(dataset[i*per_batch_size:(i+1)*per_batch_size])]
else:
ret += [fn(dataset[i*per_batch_size:])]
ret = np.vstack(ret)
return ret
def convert_to_embeddings(self):
if os.path.isfile(self.embedding_save_file):
print('Embedding file %s exists, skipping embedding generation.'
% self.embedding_save_file)
self.etrain_x, self.etest_x = load_pickle(self.embedding_save_file)
else:
assert(os.path.isfile(self.embedding_model_file))
model = load_model(self.embedding_model_file)
print("Loaded model: %s" % self.embedding_model_file)
train_x = self.train_x.astype('float32') / 255
train_x_mean = np.mean(train_x, axis = 0)
train_x -= train_x_mean
test_x = self.test_x.astype('float32') / 255
test_x -= train_x_mean
results = model.evaluate(test_x, to_categorical(self.test_y))
print("Test acc: %s" % results)
intermediate_layer = model.\
get_layer(self.embedding_model_layer).output
embedding_model = keras.Model(
inputs = model.input, outputs = intermediate_layer)
assert(len(self.reshape_dims) == 1)
dim = self.reshape_dims[0]
fn = lambda x: np.reshape(embedding_model.predict(x), [-1, dim])
self.etrain_x = self.iterative_fn(fn, train_x)
self.etest_x = self.iterative_fn(fn, test_x)
save_pickle([self.etrain_x, self.etest_x],
savefile = self.embedding_save_file)
clear_session()
print('Loaded embeddings.')
# Remap class labels, e.g. 33, 2, 4 => 0, 1, 2
def remap(self, x, classnums):
# print(x)
x = np.squeeze(x)
# curr_labels = np.unique(x)
# new_labels = {label: i for i, label in enumerate(curr_labels)}
new_labels = {label: i for i, label in enumerate(classnums)}
x_remapped = np.copy(x)
for i in range(x.shape[0]):
x_remapped[i] = new_labels[x[i]]
# print(np.unique(x), np.unique(x_remapped))
return x_remapped, new_labels
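# For example, remap(np.array([33, 2, 4, 2]), classnums=[33, 2, 4]) returns
# (array([0, 1, 2, 1]), {33: 0, 2: 1, 4: 2}).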
def get_taskwise_data(self):
self.tasks = {}
for i in range(self.n_tasks):
self.tasks[i] = {}
class_nums = self.task_labels[i]
tr_indices = np.array([np.where(self.train_y == class_num)[0] for \
class_num in class_nums]).flatten()
test_indices = np.array([np.where(self.test_y == class_num)[0] for \
class_num in class_nums]).flatten()
self.tasks[i]['train_x'] = self.etrain_x[tr_indices]
self.tasks[i]['img_train_x'] = self.train_x[tr_indices]
self.tasks[i]['train_y'], tr_labels = self.remap(self.train_y[tr_indices], class_nums)
self.tasks[i]['n'] = len(tr_indices)
if self.randomize:
idx = np.random.permutation(self.tasks[i]['n'])
self.tasks[i]['train_x'] = self.tasks[i]['train_x'][idx]
self.tasks[i]['img_train_x'] = self.tasks[i]['img_train_x'][idx]
self.tasks[i]['train_y'] = self.tasks[i]['train_y'][idx]
self.tasks[i]['test_x'] = self.etest_x[test_indices]
self.tasks[i]['img_test_x'] = self.test_x[test_indices]
self.tasks[i]['test_y'], test_labels = self.remap(self.test_y[test_indices], class_nums)
self.tasks[i]['tn'] = len(test_indices)
if self.randomize:
idx = np.random.permutation(self.tasks[i]['tn'])
self.tasks[i]['test_x'] = self.tasks[i]['test_x'][idx]
self.tasks[i]['img_test_x'] = self.tasks[i]['img_test_x'][idx]
self.tasks[i]['test_y'] = self.tasks[i]['test_y'][idx]
assert(tr_labels == test_labels)
def switch_task(self, new_task_idx):
assert(0 <= new_task_idx < self.n_tasks)
self.curr_idx = new_task_idx
self.n = self.tasks[self.curr_idx]['n']
self.tn = self.tasks[self.curr_idx]['tn']
self.train_x = self.tasks[self.curr_idx]['train_x']
self.img_train_x = self.tasks[self.curr_idx]['img_train_x']
self.train_y = np.squeeze(self.tasks[self.curr_idx]['train_y'])
self.test_x = self.tasks[self.curr_idx]['test_x']
self.img_test_x = self.tasks[self.curr_idx]['img_test_x']
self.test_y = np.squeeze(self.tasks[self.curr_idx]['test_y'])
# print('switch to %d: %s' % (new_task_idx, np.unique(self.test_y)))
def inspect(self):
print('inspect')
r, c = self.n_tasks, self.n_labels_per_task
xw = min(15, c)
yw = max(1.5*r, 10)
fig = plt.figure(figsize = (xw, yw))
subplot_i = 0
for task in range(self.n_tasks):
self.switch_task(task)
classes_to_show = np.unique(self.test_y)
all_indices = [np.where(self.test_y == class_num)[0] for class_num in classes_to_show]
n_ex = [len(item) for item in all_indices]
example_indices = [np.random.choice(item) for item in all_indices]
examples = self.img_test_x[example_indices]
for i, img_idx in enumerate(classes_to_show):
ax = fig.add_subplot(r, c, subplot_i+1)
ax.set_xticks(())
ax.set_yticks(())
label_human_readable = str(img_idx) # TODO
img = examples[img_idx]
ax.set_xlabel(label_human_readable)
plt.imshow(img, cmap='gray', interpolation='none')
subplot_i += 1
# plt.tight_layout(True)
plt.savefig("inspect.png")
plt.show()
def __iter__(self):
return self
def __next__(self):
if self.i+self.batch_size > self.n:
self.i = 0
ret_data = self.train_x[self.i:self.i+self.batch_size]
ret_labels = self.train_y[self.i:self.i+self.batch_size]
self.i += self.batch_size
return ret_data, ret_labels
def test(self, samples = 32):
idx = np.random.choice(self.tn, size = samples, replace = False)
return self.test_x[idx], self.test_y[idx] |
|
groupcache.pb.go | // Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: server/pkg/cache/groupcachepb/groupcache.proto
package groupcachepb
import (
context "context"
encoding_binary "encoding/binary"
fmt "fmt"
github_com_golang_protobuf_proto "github.com/golang/protobuf/proto"
proto "github.com/golang/protobuf/proto"
grpc "google.golang.org/grpc"
io "io"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type GetRequest struct {
Group *string `protobuf:"bytes,1,req,name=group" json:"group,omitempty"`
Key *string `protobuf:"bytes,2,req,name=key" json:"key,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *GetRequest) Reset() { *m = GetRequest{} }
func (m *GetRequest) String() string { return proto.CompactTextString(m) }
func (*GetRequest) ProtoMessage() {}
func (*GetRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_eb7cb1df297b6970, []int{0}
}
func (m *GetRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *GetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_GetRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *GetRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_GetRequest.Merge(m, src)
}
func (m *GetRequest) XXX_Size() int {
return m.Size()
}
func (m *GetRequest) XXX_DiscardUnknown() {
xxx_messageInfo_GetRequest.DiscardUnknown(m)
}
var xxx_messageInfo_GetRequest proto.InternalMessageInfo
func (m *GetRequest) GetGroup() string {
if m != nil && m.Group != nil {
return *m.Group
}
return ""
}
func (m *GetRequest) GetKey() string {
if m != nil && m.Key != nil {
return *m.Key
}
return ""
}
type GetResponse struct {
Value []byte `protobuf:"bytes,1,opt,name=value" json:"value,omitempty"`
MinuteQps *float64 `protobuf:"fixed64,2,opt,name=minute_qps,json=minuteQps" json:"minute_qps,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *GetResponse) Reset() { *m = GetResponse{} }
func (m *GetResponse) String() string { return proto.CompactTextString(m) }
func (*GetResponse) ProtoMessage() {}
func (*GetResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_eb7cb1df297b6970, []int{1}
}
func (m *GetResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *GetResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_GetResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *GetResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_GetResponse.Merge(m, src)
}
func (m *GetResponse) XXX_Size() int {
return m.Size()
}
func (m *GetResponse) XXX_DiscardUnknown() {
xxx_messageInfo_GetResponse.DiscardUnknown(m)
}
var xxx_messageInfo_GetResponse proto.InternalMessageInfo
func (m *GetResponse) GetValue() []byte {
if m != nil {
return m.Value
}
return nil
}
func (m *GetResponse) GetMinuteQps() float64 {
if m != nil && m.MinuteQps != nil {
return *m.MinuteQps
}
return 0
}
func init() {
proto.RegisterType((*GetRequest)(nil), "groupcachepb.GetRequest")
proto.RegisterType((*GetResponse)(nil), "groupcachepb.GetResponse")
}
func init() {
proto.RegisterFile("server/pkg/cache/groupcachepb/groupcache.proto", fileDescriptor_eb7cb1df297b6970)
}
var fileDescriptor_eb7cb1df297b6970 = []byte{
// 242 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0x2b, 0x4e, 0x2d, 0x2a,
0x4b, 0x2d, 0xd2, 0x2f, 0xc8, 0x4e, 0xd7, 0x4f, 0x4e, 0x4c, 0xce, 0x48, 0xd5, 0x4f, 0x2f, 0xca,
0x2f, 0x2d, 0x00, 0x33, 0x0b, 0x92, 0x90, 0x38, 0x7a, 0x05, 0x45, 0xf9, 0x25, 0xf9, 0x42, 0x3c,
0xc8, 0xd2, 0x4a, 0x26, 0x5c, 0x5c, 0xee, 0xa9, 0x25, 0x41, 0xa9, 0x85, 0xa5, 0xa9, 0xc5, 0x25,
0x42, 0x22, 0x5c, 0xac, 0x60, 0x59, 0x09, 0x46, 0x05, 0x26, 0x0d, 0xce, 0x20, 0x08, 0x47, 0x48,
0x80, 0x8b, 0x39, 0x3b, 0xb5, 0x52, 0x82, 0x09, 0x2c, 0x06, 0x62, 0x2a, 0x39, 0x71, 0x71, 0x83,
0x75, 0x15, 0x17, 0xe4, 0xe7, 0x15, 0xa7, 0x82, 0xb4, 0x95, 0x25, 0xe6, 0x94, 0xa6, 0x4a, 0x30,
0x2a, 0x30, 0x6a, 0xf0, 0x04, 0x41, 0x38, 0x42, 0xb2, 0x5c, 0x5c, 0xb9, 0x99, 0x79, 0xa5, 0x25,
0xa9, 0xf1, 0x85, 0x05, 0xc5, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0x8c, 0x41, 0x9c, 0x10, 0x91, 0xc0,
0x82, 0x62, 0x23, 0x2f, 0x2e, 0x2e, 0x77, 0x90, 0xf1, 0xce, 0x20, 0x97, 0x08, 0xd9, 0x70, 0x31,
0xbb, 0xa7, 0x96, 0x08, 0x49, 0xe8, 0x21, 0xbb, 0x4e, 0x0f, 0xe1, 0x34, 0x29, 0x49, 0x2c, 0x32,
0x10, 0xeb, 0x95, 0x18, 0x9c, 0x82, 0x4e, 0x3c, 0x92, 0x63, 0xbc, 0xf0, 0x48, 0x8e, 0xf1, 0xc1,
0x23, 0x39, 0xc6, 0x19, 0x8f, 0xe5, 0x18, 0xa2, 0x1c, 0xd2, 0x33, 0x4b, 0x32, 0x4a, 0x93, 0xf4,
0x92, 0xf3, 0x73, 0xf5, 0x0b, 0x12, 0x93, 0x33, 0x2a, 0x53, 0x52, 0x8b, 0x90, 0x59, 0xc5, 0x45,
0xc9, 0xfa, 0x78, 0x03, 0x0e, 0x10, 0x00, 0x00, 0xff, 0xff, 0xba, 0x0d, 0xe5, 0xd5, 0x58, 0x01,
0x00, 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// GroupCacheClient is the client API for GroupCache service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type GroupCacheClient interface {
Get(ctx context.Context, in *GetRequest, opts ...grpc.CallOption) (*GetResponse, error)
}
type groupCacheClient struct {
cc *grpc.ClientConn
}
func NewGroupCacheClient(cc *grpc.ClientConn) GroupCacheClient {
return &groupCacheClient{cc}
}
func (c *groupCacheClient) Get(ctx context.Context, in *GetRequest, opts ...grpc.CallOption) (*GetResponse, error) {
out := new(GetResponse)
err := c.cc.Invoke(ctx, "/groupcachepb.GroupCache/Get", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
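// Illustrative only (hand-written comment, not generated code): a hypothetical
// caller outside this package would use the client roughly as follows, where
// ctx, peerAddr, group and key are placeholders:
//
//	conn, err := grpc.Dial(peerAddr, grpc.WithInsecure())
//	if err != nil { /* handle error */ }
//	client := groupcachepb.NewGroupCacheClient(conn)
//	resp, err := client.Get(ctx, &groupcachepb.GetRequest{Group: &group, Key: &key})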
// GroupCacheServer is the server API for GroupCache service.
type GroupCacheServer interface {
Get(context.Context, *GetRequest) (*GetResponse, error)
}
func RegisterGroupCacheServer(s *grpc.Server, srv GroupCacheServer) {
s.RegisterService(&_GroupCache_serviceDesc, srv)
}
func _GroupCache_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(GetRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(GroupCacheServer).Get(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/groupcachepb.GroupCache/Get",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(GroupCacheServer).Get(ctx, req.(*GetRequest))
}
return interceptor(ctx, in, info, handler)
}
var _GroupCache_serviceDesc = grpc.ServiceDesc{
ServiceName: "groupcachepb.GroupCache",
HandlerType: (*GroupCacheServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "Get",
Handler: _GroupCache_Get_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "server/pkg/cache/groupcachepb/groupcache.proto",
}
func (m *GetRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *GetRequest) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Group == nil {
return 0, new(github_com_golang_protobuf_proto.RequiredNotSetError)
} else {
dAtA[i] = 0xa
i++
i = encodeVarintGroupcache(dAtA, i, uint64(len(*m.Group)))
i += copy(dAtA[i:], *m.Group)
}
if m.Key == nil {
return 0, new(github_com_golang_protobuf_proto.RequiredNotSetError)
} else {
dAtA[i] = 0x12
i++
i = encodeVarintGroupcache(dAtA, i, uint64(len(*m.Key)))
i += copy(dAtA[i:], *m.Key)
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
func (m *GetResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *GetResponse) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Value != nil {
dAtA[i] = 0xa
i++
i = encodeVarintGroupcache(dAtA, i, uint64(len(m.Value)))
i += copy(dAtA[i:], m.Value)
}
if m.MinuteQps != nil {
dAtA[i] = 0x11
i++
encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(*m.MinuteQps))))
i += 8
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
func encodeVarintGroupcache(dAtA []byte, offset int, v uint64) int {
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return offset + 1
}
func (m *GetRequest) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if m.Group != nil {
l = len(*m.Group)
n += 1 + l + sovGroupcache(uint64(l))
}
if m.Key != nil {
l = len(*m.Key)
n += 1 + l + sovGroupcache(uint64(l))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *GetResponse) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if m.Value != nil {
l = len(m.Value)
n += 1 + l + sovGroupcache(uint64(l))
}
if m.MinuteQps != nil {
n += 9
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func sovGroupcache(x uint64) (n int) {
for {
n++
x >>= 7
if x == 0 {
break
}
}
return n
}
func sozGroupcache(x uint64) (n int) {
return sovGroupcache(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (m *GetRequest) Unmarshal(dAtA []byte) error {
var hasFields [1]uint64
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGroupcache
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: GetRequest: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: GetRequest: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGroupcache
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGroupcache
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
s := string(dAtA[iNdEx:postIndex])
m.Group = &s
iNdEx = postIndex
hasFields[0] |= uint64(0x00000001)
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGroupcache
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGroupcache
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
s := string(dAtA[iNdEx:postIndex])
m.Key = &s
iNdEx = postIndex
hasFields[0] |= uint64(0x00000002)
default:
iNdEx = preIndex
skippy, err := skipGroupcache(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthGroupcache
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
if hasFields[0]&uint64(0x00000001) == 0 {
return new(github_com_golang_protobuf_proto.RequiredNotSetError)
}
if hasFields[0]&uint64(0x00000002) == 0 {
return new(github_com_golang_protobuf_proto.RequiredNotSetError)
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *GetResponse) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGroupcache
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: GetResponse: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: GetResponse: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGroupcache
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
byteLen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthGroupcache
}
postIndex := iNdEx + byteLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...)
if m.Value == nil {
m.Value = []byte{}
}
iNdEx = postIndex
case 2:
if wireType != 1 {
return fmt.Errorf("proto: wrong wireType = %d for field MinuteQps", wireType)
}
var v uint64
if (iNdEx + 8) > l {
return io.ErrUnexpectedEOF
}
v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
iNdEx += 8
v2 := float64(math.Float64frombits(v))
m.MinuteQps = &v2
default:
iNdEx = preIndex
skippy, err := skipGroupcache(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthGroupcache
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipGroupcache(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowGroupcache
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowGroupcache
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if dAtA[iNdEx-1] < 0x80 {
break
}
}
return iNdEx, nil
case 1:
iNdEx += 8
return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowGroupcache
} | iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
iNdEx += length
if length < 0 {
return 0, ErrInvalidLengthGroupcache
}
return iNdEx, nil
case 3:
for {
var innerWire uint64
var start int = iNdEx
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowGroupcache
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
innerWire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
innerWireType := int(innerWire & 0x7)
if innerWireType == 4 {
break
}
next, err := skipGroupcache(dAtA[start:])
if err != nil {
return 0, err
}
iNdEx = start + next
}
return iNdEx, nil
case 4:
return iNdEx, nil
case 5:
iNdEx += 4
return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
}
panic("unreachable")
}
var (
ErrInvalidLengthGroupcache = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowGroupcache = fmt.Errorf("proto: integer overflow")
) | if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx] |
test_views.py | import pytest
from testutils.factories import create_test_person
from django.contrib.auth.models import User, Permission
from openstates.data.models import Person, Organization
from people_admin.models import UnmatchedName, NameStatus, DeltaSet
from people_admin.views import MATCHER_PERM, EDIT_PERM, RETIRE_PERM
import json
@pytest.fixture
def admin_user():
u = User.objects.create(username="admin")
user_permissions = list(
Permission.objects.filter( | ).values_list("id", flat=True)
)
u.user_permissions.set(user_permissions)
return u
@pytest.mark.django_db
def test_apply_match_matches(client, django_assert_num_queries, kansas, admin_user):
p = Person.objects.create(name="Samuel L. Jackson")
# kansas is a test fixture, it has some fake data attached we can use
session = kansas.legislative_sessions.get(identifier="2020")
UnmatchedName.objects.create(
id=1, session=session, name="Sam Jackson", sponsorships_count=5, votes_count=5
)
apply_data = {
"match_data": {"unmatchedId": 1, "button": "Match", "matchedId": p.id}
}
client.force_login(admin_user)
with django_assert_num_queries(6):
resp = client.post(
"/admin/people/matcher/update/",
json.dumps(apply_data),
content_type="application/json",
)
assert resp.status_code == 200
assert resp.json() == {"status": "success"}
# get refreshed object from database
matched = UnmatchedName.objects.get()
assert matched.status == NameStatus.MATCHED_PERSON
assert matched.matched_person_id == p.id
@pytest.mark.django_db
def test_apply_match_ignore(client, django_assert_num_queries, kansas, admin_user):
session = kansas.legislative_sessions.get(identifier="2020")
UnmatchedName.objects.create(
id=2, session=session, name="Eva Green", sponsorships_count=16, votes_count=7
)
match_data = {"match_data": {"unmatchedId": 2, "button": "Ignore", "matchedId": ""}}
client.force_login(admin_user)
with django_assert_num_queries(6):
# client can be used to mock GET/POST/etc.
resp = client.post(
"/admin/people/matcher/update/",
json.dumps(match_data),
content_type="application/json",
)
assert resp.status_code == 200
assert resp.json() == {"status": "success"}
# get refreshed object from database
matched = UnmatchedName.objects.get()
assert matched.status == NameStatus.IGNORED
@pytest.mark.django_db
def test_apply_match_source_error(
client, django_assert_num_queries, kansas, admin_user
):
session = kansas.legislative_sessions.get(identifier="2020")
UnmatchedName.objects.create(
id=3,
session=session,
name="David Tennant",
sponsorships_count=10,
votes_count=2,
)
match_data = {
"match_data": {"unmatchedId": 3, "button": "Source Error", "matchedId": ""}
}
client.force_login(admin_user)
with django_assert_num_queries(6):
resp = client.post(
"/admin/people/matcher/update/",
json.dumps(match_data),
content_type="application/json",
)
assert resp.status_code == 200
assert resp.json() == {"status": "success"}
# get refreshed object from database
matched = UnmatchedName.objects.get()
assert matched.status == NameStatus.SOURCE_ERROR
@pytest.mark.django_db
def test_apply_match_404(client, django_assert_num_queries, admin_user):
client.force_login(admin_user)
with django_assert_num_queries(5):
match_data = {
"match_data": {"unmatchedId": 9999, "button": "Match", "matchedId": "1"}
}
resp = client.post(
"/admin/people/matcher/update/",
json.dumps(match_data),
content_type="application/json",
)
assert resp.status_code == 404
@pytest.mark.django_db
def test_people_list(client, django_assert_num_queries, admin_user, kansas):
house = Organization.objects.get(name="Kansas House")
senate = Organization.objects.get(name="Kansas Senate")
sam = create_test_person("Sam Jackson", org=house, party="Democratic", district="1")
sam.identifiers.create(scheme="twitter", identifier="@SamuelLJackson")
sam.contact_details.create(
value="555-555-5555", type="voice", note="Capitol Office"
)
create_test_person("Bosephorous Fogg", org=house, party="Republican", district="2")
create_test_person("Cran Crumble", org=senate, party="Republican", district="A")
client.force_login(admin_user)
with django_assert_num_queries(7):
resp = client.get("/admin/people/ks/")
assert resp.status_code == 200
people = resp.context["context"]["current_people"]
assert len(people) == 3
sam_data = [p for p in people if p["name"] == "Sam Jackson"][0]
assert sam_data["district"] == "1"
assert sam_data["twitter"] == "@SamuelLJackson"
assert sam_data["capitol_voice"] == "555-555-5555"
@pytest.mark.django_db
def test_retire_person(client, django_assert_num_queries, admin_user, kansas):
house = Organization.objects.get(name="Kansas House")
sam = create_test_person("Sam Jackson", org=house, party="Democratic", district="1")
retire_data = {
"id": sam.id,
"name": sam.name,
"reason": "ran for new office",
"retirementDate": "2021-01-01",
"isDead": False,
"vacantSeat": True,
}
client.force_login(admin_user)
with django_assert_num_queries(6):
resp = client.post(
"/admin/people/retire/",
json.dumps(retire_data),
content_type="application/json",
)
assert resp.status_code == 200
ds = DeltaSet.objects.get()
assert "retire Sam Jackson" == ds.name
assert ds.person_retirements.all().count() == 1
retirement = ds.person_retirements.get()
assert retirement.person_id == sam.id
assert retirement.reason == "ran for new office"
assert retirement.date == "2021-01-01"
assert retirement.is_vacant
assert retirement.is_dead is False | codename__in=[
p.split(".")[1] for p in (MATCHER_PERM, EDIT_PERM, RETIRE_PERM)
] |
expr.go | // Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file implements typechecking of expressions.
package types
import (
"fmt"
"go/ast"
"go/constant"
"go/token"
"math"
)
/*
Basic algorithm:
Expressions are checked recursively, top down. Expression checker functions
are generally of the form:
func f(x *operand, e *ast.Expr, ...)
where e is the expression to be checked, and x is the result of the check.
The check performed by f may fail in which case x.mode == invalid, and
related error messages will have been issued by f.
If a hint argument is present, it is the composite literal element type
of an outer composite literal; it is used to type-check composite literal
elements that have no explicit type specification in the source
(e.g.: []T{{...}, {...}}, the hint is the type T in this case).
All expressions are checked via rawExpr, which dispatches according
to expression kind. Upon returning, rawExpr is recording the types and
constant values for all expressions that have an untyped type (those types
may change on the way up in the expression tree). Usually these are constants,
but the results of comparisons or non-constant shifts of untyped constants
may also be untyped, but not constant.
Untyped expressions may eventually become fully typed (i.e., not untyped),
typically when the value is assigned to a variable, or is used otherwise.
The updateExprType method is used to record this final type and update
the recorded types: the type-checked expression tree is again traversed down,
and the new type is propagated as needed. Untyped constant expression values
that become fully typed must now be representable by the full type (constant
sub-expression trees are left alone except for their roots). This mechanism
ensures that a client sees the actual (run-time) type an untyped value would
have. It also permits type-checking of lhs shift operands "as if the shift
were not present": when updateExprType visits an untyped lhs shift operand
and assigns it its final type, that type must be an integer type, and a
constant lhs must be representable as an integer.
When an expression gets its final type, either on the way out from rawExpr,
on the way down in updateExprType, or at the end of the type checker run,
the type (and constant value, if any) is recorded via Info.Types, if present.
*/
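// A minimal illustrative sketch of the flow described above (assuming
// standard Go untyped-constant semantics; the example is not part of the
// checker itself):
//
//	const c = 1 + 2      // rawExpr records c as an untyped int with value 3
//	var f float64 = c    // the assignment fixes the final type float64;
//	                     // updateExprType re-walks c, verifies that 3 is
//	                     // representable as float64, and records float64/3
//
// Had c been the left operand of a non-constant shift (c << n with variable
// n), updateExprType would additionally reject any final type that is not an
// integer type.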
type opPredicates map[token.Token]func(Type) bool
var unaryOpPredicates = opPredicates{
token.ADD: isNumeric,
token.SUB: isNumeric,
token.XOR: isInteger,
token.NOT: isBoolean,
}
func (check *Checker) op(m opPredicates, x *operand, op token.Token) bool {
if pred := m[op]; pred != nil {
if !pred(x.typ) {
check.invalidOp(x.pos(), "operator %s not defined for %s", op, x)
return false
}
} else {
check.invalidAST(x.pos(), "unknown operator %s", op)
return false
}
return true
}
// The unary expression e may be nil. It's passed in for better error messages only.
func (check *Checker) unary(x *operand, e *ast.UnaryExpr, op token.Token) {
switch op {
case token.AND:
// spec: "As an exception to the addressability
// requirement x may also be a composite literal."
if _, ok := unparen(x.expr).(*ast.CompositeLit); !ok && x.mode != variable {
check.invalidOp(x.pos(), "cannot take address of %s", x)
x.mode = invalid
return
}
x.mode = value
x.typ = &Pointer{base: x.typ}
return
case token.ARROW:
typ, ok := x.typ.Underlying().(*Chan)
if !ok {
check.invalidOp(x.pos(), "cannot receive from non-channel %s", x)
x.mode = invalid
return
}
if typ.dir == SendOnly {
check.invalidOp(x.pos(), "cannot receive from send-only channel %s", x)
x.mode = invalid
return
}
x.mode = commaok
x.typ = typ.elem
check.hasCallOrRecv = true
return
}
if !check.op(unaryOpPredicates, x, op) {
x.mode = invalid
return
}
if x.mode == constant_ {
typ := x.typ.Underlying().(*Basic)
var prec uint
if isUnsigned(typ) {
prec = uint(check.conf.sizeof(typ) * 8)
}
x.val = constant.UnaryOp(op, x.val, prec)
// Typed constants must be representable in
// their type after each constant operation.
if isTyped(typ) {
if e != nil {
x.expr = e // for better error message
}
check.representable(x, typ)
}
return
}
x.mode = value
// x.typ remains unchanged
}
func isShift(op token.Token) bool {
return op == token.SHL || op == token.SHR
}
func isComparison(op token.Token) bool {
// Note: comparison tokens are not ordered contiguously, so a simple range check is not possible
switch op {
case token.EQL, token.NEQ, token.LSS, token.LEQ, token.GTR, token.GEQ:
return true
}
return false
}
func | (x constant.Value) bool {
f32, _ := constant.Float32Val(x)
f := float64(f32)
return !math.IsInf(f, 0)
}
func roundFloat32(x constant.Value) constant.Value {
f32, _ := constant.Float32Val(x)
f := float64(f32)
if !math.IsInf(f, 0) {
return constant.MakeFloat64(f)
}
return nil
}
func fitsFloat64(x constant.Value) bool {
f, _ := constant.Float64Val(x)
return !math.IsInf(f, 0)
}
func roundFloat64(x constant.Value) constant.Value {
f, _ := constant.Float64Val(x)
if !math.IsInf(f, 0) {
return constant.MakeFloat64(f)
}
return nil
}
// representableConst reports whether x can be represented as
// a value of the given basic type and for the configuration
// provided (only needed for int/uint sizes).
//
// If rounded != nil, *rounded is set to the rounded value of x for
// representable floating-point and complex values, and to an Int
// value for integer values; it is left alone otherwise.
// It is ok to provide the address of the first argument for rounded.
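//
// A small illustrative call (assuming the usual go/constant semantics; the
// concrete values are made up): for the untyped float constant 1.0/3.0,
// representableConst(x, conf, Typ[Float32], &r) reports true and stores the
// nearest float32 approximation in r, while for 1e100 it reports false
// because the value overflows float32.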
func representableConst(x constant.Value, conf *Config, typ *Basic, rounded *constant.Value) bool {
if x.Kind() == constant.Unknown {
return true // avoid follow-up errors
}
switch {
case isInteger(typ):
x := constant.ToInt(x)
if x.Kind() != constant.Int {
return false
}
if rounded != nil {
*rounded = x
}
if x, ok := constant.Int64Val(x); ok {
switch typ.kind {
case Int:
var s = uint(conf.sizeof(typ)) * 8
return int64(-1)<<(s-1) <= x && x <= int64(1)<<(s-1)-1
case Int8:
const s = 8
return -1<<(s-1) <= x && x <= 1<<(s-1)-1
case Int16:
const s = 16
return -1<<(s-1) <= x && x <= 1<<(s-1)-1
case Int32:
const s = 32
return -1<<(s-1) <= x && x <= 1<<(s-1)-1
case Int64, UntypedInt:
return true
case Uint, Uintptr:
if s := uint(conf.sizeof(typ)) * 8; s < 64 {
return 0 <= x && x <= int64(1)<<s-1
}
return 0 <= x
case Uint8:
const s = 8
return 0 <= x && x <= 1<<s-1
case Uint16:
const s = 16
return 0 <= x && x <= 1<<s-1
case Uint32:
const s = 32
return 0 <= x && x <= 1<<s-1
case Uint64:
return 0 <= x
default:
unreachable()
}
}
// x does not fit into int64
switch n := constant.BitLen(x); typ.kind {
case Uint, Uintptr:
var s = uint(conf.sizeof(typ)) * 8
return constant.Sign(x) >= 0 && n <= int(s)
case Uint64:
return constant.Sign(x) >= 0 && n <= 64
case UntypedInt:
return true
}
case isFloat(typ):
x := constant.ToFloat(x)
if x.Kind() != constant.Float {
return false
}
switch typ.kind {
case Float32:
if rounded == nil {
return fitsFloat32(x)
}
r := roundFloat32(x)
if r != nil {
*rounded = r
return true
}
case Float64:
if rounded == nil {
return fitsFloat64(x)
}
r := roundFloat64(x)
if r != nil {
*rounded = r
return true
}
case UntypedFloat:
return true
default:
unreachable()
}
case isComplex(typ):
x := constant.ToComplex(x)
if x.Kind() != constant.Complex {
return false
}
switch typ.kind {
case Complex64:
if rounded == nil {
return fitsFloat32(constant.Real(x)) && fitsFloat32(constant.Imag(x))
}
re := roundFloat32(constant.Real(x))
im := roundFloat32(constant.Imag(x))
if re != nil && im != nil {
*rounded = constant.BinaryOp(re, token.ADD, constant.MakeImag(im))
return true
}
case Complex128:
if rounded == nil {
return fitsFloat64(constant.Real(x)) && fitsFloat64(constant.Imag(x))
}
re := roundFloat64(constant.Real(x))
im := roundFloat64(constant.Imag(x))
if re != nil && im != nil {
*rounded = constant.BinaryOp(re, token.ADD, constant.MakeImag(im))
return true
}
case UntypedComplex:
return true
default:
unreachable()
}
case isString(typ):
return x.Kind() == constant.String
case isBoolean(typ):
return x.Kind() == constant.Bool
}
return false
}
// representable checks that a constant operand is representable in the given basic type.
func (check *Checker) representable(x *operand, typ *Basic) {
assert(x.mode == constant_)
if !representableConst(x.val, check.conf, typ, &x.val) {
var msg string
if isNumeric(x.typ) && isNumeric(typ) {
// numeric conversion : error msg
//
// integer -> integer : overflows
// integer -> float : overflows (actually not possible)
// float -> integer : truncated
// float -> float : overflows
//
if !isInteger(x.typ) && isInteger(typ) {
msg = "%s truncated to %s"
} else {
msg = "%s overflows %s"
}
} else {
msg = "cannot convert %s to %s"
}
check.errorf(x.pos(), msg, x, typ)
x.mode = invalid
}
}
// updateExprType updates the type of x to typ and invokes itself
// recursively for the operands of x, depending on expression kind.
// If typ is still an untyped type and not the final type, updateExprType
// only updates the recorded untyped type for x and possibly its
// operands. Otherwise (i.e., typ is not an untyped type anymore,
// or it is the final type for x), the type and value are recorded.
// Also, if x is a constant, it must be representable as a value of typ,
// and if x is the (formerly untyped) lhs operand of a non-constant
// shift, it must be an integer value.
//
func (check *Checker) updateExprType(x ast.Expr, typ Type, final bool) {
old, found := check.untyped[x]
if !found {
return // nothing to do
}
// update operands of x if necessary
switch x := x.(type) {
case *ast.BadExpr,
*ast.FuncLit,
*ast.CompositeLit,
*ast.IndexExpr,
*ast.SliceExpr,
*ast.TypeAssertExpr,
*ast.StarExpr,
*ast.KeyValueExpr,
*ast.ArrayType,
*ast.StructType,
*ast.FuncType,
*ast.InterfaceType,
*ast.MapType,
*ast.ChanType:
// These expressions are never untyped - nothing to do.
// The respective sub-expressions got their final types
// upon assignment or use.
if debug {
check.dump("%s: found old type(%s): %s (new: %s)", x.Pos(), x, old.typ, typ)
unreachable()
}
return
case *ast.CallExpr:
// Resulting in an untyped constant (e.g., built-in complex).
// The respective calls take care of calling updateExprType
// for the arguments if necessary.
case *ast.Ident, *ast.BasicLit, *ast.SelectorExpr:
// An identifier denoting a constant, a constant literal,
// or a qualified identifier (imported untyped constant).
// No operands to take care of.
case *ast.ParenExpr:
check.updateExprType(x.X, typ, final)
case *ast.UnaryExpr:
// If x is a constant, the operands were constants.
// They don't need to be updated since they never
// get "materialized" into a typed value; and they
// will be processed at the end of the type check.
if old.val != nil {
break
}
check.updateExprType(x.X, typ, final)
case *ast.BinaryExpr:
if old.val != nil {
break // see comment for unary expressions
}
if isComparison(x.Op) {
// The result type is independent of operand types
// and the operand types must have final types.
} else if isShift(x.Op) {
// The result type depends only on lhs operand.
// The rhs type was updated when checking the shift.
check.updateExprType(x.X, typ, final)
} else {
// The operand types match the result type.
check.updateExprType(x.X, typ, final)
check.updateExprType(x.Y, typ, final)
}
default:
unreachable()
}
// If the new type is not final and still untyped, just
// update the recorded type.
if !final && isUntyped(typ) {
old.typ = typ.Underlying().(*Basic)
check.untyped[x] = old
return
}
// Otherwise we have the final (typed or untyped type).
// Remove it from the map of yet untyped expressions.
delete(check.untyped, x)
// If x is the lhs of a shift, its final type must be integer.
// We already know from the shift check that it is representable
// as an integer if it is a constant.
if old.isLhs && !isInteger(typ) {
check.invalidOp(x.Pos(), "shifted operand %s (type %s) must be integer", x, typ)
return
}
// Everything's fine, record final type and value for x.
check.recordTypeAndValue(x, old.mode, typ, old.val)
}
// updateExprVal updates the value of x to val.
func (check *Checker) updateExprVal(x ast.Expr, val constant.Value) {
if info, ok := check.untyped[x]; ok {
info.val = val
check.untyped[x] = info
}
}
// convertUntyped attempts to set the type of an untyped value to the target type.
func (check *Checker) convertUntyped(x *operand, target Type) {
if x.mode == invalid || isTyped(x.typ) || target == Typ[Invalid] {
return
}
// TODO(gri) Sloppy code - clean up. This function is central
// to assignment and expression checking.
if isUntyped(target) {
// both x and target are untyped
xkind := x.typ.(*Basic).kind
tkind := target.(*Basic).kind
if isNumeric(x.typ) && isNumeric(target) {
if xkind < tkind {
x.typ = target
check.updateExprType(x.expr, target, false)
}
} else if xkind != tkind {
goto Error
}
return
}
// typed target
switch t := target.Underlying().(type) {
case *Basic:
if x.mode == constant_ {
check.representable(x, t)
if x.mode == invalid {
return
}
// expression value may have been rounded - update if needed
check.updateExprVal(x.expr, x.val)
} else {
// Non-constant untyped values may appear as the
// result of comparisons (untyped bool), intermediate
// (delayed-checked) rhs operands of shifts, and as
// the value nil.
switch x.typ.(*Basic).kind {
case UntypedBool:
if !isBoolean(target) {
goto Error
}
case UntypedInt, UntypedRune, UntypedFloat, UntypedComplex:
if !isNumeric(target) {
goto Error
}
case UntypedString:
// Non-constant untyped string values are not
// permitted by the spec and should not occur.
unreachable()
case UntypedNil:
// Unsafe.Pointer is a basic type that includes nil.
if !hasNil(target) {
goto Error
}
default:
goto Error
}
}
case *Interface:
if !x.isNil() && !t.Empty() /* empty interfaces are ok */ {
goto Error
}
// Update operand types to the default type rather than
// the target (interface) type: values must have concrete
// dynamic types. If the value is nil, keep it untyped
// (this is important for tools such as go vet which need
// the dynamic type for argument checking of say, print
// functions)
if x.isNil() {
target = Typ[UntypedNil]
} else {
// cannot assign untyped values to non-empty interfaces
if !t.Empty() {
goto Error
}
target = Default(x.typ)
}
case *Pointer, *Signature, *Slice, *Map, *Chan:
if !x.isNil() {
goto Error
}
// keep nil untyped - see comment for interfaces, above
target = Typ[UntypedNil]
default:
goto Error
}
x.typ = target
check.updateExprType(x.expr, target, true) // UntypedNils are final
return
Error:
check.errorf(x.pos(), "cannot convert %s to %s", x, target)
x.mode = invalid
}
func (check *Checker) comparison(x, y *operand, op token.Token) {
// spec: "In any comparison, the first operand must be assignable
// to the type of the second operand, or vice versa."
err := ""
if x.assignableTo(check.conf, y.typ, nil) || y.assignableTo(check.conf, x.typ, nil) {
defined := false
switch op {
case token.EQL, token.NEQ:
// spec: "The equality operators == and != apply to operands that are comparable."
defined = Comparable(x.typ) || x.isNil() && hasNil(y.typ) || y.isNil() && hasNil(x.typ)
case token.LSS, token.LEQ, token.GTR, token.GEQ:
// spec: "The ordering operators <, <=, >, and >= apply to operands that are ordered."
defined = isOrdered(x.typ)
default:
unreachable()
}
if !defined {
typ := x.typ
if x.isNil() {
typ = y.typ
}
err = check.sprintf("operator %s not defined for %s", op, typ)
}
} else {
err = check.sprintf("mismatched types %s and %s", x.typ, y.typ)
}
if err != "" {
check.errorf(x.pos(), "cannot compare %s %s %s (%s)", x.expr, op, y.expr, err)
x.mode = invalid
return
}
if x.mode == constant_ && y.mode == constant_ {
x.val = constant.MakeBool(constant.Compare(x.val, op, y.val))
// The operands are never materialized; no need to update
// their types.
} else {
x.mode = value
// The operands now have their final types, which at run-
// time will be materialized. Update the expression trees.
// If the current types are untyped, the materialized type
// is the respective default type.
check.updateExprType(x.expr, Default(x.typ), true)
check.updateExprType(y.expr, Default(y.typ), true)
}
// spec: "Comparison operators compare two operands and yield
// an untyped boolean value."
x.typ = Typ[UntypedBool]
}
func (check *Checker) shift(x, y *operand, e *ast.BinaryExpr, op token.Token) {
untypedx := isUntyped(x.typ)
var xval constant.Value
if x.mode == constant_ {
xval = constant.ToInt(x.val)
}
if isInteger(x.typ) || untypedx && xval != nil && xval.Kind() == constant.Int {
// The lhs is of integer type or an untyped constant representable
// as an integer. Nothing to do.
} else {
// shift has no chance
check.invalidOp(x.pos(), "shifted operand %s must be integer", x)
x.mode = invalid
return
}
// spec: "The right operand in a shift expression must have unsigned
// integer type or be an untyped constant that can be converted to
// unsigned integer type."
switch {
case isUnsigned(y.typ):
// nothing to do
case isUntyped(y.typ):
check.convertUntyped(y, Typ[UntypedInt])
if y.mode == invalid {
x.mode = invalid
return
}
default:
check.invalidOp(y.pos(), "shift count %s must be unsigned integer", y)
x.mode = invalid
return
}
if x.mode == constant_ {
if y.mode == constant_ {
// rhs must be an integer value
yval := constant.ToInt(y.val)
if yval.Kind() != constant.Int {
check.invalidOp(y.pos(), "shift count %s must be unsigned integer", y)
x.mode = invalid
return
}
// rhs must be within reasonable bounds
const shiftBound = 1023 - 1 + 52 // so we can express smallestFloat64
s, ok := constant.Uint64Val(yval)
if !ok || s > shiftBound {
check.invalidOp(y.pos(), "invalid shift count %s", y)
x.mode = invalid
return
}
// The lhs is representable as an integer but may not be an integer
// (e.g., 2.0, an untyped float) - this can only happen for untyped
// non-integer numeric constants. Correct the type so that the shift
// result is of integer type.
if !isInteger(x.typ) {
x.typ = Typ[UntypedInt]
}
// x is a constant so xval != nil and it must be of Int kind.
x.val = constant.Shift(xval, op, uint(s))
// Typed constants must be representable in
// their type after each constant operation.
if isTyped(x.typ) {
if e != nil {
x.expr = e // for better error message
}
check.representable(x, x.typ.Underlying().(*Basic))
}
return
}
// non-constant shift with constant lhs
if untypedx {
// spec: "If the left operand of a non-constant shift
// expression is an untyped constant, the type of the
// constant is what it would be if the shift expression
// were replaced by its left operand alone.".
//
// Delay operand checking until we know the final type
// by marking the lhs expression as lhs shift operand.
//
// Usually (in correct programs), the lhs expression
// is in the untyped map. However, it is possible to
// create incorrect programs where the same expression
// is evaluated twice (via a declaration cycle) such
// that the lhs expression type is determined in the
// first round and thus deleted from the map, and then
// not found in the second round (double insertion of
// the same expr node still just leads to one entry for
// that node, and it can only be deleted once).
// Be cautious and check for presence of entry.
// Example: var e, f = int(1<<""[f]) // issue 11347
if info, found := check.untyped[x.expr]; found {
info.isLhs = true
check.untyped[x.expr] = info
}
// keep x's type
x.mode = value
return
}
}
// constant rhs must be >= 0
if y.mode == constant_ && constant.Sign(y.val) < 0 {
check.invalidOp(y.pos(), "shift count %s must not be negative", y)
}
// non-constant shift - lhs must be an integer
if !isInteger(x.typ) {
check.invalidOp(x.pos(), "shifted operand %s must be integer", x)
x.mode = invalid
return
}
x.mode = value
}
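// A few illustrative cases for the shift rules handled above (a sketch
// assuming standard Go shift semantics):
//
//	var s uint = 3
//	_ = 1 << 3              // constant shift: an untyped int constant (8)
//	var f float64 = 1 << 3  // ok: the constant 8 is representable as float64
//	var g float64 = 1 << s  // invalid: 1 takes g's type (float64), and a
//	                        // non-constant shift requires an integer lhs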
var binaryOpPredicates = opPredicates{
token.ADD: func(typ Type) bool { return isNumeric(typ) || isString(typ) },
token.SUB: isNumeric,
token.MUL: isNumeric,
token.QUO: isNumeric,
token.REM: isInteger,
token.AND: isInteger,
token.OR: isInteger,
token.XOR: isInteger,
token.AND_NOT: isInteger,
token.LAND: isBoolean,
token.LOR: isBoolean,
}
// The binary expression e may be nil. It's passed in for better error messages only.
func (check *Checker) binary(x *operand, e *ast.BinaryExpr, lhs, rhs ast.Expr, op token.Token) {
var y operand
check.expr(x, lhs)
check.expr(&y, rhs)
if x.mode == invalid {
return
}
if y.mode == invalid {
x.mode = invalid
x.expr = y.expr
return
}
if isShift(op) {
check.shift(x, &y, e, op)
return
}
check.convertUntyped(x, y.typ)
if x.mode == invalid {
return
}
check.convertUntyped(&y, x.typ)
if y.mode == invalid {
x.mode = invalid
return
}
if isComparison(op) {
check.comparison(x, &y, op)
return
}
if !Identical(x.typ, y.typ) {
// only report an error if we have valid types
// (otherwise we had an error reported elsewhere already)
if x.typ != Typ[Invalid] && y.typ != Typ[Invalid] {
check.invalidOp(x.pos(), "mismatched types %s and %s", x.typ, y.typ)
}
x.mode = invalid
return
}
if !check.op(binaryOpPredicates, x, op) {
x.mode = invalid
return
}
if (op == token.QUO || op == token.REM) && (x.mode == constant_ || isInteger(x.typ)) && y.mode == constant_ && constant.Sign(y.val) == 0 {
check.invalidOp(y.pos(), "division by zero")
x.mode = invalid
return
}
if x.mode == constant_ && y.mode == constant_ {
xval := x.val
yval := y.val
typ := x.typ.Underlying().(*Basic)
// force integer division of integer operands
if op == token.QUO && isInteger(typ) {
op = token.QUO_ASSIGN
}
x.val = constant.BinaryOp(xval, op, yval)
// Typed constants must be representable in
// their type after each constant operation.
if isTyped(typ) {
if e != nil {
x.expr = e // for better error message
}
check.representable(x, typ)
}
return
}
x.mode = value
// x.typ is unchanged
}
// index checks an index expression for validity.
// If max >= 0, it is the upper bound for index.
// If index is valid and the result i >= 0, then i is the constant value of index.
func (check *Checker) index(index ast.Expr, max int64) (i int64, valid bool) {
var x operand
check.expr(&x, index)
if x.mode == invalid {
return
}
// an untyped constant must be representable as Int
check.convertUntyped(&x, Typ[Int])
if x.mode == invalid {
return
}
// the index must be of integer type
if !isInteger(x.typ) {
check.invalidArg(x.pos(), "index %s must be integer", &x)
return
}
// a constant index i must be in bounds
if x.mode == constant_ {
if constant.Sign(x.val) < 0 {
check.invalidArg(x.pos(), "index %s must not be negative", &x)
return
}
i, valid = constant.Int64Val(constant.ToInt(x.val))
if !valid || max >= 0 && i >= max {
check.errorf(x.pos(), "index %s is out of bounds", &x)
return i, false
}
// 0 <= i [ && i < max ]
return i, true
}
return -1, true
}
// indexedElts checks the elements (elts) of an array or slice composite literal
// against the literal's element type (typ), and the element indices against
// the literal length if known (length >= 0). It returns the length of the
// literal (maximum index value + 1).
//
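// A concrete illustration (assuming standard Go composite-literal indexing,
// not a case taken from this package): for [...]int{3: 1, 2} the elements
// occupy indices 3 and 4, so indexedElts returns 5 and the open array type
// is given length 5.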
func (check *Checker) indexedElts(elts []ast.Expr, typ Type, length int64) int64 {
visited := make(map[int64]bool, len(elts))
var index, max int64
for _, e := range elts {
// determine and check index
validIndex := false
eval := e
if kv, _ := e.(*ast.KeyValueExpr); kv != nil {
if i, ok := check.index(kv.Key, length); ok {
if i >= 0 {
index = i
validIndex = true
} else {
check.errorf(e.Pos(), "index %s must be integer constant", kv.Key)
}
}
eval = kv.Value
} else if length >= 0 && index >= length {
check.errorf(e.Pos(), "index %d is out of bounds (>= %d)", index, length)
} else {
validIndex = true
}
// if we have a valid index, check for duplicate entries
if validIndex {
if visited[index] {
check.errorf(e.Pos(), "duplicate index %d in array or slice literal", index)
}
visited[index] = true
}
index++
if index > max {
max = index
}
// check element against composite literal element type
var x operand
check.exprWithHint(&x, eval, typ)
check.assignment(&x, typ, "array or slice literal")
}
return max
}
// exprKind describes the kind of an expression; the kind
// determines if an expression is valid in 'statement context'.
type exprKind int
const (
conversion exprKind = iota
expression
statement
)
// rawExpr typechecks expression e and initializes x with the expression
// value or type. If an error occurred, x.mode is set to invalid.
// If hint != nil, it is the type of a composite literal element.
//
func (check *Checker) rawExpr(x *operand, e ast.Expr, hint Type) exprKind {
if trace {
check.trace(e.Pos(), "%s", e)
check.indent++
defer func() {
check.indent--
check.trace(e.Pos(), "=> %s", x)
}()
}
kind := check.exprInternal(x, e, hint)
// convert x into a user-friendly set of values
// TODO(gri) this code can be simplified
var typ Type
var val constant.Value
switch x.mode {
case invalid:
typ = Typ[Invalid]
case novalue:
typ = (*Tuple)(nil)
case constant_:
typ = x.typ
val = x.val
default:
typ = x.typ
}
assert(x.expr != nil && typ != nil)
if isUntyped(typ) {
// delay type and value recording until we know the type
// or until the end of type checking
check.rememberUntyped(x.expr, false, x.mode, typ.(*Basic), val)
} else {
check.recordTypeAndValue(e, x.mode, typ, val)
}
return kind
}
// exprInternal contains the core of type checking of expressions.
// Must only be called by rawExpr.
//
func (check *Checker) exprInternal(x *operand, e ast.Expr, hint Type) exprKind {
// make sure x has a valid state in case of bailout
// (was issue 5770)
x.mode = invalid
x.typ = Typ[Invalid]
switch e := e.(type) {
case *ast.BadExpr:
goto Error // error was reported before
case *ast.Ident:
check.ident(x, e, nil, nil)
case *ast.Ellipsis:
// ellipses are handled explicitly where they are legal
// (array composite literals and parameter lists)
check.error(e.Pos(), "invalid use of '...'")
goto Error
case *ast.BasicLit:
x.setConst(e.Kind, e.Value)
if x.mode == invalid {
check.invalidAST(e.Pos(), "invalid literal %v", e.Value)
goto Error
}
case *ast.FuncLit:
if sig, ok := check.typ(e.Type).(*Signature); ok {
// Anonymous functions are considered part of the
// init expression/func declaration which contains
// them: use existing package-level declaration info.
check.funcBody(check.decl, "", sig, e.Body)
x.mode = value
x.typ = sig
} else {
check.invalidAST(e.Pos(), "invalid function literal %s", e)
goto Error
}
case *ast.CompositeLit:
var typ, base Type
switch {
case e.Type != nil:
// composite literal type present - use it
// [...]T array types may only appear with composite literals.
// Check for them here so we don't have to handle ... in general.
if atyp, _ := e.Type.(*ast.ArrayType); atyp != nil && atyp.Len != nil {
if ellip, _ := atyp.Len.(*ast.Ellipsis); ellip != nil && ellip.Elt == nil {
// We have an "open" [...]T array type.
// Create a new ArrayType with unknown length (-1)
// and finish setting it up after analyzing the literal.
typ = &Array{len: -1, elem: check.typ(atyp.Elt)}
base = typ
break
}
}
typ = check.typ(e.Type)
base = typ
case hint != nil:
// no composite literal type present - use hint (element type of enclosing type)
typ = hint
base, _ = deref(typ.Underlying()) // *T implies &T{}
default:
// TODO(gri) provide better error messages depending on context
check.error(e.Pos(), "missing type in composite literal")
goto Error
}
switch utyp := base.Underlying().(type) {
case *Struct:
if len(e.Elts) == 0 {
break
}
fields := utyp.fields
if _, ok := e.Elts[0].(*ast.KeyValueExpr); ok {
// all elements must have keys
visited := make([]bool, len(fields))
for _, e := range e.Elts {
kv, _ := e.(*ast.KeyValueExpr)
if kv == nil {
check.error(e.Pos(), "mixture of field:value and value elements in struct literal")
continue
}
key, _ := kv.Key.(*ast.Ident)
if key == nil {
check.errorf(kv.Pos(), "invalid field name %s in struct literal", kv.Key)
continue
}
i := fieldIndex(utyp.fields, check.pkg, key.Name)
if i < 0 {
check.errorf(kv.Pos(), "unknown field %s in struct literal", key.Name)
continue
}
fld := fields[i]
check.recordUse(key, fld)
// 0 <= i < len(fields)
if visited[i] {
check.errorf(kv.Pos(), "duplicate field name %s in struct literal", key.Name)
continue
}
visited[i] = true
check.expr(x, kv.Value)
etyp := fld.typ
check.assignment(x, etyp, "struct literal")
}
} else {
// no element may have a key
for i, e := range e.Elts {
if kv, _ := e.(*ast.KeyValueExpr); kv != nil {
check.error(kv.Pos(), "mixture of field:value and value elements in struct literal")
continue
}
check.expr(x, e)
if i >= len(fields) {
check.error(x.pos(), "too many values in struct literal")
break // cannot continue
}
// i < len(fields)
fld := fields[i]
if !fld.Exported() && fld.pkg != check.pkg {
check.errorf(x.pos(), "implicit assignment to unexported field %s in %s literal", fld.name, typ)
continue
}
etyp := fld.typ
check.assignment(x, etyp, "struct literal")
}
if len(e.Elts) < len(fields) {
check.error(e.Rbrace, "too few values in struct literal")
// ok to continue
}
}
case *Array:
n := check.indexedElts(e.Elts, utyp.elem, utyp.len)
// If we have an "open" [...]T array, set the length now that we know it
// and record the type for [...] (usually done by check.typExpr which is
// not called for [...]).
if utyp.len < 0 {
utyp.len = n
check.recordTypeAndValue(e.Type, typexpr, utyp, nil)
}
case *Slice:
check.indexedElts(e.Elts, utyp.elem, -1)
case *Map:
visited := make(map[interface{}][]Type, len(e.Elts))
for _, e := range e.Elts {
kv, _ := e.(*ast.KeyValueExpr)
if kv == nil {
check.error(e.Pos(), "missing key in map literal")
continue
}
check.exprWithHint(x, kv.Key, utyp.key)
check.assignment(x, utyp.key, "map literal")
if x.mode == invalid {
continue
}
if x.mode == constant_ {
duplicate := false
// if the key is of interface type, the type is also significant when checking for duplicates
if _, ok := utyp.key.Underlying().(*Interface); ok {
for _, vtyp := range visited[x.val] {
if Identical(vtyp, x.typ) {
duplicate = true
break
}
}
visited[x.val] = append(visited[x.val], x.typ)
} else {
_, duplicate = visited[x.val]
visited[x.val] = nil
}
if duplicate {
check.errorf(x.pos(), "duplicate key %s in map literal", x.val)
continue
}
}
check.exprWithHint(x, kv.Value, utyp.elem)
check.assignment(x, utyp.elem, "map literal")
}
default:
// if utyp is invalid, an error was reported before
if utyp != Typ[Invalid] {
check.errorf(e.Pos(), "invalid composite literal type %s", typ)
goto Error
}
}
x.mode = value
x.typ = typ
case *ast.ParenExpr:
kind := check.rawExpr(x, e.X, nil)
x.expr = e
return kind
case *ast.SelectorExpr:
check.selector(x, e)
case *ast.IndexExpr:
check.expr(x, e.X)
if x.mode == invalid {
goto Error
}
valid := false
length := int64(-1) // valid if >= 0
switch typ := x.typ.Underlying().(type) {
case *Basic:
if isString(typ) {
valid = true
if x.mode == constant_ {
length = int64(len(constant.StringVal(x.val)))
}
// an indexed string always yields a byte value
// (not a constant) even if the string and the
// index are constant
x.mode = value
x.typ = universeByte // use 'byte' name
}
case *Array:
valid = true
length = typ.len
if x.mode != variable {
x.mode = value
}
x.typ = typ.elem
case *Pointer:
if typ, _ := typ.base.Underlying().(*Array); typ != nil {
valid = true
length = typ.len
x.mode = variable
x.typ = typ.elem
}
case *Slice:
valid = true
x.mode = variable
x.typ = typ.elem
case *Map:
var key operand
check.expr(&key, e.Index)
check.assignment(&key, typ.key, "map index")
if x.mode == invalid {
goto Error
}
x.mode = mapindex
x.typ = typ.elem
x.expr = e
return expression
}
if !valid {
check.invalidOp(x.pos(), "cannot index %s", x)
goto Error
}
if e.Index == nil {
check.invalidAST(e.Pos(), "missing index for %s", x)
goto Error
}
check.index(e.Index, length)
// ok to continue
case *ast.SliceExpr:
check.expr(x, e.X)
if x.mode == invalid {
goto Error
}
valid := false
length := int64(-1) // valid if >= 0
switch typ := x.typ.Underlying().(type) {
case *Basic:
if isString(typ) {
if e.Slice3 {
check.invalidOp(x.pos(), "3-index slice of string")
goto Error
}
valid = true
if x.mode == constant_ {
length = int64(len(constant.StringVal(x.val)))
}
// spec: "For untyped string operands the result
// is a non-constant value of type string."
if typ.kind == UntypedString {
x.typ = Typ[String]
}
}
case *Array:
valid = true
length = typ.len
if x.mode != variable {
check.invalidOp(x.pos(), "cannot slice %s (value not addressable)", x)
goto Error
}
x.typ = &Slice{elem: typ.elem}
case *Pointer:
if typ, _ := typ.base.Underlying().(*Array); typ != nil {
valid = true
length = typ.len
x.typ = &Slice{elem: typ.elem}
}
case *Slice:
valid = true
// x.typ doesn't change
}
if !valid {
check.invalidOp(x.pos(), "cannot slice %s", x)
goto Error
}
x.mode = value
// spec: "Only the first index may be omitted; it defaults to 0."
if e.Slice3 && (e.High == nil || e.Max == nil) {
check.error(e.Rbrack, "2nd and 3rd index required in 3-index slice")
goto Error
}
// check indices
var ind [3]int64
for i, expr := range []ast.Expr{e.Low, e.High, e.Max} {
x := int64(-1)
switch {
case expr != nil:
// The "capacity" is only known statically for strings, arrays,
// and pointers to arrays, and it is the same as the length for
// those types.
max := int64(-1)
if length >= 0 {
max = length + 1
}
if t, ok := check.index(expr, max); ok && t >= 0 {
x = t
}
case i == 0:
// default is 0 for the first index
x = 0
case length >= 0:
// default is length (== capacity) otherwise
x = length
}
ind[i] = x
}
// constant indices must be in range
// (check.index already checks that existing indices >= 0)
L:
for i, x := range ind[:len(ind)-1] {
if x > 0 {
for _, y := range ind[i+1:] {
if y >= 0 && x > y {
check.errorf(e.Rbrack, "invalid slice indices: %d > %d", x, y)
break L // only report one error, ok to continue
}
}
}
}
case *ast.TypeAssertExpr:
check.expr(x, e.X)
if x.mode == invalid {
goto Error
}
xtyp, _ := x.typ.Underlying().(*Interface)
if xtyp == nil {
check.invalidOp(x.pos(), "%s is not an interface", x)
goto Error
}
// x.(type) expressions are handled explicitly in type switches
if e.Type == nil {
check.invalidAST(e.Pos(), "use of .(type) outside type switch")
goto Error
}
T := check.typ(e.Type)
if T == Typ[Invalid] {
goto Error
}
check.typeAssertion(x.pos(), x, xtyp, T)
x.mode = commaok
x.typ = T
case *ast.CallExpr:
return check.call(x, e)
case *ast.StarExpr:
check.exprOrType(x, e.X)
switch x.mode {
case invalid:
goto Error
case typexpr:
x.typ = &Pointer{base: x.typ}
default:
if typ, ok := x.typ.Underlying().(*Pointer); ok {
x.mode = variable
x.typ = typ.base
} else {
check.invalidOp(x.pos(), "cannot indirect %s", x)
goto Error
}
}
case *ast.UnaryExpr:
check.expr(x, e.X)
if x.mode == invalid {
goto Error
}
check.unary(x, e, e.Op)
if x.mode == invalid {
goto Error
}
if e.Op == token.ARROW {
x.expr = e
return statement // receive operations may appear in statement context
}
case *ast.BinaryExpr:
check.binary(x, e, e.X, e.Y, e.Op)
if x.mode == invalid {
goto Error
}
case *ast.KeyValueExpr:
// key:value expressions are handled in composite literals
check.invalidAST(e.Pos(), "no key:value expected")
goto Error
case *ast.ArrayType, *ast.StructType, *ast.FuncType,
*ast.InterfaceType, *ast.MapType, *ast.ChanType:
x.mode = typexpr
x.typ = check.typ(e)
// Note: rawExpr (caller of exprInternal) will call check.recordTypeAndValue
// even though check.typ has already called it. This is fine as both
// times the same expression and type are recorded. It is also not a
// performance issue because we only reach here for composite literal
// types, which are comparatively rare.
default:
panic(fmt.Sprintf("%s: unknown expression type %T", check.fset.Position(e.Pos()), e))
}
// everything went well
x.expr = e
return expression
Error:
x.mode = invalid
x.expr = e
return statement // avoid follow-up errors
}
// typeAssertion checks that x.(T) is legal; xtyp must be the type of x.
func (check *Checker) typeAssertion(pos token.Pos, x *operand, xtyp *Interface, T Type) {
method, wrongType := assertableTo(xtyp, T)
if method == nil {
return
}
var msg string
if wrongType {
msg = "wrong type for method"
} else {
msg = "missing method"
}
check.errorf(pos, "%s cannot have dynamic type %s (%s %s)", x, T, msg, method.name)
}
func (check *Checker) singleValue(x *operand) {
if x.mode == value {
// tuple types are never named - no need for underlying type below
if t, ok := x.typ.(*Tuple); ok {
assert(t.Len() != 1)
check.errorf(x.pos(), "%d-valued %s where single value is expected", t.Len(), x)
x.mode = invalid
}
}
}
// expr typechecks expression e and initializes x with the expression value.
// The result must be a single value.
// If an error occurred, x.mode is set to invalid.
//
func (check *Checker) expr(x *operand, e ast.Expr) {
check.multiExpr(x, e)
check.singleValue(x)
}
// multiExpr is like expr but the result may be a multi-value.
func (check *Checker) multiExpr(x *operand, e ast.Expr) {
check.rawExpr(x, e, nil)
var msg string
switch x.mode {
default:
return
case novalue:
msg = "%s used as value"
case builtin:
msg = "%s must be called"
case typexpr:
msg = "%s is not an expression"
}
check.errorf(x.pos(), msg, x)
x.mode = invalid
}
// exprWithHint typechecks expression e and initializes x with the expression value;
// hint is the type of a composite literal element.
// If an error occurred, x.mode is set to invalid.
//
func (check *Checker) exprWithHint(x *operand, e ast.Expr, hint Type) {
assert(hint != nil)
check.rawExpr(x, e, hint)
check.singleValue(x)
var msg string
switch x.mode {
default:
return
case novalue:
msg = "%s used as value"
case builtin:
msg = "%s must be called"
case typexpr:
msg = "%s is not an expression"
}
check.errorf(x.pos(), msg, x)
x.mode = invalid
}
// exprOrType typechecks expression or type e and initializes x with the expression value or type.
// If an error occurred, x.mode is set to invalid.
//
func (check *Checker) exprOrType(x *operand, e ast.Expr) {
check.rawExpr(x, e, nil)
check.singleValue(x)
if x.mode == novalue {
check.errorf(x.pos(), "%s used as value or type", x)
x.mode = invalid
}
}
| fitsFloat32 |
can_ccu.rs | #![allow(non_snake_case, non_upper_case_globals)]
#![allow(non_camel_case_types)]
//! FDCAN1
use crate::{RORegister, RWRegister};
#[cfg(not(feature = "nosync"))]
use core::marker::PhantomData;
/// FDCAN Core Release Register
pub mod FDCAN_CREL {
/// Core release
pub mod REL {
/// Offset (28 bits)
pub const offset: u32 = 28;
/// Mask (4 bits: 0b1111 << 28)
pub const mask: u32 = 0b1111 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Step of Core release
pub mod STEP {
/// Offset (24 bits)
pub const offset: u32 = 24;
/// Mask (4 bits: 0b1111 << 24)
pub const mask: u32 = 0b1111 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Sub-step of Core release
pub mod SUBSTEP {
/// Offset (20 bits)
pub const offset: u32 = 20;
/// Mask (4 bits: 0b1111 << 20)
pub const mask: u32 = 0b1111 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Timestamp Year
pub mod YEAR {
/// Offset (16 bits)
pub const offset: u32 = 16;
/// Mask (4 bits: 0b1111 << 16)
pub const mask: u32 = 0b1111 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Timestamp Month
pub mod MON {
/// Offset (8 bits)
pub const offset: u32 = 8;
/// Mask (8 bits: 0xff << 8)
pub const mask: u32 = 0xff << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Timestamp Day
pub mod DAY {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (8 bits: 0xff << 0)
pub const mask: u32 = 0xff << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
/// FDCAN Endian Register
pub mod FDCAN_ENDN {
/// Endianness Test Value
pub mod ETV {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (32 bits: 0xffffffff << 0)
pub const mask: u32 = 0xffffffff << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
/// FDCAN Data Bit Timing and Prescaler Register
pub mod FDCAN_DBTP {
/// Synchronization Jump Width
pub mod DSJW {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (4 bits: 0b1111 << 0)
pub const mask: u32 = 0b1111 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Data time segment after sample point
pub mod DTSEG2 {
/// Offset (4 bits)
pub const offset: u32 = 4;
/// Mask (4 bits: 0b1111 << 4)
pub const mask: u32 = 0b1111 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Data time segment before sample point
pub mod DTSEG1 {
/// Offset (8 bits)
pub const offset: u32 = 8;
/// Mask (5 bits: 0b11111 << 8)
pub const mask: u32 = 0b11111 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Data Bit Rate Prescaler
pub mod DBRP {
/// Offset (16 bits)
pub const offset: u32 = 16;
/// Mask (5 bits: 0b11111 << 16)
pub const mask: u32 = 0b11111 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Transceiver Delay Compensation
pub mod TDC {
/// Offset (23 bits)
pub const offset: u32 = 23;
/// Mask (1 bit: 1 << 23)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
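// A minimal sketch (illustrative only; the function name and field values
// below are placeholders, not recommended CAN bit timings) of how the
// offset/mask constants above are combined into a raw register value:
pub fn example_dbtp_value() -> u32 {
    let dsjw: u32 = 1; // placeholder field values
    let dtseg2: u32 = 3;
    let dtseg1: u32 = 10;
    let dbrp: u32 = 0;
    ((dsjw << FDCAN_DBTP::DSJW::offset) & FDCAN_DBTP::DSJW::mask)
        | ((dtseg2 << FDCAN_DBTP::DTSEG2::offset) & FDCAN_DBTP::DTSEG2::mask)
        | ((dtseg1 << FDCAN_DBTP::DTSEG1::offset) & FDCAN_DBTP::DTSEG1::mask)
        | ((dbrp << FDCAN_DBTP::DBRP::offset) & FDCAN_DBTP::DBRP::mask)
}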
/// FDCAN Test Register
pub mod FDCAN_TEST {
/// Loop Back mode
pub mod LBCK {
/// Offset (4 bits)
pub const offset: u32 = 4;
/// Mask (1 bit: 1 << 4)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Control of Transmit Pin
pub mod TX {
/// Offset (5 bits)
pub const offset: u32 = 5;
/// Mask (2 bits: 0b11 << 5)
pub const mask: u32 = 0b11 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Receive Pin
pub mod RX {
/// Offset (7 bits)
pub const offset: u32 = 7;
/// Mask (1 bit: 1 << 7)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
/// FDCAN RAM Watchdog Register
pub mod FDCAN_RWD {
/// Watchdog value
pub mod WDV {
/// Offset (8 bits)
pub const offset: u32 = 8;
/// Mask (8 bits: 0xff << 8)
pub const mask: u32 = 0xff << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Watchdog configuration
pub mod WDC {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (8 bits: 0xff << 0)
pub const mask: u32 = 0xff << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
/// FDCAN CC Control Register
pub mod FDCAN_CCCR {
/// Initialization
pub mod INIT {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (1 bit: 1 << 0)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Configuration Change Enable
pub mod CCE {
/// Offset (1 bits)
pub const offset: u32 = 1;
/// Mask (1 bit: 1 << 1)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// ASM Restricted Operation Mode
pub mod ASM {
/// Offset (2 bits)
pub const offset: u32 = 2;
/// Mask (1 bit: 1 << 2)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Clock Stop Acknowledge
pub mod CSA {
/// Offset (3 bits)
pub const offset: u32 = 3;
/// Mask (1 bit: 1 << 3)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Clock Stop Request
pub mod CSR {
/// Offset (4 bits)
pub const offset: u32 = 4;
/// Mask (1 bit: 1 << 4)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Bus Monitoring Mode
pub mod MON {
/// Offset (5 bits)
pub const offset: u32 = 5;
/// Mask (1 bit: 1 << 5)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Disable Automatic Retransmission
pub mod DAR {
/// Offset (6 bits)
pub const offset: u32 = 6;
/// Mask (1 bit: 1 << 6)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Test Mode Enable
pub mod TEST {
/// Offset (7 bits)
pub const offset: u32 = 7;
/// Mask (1 bit: 1 << 7)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// FD Operation Enable
pub mod FDOE {
/// Offset (8 bits)
pub const offset: u32 = 8;
/// Mask (1 bit: 1 << 8)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// FDCAN Bit Rate Switching
pub mod BSE {
/// Offset (9 bits)
pub const offset: u32 = 9;
/// Mask (1 bit: 1 << 9)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Protocol Exception Handling Disable
pub mod PXHD {
/// Offset (12 bits)
pub const offset: u32 = 12;
/// Mask (1 bit: 1 << 12)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Edge Filtering during Bus Integration
pub mod EFBI {
/// Offset (13 bits)
pub const offset: u32 = 13;
/// Mask (1 bit: 1 << 13)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// TXP
pub mod TXP {
/// Offset (14 bits)
pub const offset: u32 = 14;
/// Mask (1 bit: 1 << 14)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Non ISO Operation
pub mod NISO {
/// Offset (15 bits)
pub const offset: u32 = 15;
/// Mask (1 bit: 1 << 15)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
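// A hedged usage sketch (an assumption about the usual M_CAN configuration
// sequence, not information encoded in this register map): INIT and CCE are
// typically set in FDCAN_CCCR before protected registers such as FDCAN_NBTP
// or FDCAN_DBTP are written. The helper name below is illustrative.
pub fn example_cccr_unlock(cccr: u32) -> u32 {
    // set the Initialization and Configuration Change Enable bits
    cccr | FDCAN_CCCR::INIT::mask | FDCAN_CCCR::CCE::mask
}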
/// FDCAN Nominal Bit Timing and Prescaler Register
pub mod FDCAN_NBTP {
/// NSJW: Nominal (Re)Synchronization Jump Width
pub mod NSJW {
/// Offset (25 bits)
pub const offset: u32 = 25;
/// Mask (7 bits: 0x7f << 25)
pub const mask: u32 = 0x7f << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Bit Rate Prescaler
pub mod NBRP {
/// Offset (16 bits)
pub const offset: u32 = 16;
/// Mask (9 bits: 0x1ff << 16)
pub const mask: u32 = 0x1ff << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Nominal Time segment before sample point
pub mod NTSEG1 {
/// Offset (8 bits)
pub const offset: u32 = 8;
/// Mask (8 bits: 0xff << 8)
pub const mask: u32 = 0xff << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Nominal Time segment after sample point
pub mod TSEG2 {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (7 bits: 0x7f << 0)
pub const mask: u32 = 0x7f << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
/// FDCAN Timestamp Counter Configuration Register
pub mod FDCAN_TSCC {
/// Timestamp Counter Prescaler
pub mod TCP {
/// Offset (16 bits)
pub const offset: u32 = 16;
/// Mask (4 bits: 0b1111 << 16)
pub const mask: u32 = 0b1111 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Timestamp Select
pub mod TSS {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (2 bits: 0b11 << 0)
pub const mask: u32 = 0b11 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
/// FDCAN Timestamp Counter Value Register
pub mod FDCAN_TSCV {
/// Timestamp Counter
pub mod TSC {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (16 bits: 0xffff << 0)
pub const mask: u32 = 0xffff << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
/// FDCAN Timeout Counter Configuration Register
pub mod FDCAN_TOCC {
/// Enable Timeout Counter
pub mod ETOC {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (1 bit: 1 << 0)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Timeout Select
pub mod TOS {
/// Offset (1 bits)
pub const offset: u32 = 1;
/// Mask (2 bits: 0b11 << 1)
pub const mask: u32 = 0b11 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Timeout Period
pub mod TOP {
/// Offset (16 bits)
pub const offset: u32 = 16;
/// Mask (16 bits: 0xffff << 16)
pub const mask: u32 = 0xffff << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
/// FDCAN Timeout Counter Value Register
pub mod FDCAN_TOCV {
/// Timeout Counter
pub mod TOC {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (16 bits: 0xffff << 0)
pub const mask: u32 = 0xffff << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
/// FDCAN Error Counter Register
pub mod FDCAN_ECR {
/// CAN Error Logging
pub mod CEL {
/// Offset (16 bits)
pub const offset: u32 = 16;
/// Mask (8 bits: 0xff << 16)
pub const mask: u32 = 0xff << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Receive Error Passive
pub mod RP {
/// Offset (15 bits)
pub const offset: u32 = 15;
/// Mask (1 bit: 1 << 15)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Receive Error Counter
pub mod TREC {
/// Offset (8 bits)
pub const offset: u32 = 8;
/// Mask (7 bits: 0x7f << 8)
pub const mask: u32 = 0x7f << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Transmit Error Counter
pub mod TEC {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (8 bits: 0xff << 0)
pub const mask: u32 = 0xff << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
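// A small illustrative helper (hypothetical, not generated from the SVD)
// showing how fields are read back out of a raw register value using the
// mask and offset constants above:
pub fn example_ecr_error_counters(raw: u32) -> (u32, u32) {
    let tec = (raw & FDCAN_ECR::TEC::mask) >> FDCAN_ECR::TEC::offset;
    let rec = (raw & FDCAN_ECR::TREC::mask) >> FDCAN_ECR::TREC::offset;
    (tec, rec) // (transmit error counter, receive error counter)
}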
/// FDCAN Protocol Status Register
pub mod FDCAN_PSR {
/// Last Error Code
pub mod LEC {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (3 bits: 0b111 << 0)
pub const mask: u32 = 0b111 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Activity
pub mod ACT {
/// Offset (3 bits)
pub const offset: u32 = 3;
/// Mask (2 bits: 0b11 << 3)
pub const mask: u32 = 0b11 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Error Passive
pub mod EP {
/// Offset (5 bits)
pub const offset: u32 = 5;
/// Mask (1 bit: 1 << 5)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Warning Status
pub mod EW {
/// Offset (6 bits)
pub const offset: u32 = 6;
/// Mask (1 bit: 1 << 6)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Bus_Off Status
pub mod BO {
/// Offset (7 bits)
pub const offset: u32 = 7;
/// Mask (1 bit: 1 << 7)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Data Last Error Code
pub mod DLEC {
/// Offset (8 bits)
pub const offset: u32 = 8;
/// Mask (3 bits: 0b111 << 8)
pub const mask: u32 = 0b111 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// ESI flag of last received FDCAN Message
pub mod RESI {
/// Offset (11 bits)
pub const offset: u32 = 11;
/// Mask (1 bit: 1 << 11)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// BRS flag of last received FDCAN Message
pub mod RBRS {
/// Offset (12 bits)
pub const offset: u32 = 12;
/// Mask (1 bit: 1 << 12)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Received FDCAN Message
pub mod REDL {
/// Offset (13 bits)
pub const offset: u32 = 13;
/// Mask (1 bit: 1 << 13)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Protocol Exception Event
pub mod PXE {
/// Offset (14 bits)
pub const offset: u32 = 14;
/// Mask (1 bit: 1 << 14)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Transmitter Delay Compensation Value
pub mod TDCV {
/// Offset (16 bits)
pub const offset: u32 = 16;
/// Mask (7 bits: 0x7f << 16)
pub const mask: u32 = 0x7f << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
/// FDCAN Transmitter Delay Compensation Register
pub mod FDCAN_TDCR {
/// Transmitter Delay Compensation Filter Window Length
pub mod TDCF {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (7 bits: 0x7f << 0)
pub const mask: u32 = 0x7f << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Transmitter Delay Compensation Offset
pub mod TDCO {
/// Offset (8 bits)
pub const offset: u32 = 8;
/// Mask (7 bits: 0x7f << 8)
pub const mask: u32 = 0x7f << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
/// FDCAN Interrupt Register
pub mod FDCAN_IR {
/// Rx FIFO 0 New Message
pub mod RF0N {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (1 bit: 1 << 0)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Rx FIFO 0 Watermark Reached
pub mod RF0W {
/// Offset (1 bits)
pub const offset: u32 = 1;
/// Mask (1 bit: 1 << 1)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Rx FIFO 0 Full
pub mod RF0F {
/// Offset (2 bits)
pub const offset: u32 = 2;
/// Mask (1 bit: 1 << 2)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Rx FIFO 0 Message Lost
pub mod RF0L {
/// Offset (3 bits)
pub const offset: u32 = 3;
/// Mask (1 bit: 1 << 3)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Rx FIFO 1 New Message
pub mod RF1N {
/// Offset (4 bits)
pub const offset: u32 = 4;
/// Mask (1 bit: 1 << 4)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Rx FIFO 1 Watermark Reached
pub mod RF1W {
/// Offset (5 bits)
pub const offset: u32 = 5;
/// Mask (1 bit: 1 << 5)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Rx FIFO 1 Full
pub mod RF1F {
/// Offset (6 bits)
pub const offset: u32 = 6;
/// Mask (1 bit: 1 << 6)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Rx FIFO 1 Message Lost
pub mod RF1L {
/// Offset (7 bits)
pub const offset: u32 = 7;
/// Mask (1 bit: 1 << 7)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// High Priority Message
pub mod HPM {
/// Offset (8 bits)
pub const offset: u32 = 8;
/// Mask (1 bit: 1 << 8)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Transmission Completed
pub mod TC {
/// Offset (9 bits)
pub const offset: u32 = 9;
/// Mask (1 bit: 1 << 9)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Transmission Cancellation Finished
pub mod TCF {
/// Offset (10 bits)
pub const offset: u32 = 10;
/// Mask (1 bit: 1 << 10)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Tx FIFO Empty
pub mod TEF {
/// Offset (11 bits)
pub const offset: u32 = 11;
/// Mask (1 bit: 1 << 11)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Tx Event FIFO New Entry
pub mod TEFN {
/// Offset (12 bits)
pub const offset: u32 = 12;
/// Mask (1 bit: 1 << 12)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Tx Event FIFO Watermark Reached
pub mod TEFW {
/// Offset (13 bits)
pub const offset: u32 = 13;
/// Mask (1 bit: 1 << 13)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Tx Event FIFO Full
pub mod TEFF {
/// Offset (14 bits)
pub const offset: u32 = 14;
/// Mask (1 bit: 1 << 14)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Tx Event FIFO Element Lost
pub mod TEFL {
/// Offset (15 bits)
pub const offset: u32 = 15;
/// Mask (1 bit: 1 << 15)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Timestamp Wraparound
pub mod TSW {
/// Offset (16 bits)
pub const offset: u32 = 16;
/// Mask (1 bit: 1 << 16)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Message RAM Access Failure
pub mod MRAF {
/// Offset (17 bits)
pub const offset: u32 = 17;
/// Mask (1 bit: 1 << 17)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Timeout Occurred
pub mod TOO {
/// Offset (18 bits)
pub const offset: u32 = 18;
/// Mask (1 bit: 1 << 18)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Message stored to Dedicated Rx Buffer
pub mod DRX {
/// Offset (19 bits)
pub const offset: u32 = 19;
/// Mask (1 bit: 1 << 19)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Error Logging Overflow
pub mod ELO {
/// Offset (22 bits)
pub const offset: u32 = 22;
/// Mask (1 bit: 1 << 22)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Error Passive
pub mod EP {
/// Offset (23 bits)
pub const offset: u32 = 23;
/// Mask (1 bit: 1 << 23)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Warning Status
pub mod EW {
/// Offset (24 bits)
pub const offset: u32 = 24;
/// Mask (1 bit: 1 << 24)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Bus_Off Status
pub mod BO {
/// Offset (25 bits)
pub const offset: u32 = 25;
/// Mask (1 bit: 1 << 25)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Watchdog Interrupt
pub mod WDI {
/// Offset (26 bits)
pub const offset: u32 = 26;
/// Mask (1 bit: 1 << 26)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Protocol Error in Arbitration Phase (Nominal Bit Time is used)
pub mod PEA {
/// Offset (27 bits)
pub const offset: u32 = 27;
/// Mask (1 bit: 1 << 27)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Protocol Error in Data Phase (Data Bit Time is used)
pub mod PED {
/// Offset (28 bits)
pub const offset: u32 = 28;
/// Mask (1 bit: 1 << 28)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Access to Reserved Address
pub mod ARA {
/// Offset (29 bits)
pub const offset: u32 = 29;
/// Mask (1 bit: 1 << 29)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
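// Illustrative sketch, not generated code: test one interrupt flag in a value
// previously read from FDCAN_IR. Only the use of the mask constant is shown;
// how and when the flag is cleared is left to the hardware documentation.
pub fn example_ir_rx_fifo0_new_message(ir_value: u32) -> bool {
    (ir_value & FDCAN_IR::RF0N::mask) != 0
}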
/// FDCAN Interrupt Enable Register
pub mod FDCAN_IE {
/// Rx FIFO 0 New Message Enable
pub mod RF0NE {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (1 bit: 1 << 0)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Rx FIFO 0 Watermark Reached Enable
pub mod RF0WE {
/// Offset (1 bits)
pub const offset: u32 = 1;
/// Mask (1 bit: 1 << 1)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Rx FIFO 0 Full Enable
pub mod RF0FE {
/// Offset (2 bits)
pub const offset: u32 = 2;
/// Mask (1 bit: 1 << 2)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Rx FIFO 0 Message Lost Enable
pub mod RF0LE {
/// Offset (3 bits)
pub const offset: u32 = 3;
/// Mask (1 bit: 1 << 3)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Rx FIFO 1 New Message Enable
pub mod RF1NE {
/// Offset (4 bits)
pub const offset: u32 = 4;
/// Mask (1 bit: 1 << 4)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Rx FIFO 1 Watermark Reached Enable
pub mod RF1WE {
/// Offset (5 bits)
pub const offset: u32 = 5;
/// Mask (1 bit: 1 << 5)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Rx FIFO 1 Full Enable
pub mod RF1FE {
/// Offset (6 bits)
pub const offset: u32 = 6;
/// Mask (1 bit: 1 << 6)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Rx FIFO 1 Message Lost Enable
pub mod RF1LE {
/// Offset (7 bits)
pub const offset: u32 = 7;
/// Mask (1 bit: 1 << 7)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// High Priority Message Enable
pub mod HPME {
/// Offset (8 bits)
pub const offset: u32 = 8;
/// Mask (1 bit: 1 << 8)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Transmission Completed Enable
pub mod TCE {
/// Offset (9 bits)
pub const offset: u32 = 9;
/// Mask (1 bit: 1 << 9)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Transmission Cancellation Finished Enable
pub mod TCFE {
/// Offset (10 bits)
pub const offset: u32 = 10;
/// Mask (1 bit: 1 << 10)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Tx FIFO Empty Enable
pub mod TEFE {
/// Offset (11 bits)
pub const offset: u32 = 11;
/// Mask (1 bit: 1 << 11)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Tx Event FIFO New Entry Enable
pub mod TEFNE {
/// Offset (12 bits)
pub const offset: u32 = 12;
/// Mask (1 bit: 1 << 12)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Tx Event FIFO Watermark Reached Enable
pub mod TEFWE {
/// Offset (13 bits)
pub const offset: u32 = 13;
/// Mask (1 bit: 1 << 13)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Tx Event FIFO Full Enable
pub mod TEFFE {
/// Offset (14 bits)
pub const offset: u32 = 14;
/// Mask (1 bit: 1 << 14)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Tx Event FIFO Element Lost Enable
pub mod TEFLE {
/// Offset (15 bits)
pub const offset: u32 = 15;
/// Mask (1 bit: 1 << 15)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Timestamp Wraparound Enable
pub mod TSWE {
/// Offset (16 bits)
pub const offset: u32 = 16;
/// Mask (1 bit: 1 << 16)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Message RAM Access Failure Enable
pub mod MRAFE {
/// Offset (17 bits)
pub const offset: u32 = 17;
/// Mask (1 bit: 1 << 17)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Timeout Occurred Enable
pub mod TOOE {
/// Offset (18 bits)
pub const offset: u32 = 18;
/// Mask (1 bit: 1 << 18)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Message stored to Dedicated Rx Buffer Enable
pub mod DRXE {
/// Offset (19 bits)
pub const offset: u32 = 19;
/// Mask (1 bit: 1 << 19)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Bit Error Corrected Interrupt Enable
pub mod BECE {
/// Offset (20 bits)
pub const offset: u32 = 20;
/// Mask (1 bit: 1 << 20)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Bit Error Uncorrected Interrupt Enable
pub mod BEUE {
/// Offset (21 bits)
pub const offset: u32 = 21;
/// Mask (1 bit: 1 << 21)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Error Logging Overflow Enable
pub mod ELOE {
/// Offset (22 bits)
pub const offset: u32 = 22;
/// Mask (1 bit: 1 << 22)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Error Passive Enable
pub mod EPE {
/// Offset (23 bits)
pub const offset: u32 = 23;
/// Mask (1 bit: 1 << 23)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Warning Status Enable
pub mod EWE {
/// Offset (24 bits)
pub const offset: u32 = 24;
/// Mask (1 bit: 1 << 24)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Bus_Off Status Enable
pub mod BOE {
/// Offset (25 bits)
pub const offset: u32 = 25;
/// Mask (1 bit: 1 << 25)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Watchdog Interrupt Enable
pub mod WDIE {
/// Offset (26 bits)
pub const offset: u32 = 26;
/// Mask (1 bit: 1 << 26)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Protocol Error in Arbitration Phase Enable
pub mod PEAE {
/// Offset (27 bits)
pub const offset: u32 = 27;
/// Mask (1 bit: 1 << 27)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Protocol Error in Data Phase Enable
pub mod PEDE {
/// Offset (28 bits)
pub const offset: u32 = 28;
/// Mask (1 bit: 1 << 28)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Access to Reserved Address Enable
pub mod ARAE {
/// Offset (29 bits)
pub const offset: u32 = 29;
/// Mask (1 bit: 1 << 29)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
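// Illustrative sketch, not generated code: build an FDCAN_IE value that
// enables a handful of interrupt sources by OR-ing their single-bit masks.
// The particular selection of interrupts here is an arbitrary example.
pub fn example_ie_enable_bits() -> u32 {
    FDCAN_IE::RF0NE::mask      // Rx FIFO 0 new message
        | FDCAN_IE::TCE::mask  // transmission completed
        | FDCAN_IE::BOE::mask  // bus-off status
}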
/// FDCAN Interrupt Line Select Register
pub mod FDCAN_ILS {
/// Rx FIFO 0 New Message Interrupt Line
pub mod RF0NL {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (1 bit: 1 << 0)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Rx FIFO 0 Watermark Reached Interrupt Line
pub mod RF0WL {
/// Offset (1 bits)
pub const offset: u32 = 1;
/// Mask (1 bit: 1 << 1)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Rx FIFO 0 Full Interrupt Line
pub mod RF0FL {
/// Offset (2 bits)
pub const offset: u32 = 2;
/// Mask (1 bit: 1 << 2)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Rx FIFO 0 Message Lost Interrupt Line
pub mod RF0LL {
/// Offset (3 bits)
pub const offset: u32 = 3;
/// Mask (1 bit: 1 << 3)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Rx FIFO 1 New Message Interrupt Line
pub mod RF1NL {
/// Offset (4 bits)
pub const offset: u32 = 4;
/// Mask (1 bit: 1 << 4)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Rx FIFO 1 Watermark Reached Interrupt Line
pub mod RF1WL {
/// Offset (5 bits)
pub const offset: u32 = 5;
/// Mask (1 bit: 1 << 5)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Rx FIFO 1 Full Interrupt Line
pub mod RF1FL {
/// Offset (6 bits)
pub const offset: u32 = 6;
/// Mask (1 bit: 1 << 6)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Rx FIFO 1 Message Lost Interrupt Line
pub mod RF1LL {
/// Offset (7 bits)
pub const offset: u32 = 7;
/// Mask (1 bit: 1 << 7)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// High Priority Message Interrupt Line
pub mod HPML {
/// Offset (8 bits)
pub const offset: u32 = 8;
/// Mask (1 bit: 1 << 8)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Transmission Completed Interrupt Line
pub mod TCL {
/// Offset (9 bits)
pub const offset: u32 = 9;
/// Mask (1 bit: 1 << 9)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Transmission Cancellation Finished Interrupt Line
pub mod TCFL {
/// Offset (10 bits)
pub const offset: u32 = 10;
/// Mask (1 bit: 1 << 10)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Tx FIFO Empty Interrupt Line
pub mod TEFL {
/// Offset (11 bits)
pub const offset: u32 = 11;
/// Mask (1 bit: 1 << 11)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Tx Event FIFO New Entry Interrupt Line
pub mod TEFNL {
/// Offset (12 bits)
pub const offset: u32 = 12;
/// Mask (1 bit: 1 << 12)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Tx Event FIFO Watermark Reached Interrupt Line
pub mod TEFWL {
/// Offset (13 bits)
pub const offset: u32 = 13;
/// Mask (1 bit: 1 << 13)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Tx Event FIFO Full Interrupt Line
pub mod TEFFL {
/// Offset (14 bits)
pub const offset: u32 = 14;
/// Mask (1 bit: 1 << 14)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Tx Event FIFO Element Lost Interrupt Line
pub mod TEFLL {
/// Offset (15 bits)
pub const offset: u32 = 15;
/// Mask (1 bit: 1 << 15)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Timestamp Wraparound Interrupt Line
pub mod TSWL {
/// Offset (16 bits)
pub const offset: u32 = 16;
/// Mask (1 bit: 1 << 16)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Message RAM Access Failure Interrupt Line
pub mod MRAFL {
/// Offset (17 bits)
pub const offset: u32 = 17;
/// Mask (1 bit: 1 << 17)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Timeout Occurred Interrupt Line
pub mod TOOL {
/// Offset (18 bits)
pub const offset: u32 = 18;
/// Mask (1 bit: 1 << 18)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Message stored to Dedicated Rx Buffer Interrupt Line
pub mod DRXL {
/// Offset (19 bits)
pub const offset: u32 = 19;
/// Mask (1 bit: 1 << 19)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Bit Error Corrected Interrupt Line
pub mod BECL {
/// Offset (20 bits)
pub const offset: u32 = 20;
/// Mask (1 bit: 1 << 20)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Bit Error Uncorrected Interrupt Line
pub mod BEUL {
/// Offset (21 bits)
pub const offset: u32 = 21;
/// Mask (1 bit: 1 << 21)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Error Logging Overflow Interrupt Line
pub mod ELOL {
/// Offset (22 bits)
pub const offset: u32 = 22;
/// Mask (1 bit: 1 << 22)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Error Passive Interrupt Line
pub mod EPL {
/// Offset (23 bits)
pub const offset: u32 = 23;
/// Mask (1 bit: 1 << 23)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Warning Status Interrupt Line
pub mod EWL {
/// Offset (24 bits)
pub const offset: u32 = 24;
/// Mask (1 bit: 1 << 24)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Bus_Off Status Interrupt Line
pub mod BOL {
/// Offset (25 bits)
pub const offset: u32 = 25;
/// Mask (1 bit: 1 << 25)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Watchdog Interrupt Line
pub mod WDIL {
/// Offset (26 bits)
pub const offset: u32 = 26;
/// Mask (1 bit: 1 << 26)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Protocol Error in Arbitration Phase Line
pub mod PEAL {
/// Offset (27 bits)
pub const offset: u32 = 27;
/// Mask (1 bit: 1 << 27)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Protocol Error in Data Phase Line
pub mod PEDL {
/// Offset (28 bits)
pub const offset: u32 = 28;
/// Mask (1 bit: 1 << 28)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Access to Reserved Address Line
pub mod ARAL {
/// Offset (29 bits)
pub const offset: u32 = 29;
/// Mask (1 bit: 1 << 29)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
/// FDCAN Interrupt Line Enable Register
pub mod FDCAN_ILE {
/// Enable Interrupt Line 0
pub mod EINT0 {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (1 bit: 1 << 0)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Enable Interrupt Line 1
pub mod EINT1 {
/// Offset (1 bits)
pub const offset: u32 = 1;
/// Mask (1 bit: 1 << 1)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
/// FDCAN Global Filter Configuration Register
pub mod FDCAN_GFC {
/// Reject Remote Frames Extended
pub mod RRFE {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (1 bit: 1 << 0)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Reject Remote Frames Standard
pub mod RRFS {
/// Offset (1 bits)
pub const offset: u32 = 1;
/// Mask (1 bit: 1 << 1)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Accept Non-matching Frames Extended
pub mod ANFE {
/// Offset (2 bits)
pub const offset: u32 = 2;
/// Mask (2 bits: 0b11 << 2)
pub const mask: u32 = 0b11 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Accept Non-matching Frames Standard
pub mod ANFS {
/// Offset (4 bits)
pub const offset: u32 = 4;
/// Mask (2 bits: 0b11 << 4)
pub const mask: u32 = 0b11 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
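// Illustrative sketch, not generated code: assemble an FDCAN_GFC value. The
// two-bit ANFS/ANFE fields and the single-bit reject flags are combined with
// the constants above; the field values are placeholders chosen by the caller.
pub fn example_gfc_value(anfs: u32, anfe: u32, reject_remote_std: bool, reject_remote_ext: bool) -> u32 {
    let mut value = ((anfs << FDCAN_GFC::ANFS::offset) & FDCAN_GFC::ANFS::mask)
        | ((anfe << FDCAN_GFC::ANFE::offset) & FDCAN_GFC::ANFE::mask);
    if reject_remote_std {
        value |= FDCAN_GFC::RRFS::mask;
    }
    if reject_remote_ext {
        value |= FDCAN_GFC::RRFE::mask;
    }
    value
}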
/// FDCAN Standard ID Filter Configuration Register
pub mod FDCAN_SIDFC {
/// Filter List Standard Start Address
pub mod FLSSA {
/// Offset (2 bits)
pub const offset: u32 = 2;
/// Mask (14 bits: 0x3fff << 2)
pub const mask: u32 = 0x3fff << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// List Size Standard
pub mod LSS {
/// Offset (16 bits)
pub const offset: u32 = 16;
/// Mask (8 bits: 0xff << 16)
pub const mask: u32 = 0xff << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
/// FDCAN Extended ID Filter Configuration Register
pub mod FDCAN_XIDFC {
/// Filter List Standard Start Address
pub mod FLESA {
/// Offset (2 bits)
pub const offset: u32 = 2;
/// Mask (14 bits: 0x3fff << 2)
pub const mask: u32 = 0x3fff << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// List Size Extended
pub mod LSE {
/// Offset (16 bits)
pub const offset: u32 = 16;
/// Mask (8 bits: 0xff << 16)
pub const mask: u32 = 0xff << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
/// FDCAN Extended ID and Mask Register
pub mod FDCAN_XIDAM {
/// Extended ID Mask
pub mod EIDM {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (29 bits: 0x1fffffff << 0)
pub const mask: u32 = 0x1fffffff << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
/// FDCAN High Priority Message Status Register
pub mod FDCAN_HPMS {
/// Buffer Index
pub mod BIDX {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (6 bits: 0x3f << 0)
pub const mask: u32 = 0x3f << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Message Storage Indicator
pub mod MSI {
/// Offset (6 bits)
pub const offset: u32 = 6;
/// Mask (2 bits: 0b11 << 6)
pub const mask: u32 = 0b11 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Filter Index
pub mod FIDX {
/// Offset (8 bits)
pub const offset: u32 = 8;
/// Mask (7 bits: 0x7f << 8)
pub const mask: u32 = 0x7f << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Filter List
pub mod FLST {
/// Offset (15 bits)
pub const offset: u32 = 15;
/// Mask (1 bit: 1 << 15)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
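// Illustrative sketch, not generated code: pull the buffer index and filter
// index out of a value read from FDCAN_HPMS by masking first and shifting down.
pub fn example_hpms_indices(hpms_value: u32) -> (u32, u32) {
    let buffer_index = (hpms_value & FDCAN_HPMS::BIDX::mask) >> FDCAN_HPMS::BIDX::offset;
    let filter_index = (hpms_value & FDCAN_HPMS::FIDX::mask) >> FDCAN_HPMS::FIDX::offset;
    (buffer_index, filter_index)
}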
/// FDCAN New Data 1 Register
pub mod FDCAN_NDAT1 {
/// New data
pub mod ND0 {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (1 bit: 1 << 0)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// New data
pub mod ND1 {
/// Offset (1 bits)
pub const offset: u32 = 1;
/// Mask (1 bit: 1 << 1)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// New data
pub mod ND2 {
/// Offset (2 bits)
pub const offset: u32 = 2;
/// Mask (1 bit: 1 << 2)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// New data
pub mod ND3 {
/// Offset (3 bits)
pub const offset: u32 = 3;
/// Mask (1 bit: 1 << 3)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// New data
pub mod ND4 {
/// Offset (4 bits)
pub const offset: u32 = 4;
/// Mask (1 bit: 1 << 4)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// New data
pub mod ND5 {
/// Offset (5 bits)
pub const offset: u32 = 5;
/// Mask (1 bit: 1 << 5)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// New data
pub mod ND6 {
/// Offset (6 bits)
pub const offset: u32 = 6;
/// Mask (1 bit: 1 << 6)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// New data
pub mod ND7 {
/// Offset (7 bits)
pub const offset: u32 = 7;
/// Mask (1 bit: 1 << 7)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// New data
pub mod ND8 {
/// Offset (8 bits)
pub const offset: u32 = 8;
/// Mask (1 bit: 1 << 8)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// New data
pub mod ND9 {
/// Offset (9 bits)
pub const offset: u32 = 9;
/// Mask (1 bit: 1 << 9)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// New data
pub mod ND10 {
/// Offset (10 bits)
pub const offset: u32 = 10;
/// Mask (1 bit: 1 << 10)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// New data
pub mod ND11 {
/// Offset (11 bits)
pub const offset: u32 = 11;
/// Mask (1 bit: 1 << 11)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// New data
pub mod ND12 {
/// Offset (12 bits)
pub const offset: u32 = 12;
/// Mask (1 bit: 1 << 12)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// New data
pub mod ND13 {
/// Offset (13 bits)
pub const offset: u32 = 13;
/// Mask (1 bit: 1 << 13)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// New data
pub mod ND14 {
/// Offset (14 bits)
pub const offset: u32 = 14;
/// Mask (1 bit: 1 << 14)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// New data
pub mod ND15 {
/// Offset (15 bits)
pub const offset: u32 = 15;
/// Mask (1 bit: 1 << 15)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// New data
pub mod ND16 {
/// Offset (16 bits)
pub const offset: u32 = 16;
/// Mask (1 bit: 1 << 16)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// New data
pub mod ND17 {
/// Offset (17 bits)
pub const offset: u32 = 17;
/// Mask (1 bit: 1 << 17)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// New data
pub mod ND18 {
/// Offset (18 bits)
pub const offset: u32 = 18;
/// Mask (1 bit: 1 << 18)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// New data
pub mod ND19 {
/// Offset (19 bits)
pub const offset: u32 = 19;
/// Mask (1 bit: 1 << 19)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// New data
pub mod ND20 {
/// Offset (20 bits)
pub const offset: u32 = 20;
/// Mask (1 bit: 1 << 20)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// New data
pub mod ND21 {
/// Offset (21 bits)
pub const offset: u32 = 21;
/// Mask (1 bit: 1 << 21)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// New data
pub mod ND22 {
/// Offset (22 bits)
pub const offset: u32 = 22;
/// Mask (1 bit: 1 << 22)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// New data
pub mod ND23 {
/// Offset (23 bits)
pub const offset: u32 = 23;
/// Mask (1 bit: 1 << 23)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// New data
pub mod ND24 {
/// Offset (24 bits)
pub const offset: u32 = 24;
/// Mask (1 bit: 1 << 24)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// New data
pub mod ND25 {
/// Offset (25 bits)
pub const offset: u32 = 25;
/// Mask (1 bit: 1 << 25)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// New data
pub mod ND26 {
/// Offset (26 bits)
pub const offset: u32 = 26;
/// Mask (1 bit: 1 << 26)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// New data
pub mod ND27 {
/// Offset (27 bits)
pub const offset: u32 = 27;
/// Mask (1 bit: 1 << 27)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// New data
pub mod ND28 {
/// Offset (28 bits)
pub const offset: u32 = 28;
/// Mask (1 bit: 1 << 28)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// New data
pub mod ND29 {
/// Offset (29 bits)
pub const offset: u32 = 29;
/// Mask (1 bit: 1 << 29)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// New data
pub mod ND30 {
/// Offset (30 bits)
pub const offset: u32 = 30;
/// Mask (1 bit: 1 << 30)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// New data
pub mod ND31 {
/// Offset (31 bits)
pub const offset: u32 = 31;
/// Mask (1 bit: 1 << 31)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
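// Illustrative sketch, not generated code: the ND0..ND31 fields are one bit
// per dedicated Rx buffer, so a buffer index can be turned into the matching
// bit directly rather than naming each NDx constant. `buffer` is assumed to be
// in the range 0..=31.
pub fn example_ndat1_has_new_data(ndat1_value: u32, buffer: u32) -> bool {
    debug_assert!(buffer < 32);
    (ndat1_value & (1u32 << buffer)) != 0
}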
/// FDCAN New Data 2 Register
pub mod FDCAN_NDAT2 {
/// New data
pub mod ND32 {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (1 bit: 1 << 0)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// New data
pub mod ND33 {
/// Offset (1 bits)
pub const offset: u32 = 1;
/// Mask (1 bit: 1 << 1)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// New data
pub mod ND34 {
/// Offset (2 bits)
pub const offset: u32 = 2;
/// Mask (1 bit: 1 << 2)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// New data
pub mod ND35 {
/// Offset (3 bits)
pub const offset: u32 = 3;
/// Mask (1 bit: 1 << 3)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// New data
pub mod ND36 {
/// Offset (4 bits)
pub const offset: u32 = 4;
/// Mask (1 bit: 1 << 4)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// New data
pub mod ND37 {
/// Offset (5 bits)
pub const offset: u32 = 5;
/// Mask (1 bit: 1 << 5)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// New data
pub mod ND38 {
/// Offset (6 bits)
pub const offset: u32 = 6;
/// Mask (1 bit: 1 << 6)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// New data
pub mod ND39 {
/// Offset (7 bits)
pub const offset: u32 = 7;
/// Mask (1 bit: 1 << 7)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// New data
pub mod ND40 {
/// Offset (8 bits)
pub const offset: u32 = 8;
/// Mask (1 bit: 1 << 8)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// New data
pub mod ND41 {
/// Offset (9 bits)
pub const offset: u32 = 9;
/// Mask (1 bit: 1 << 9)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// New data
pub mod ND42 {
/// Offset (10 bits)
pub const offset: u32 = 10;
/// Mask (1 bit: 1 << 10)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// New data
pub mod ND43 {
/// Offset (11 bits)
pub const offset: u32 = 11;
/// Mask (1 bit: 1 << 11)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// New data
pub mod ND44 {
/// Offset (12 bits)
pub const offset: u32 = 12;
/// Mask (1 bit: 1 << 12)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// New data
pub mod ND45 {
/// Offset (13 bits)
pub const offset: u32 = 13;
/// Mask (1 bit: 1 << 13)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// New data
pub mod ND46 {
/// Offset (14 bits)
pub const offset: u32 = 14;
/// Mask (1 bit: 1 << 14)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// New data
pub mod ND47 {
/// Offset (15 bits)
pub const offset: u32 = 15;
/// Mask (1 bit: 1 << 15)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// New data
pub mod ND48 {
/// Offset (16 bits)
pub const offset: u32 = 16;
/// Mask (1 bit: 1 << 16)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// New data
pub mod ND49 {
/// Offset (17 bits)
pub const offset: u32 = 17;
/// Mask (1 bit: 1 << 17)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// New data
pub mod ND50 {
/// Offset (18 bits)
pub const offset: u32 = 18;
/// Mask (1 bit: 1 << 18)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// New data
pub mod ND51 {
/// Offset (19 bits)
pub const offset: u32 = 19;
/// Mask (1 bit: 1 << 19)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// New data
pub mod ND52 {
/// Offset (20 bits)
pub const offset: u32 = 20;
/// Mask (1 bit: 1 << 20)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// New data
pub mod ND53 {
/// Offset (21 bits)
pub const offset: u32 = 21;
/// Mask (1 bit: 1 << 21)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// New data
pub mod ND54 {
/// Offset (22 bits)
pub const offset: u32 = 22;
/// Mask (1 bit: 1 << 22)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// New data
pub mod ND55 {
/// Offset (23 bits)
pub const offset: u32 = 23;
/// Mask (1 bit: 1 << 23)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// New data
pub mod ND56 {
/// Offset (24 bits)
pub const offset: u32 = 24;
/// Mask (1 bit: 1 << 24)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// New data
pub mod ND57 {
/// Offset (25 bits)
pub const offset: u32 = 25;
/// Mask (1 bit: 1 << 25)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// New data
pub mod ND58 {
/// Offset (26 bits)
pub const offset: u32 = 26;
/// Mask (1 bit: 1 << 26)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// New data
pub mod ND59 {
/// Offset (27 bits)
pub const offset: u32 = 27;
/// Mask (1 bit: 1 << 27)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// New data
pub mod ND60 {
/// Offset (28 bits)
pub const offset: u32 = 28;
/// Mask (1 bit: 1 << 28)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// New data
pub mod ND61 {
/// Offset (29 bits)
pub const offset: u32 = 29;
/// Mask (1 bit: 1 << 29)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// New data
pub mod ND62 {
/// Offset (30 bits)
pub const offset: u32 = 30;
/// Mask (1 bit: 1 << 30)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// New data
pub mod ND63 {
/// Offset (31 bits)
pub const offset: u32 = 31;
/// Mask (1 bit: 1 << 31)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
/// FDCAN Rx FIFO 0 Configuration Register
pub mod FDCAN_RXF0C {
/// Rx FIFO 0 Start Address
pub mod F0SA {
/// Offset (2 bits)
pub const offset: u32 = 2;
/// Mask (14 bits: 0x3fff << 2)
pub const mask: u32 = 0x3fff << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Rx FIFO 0 Size
pub mod F0S {
/// Offset (16 bits)
pub const offset: u32 = 16;
/// Mask (8 bits: 0xff << 16)
pub const mask: u32 = 0xff << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// FIFO 0 Watermark
pub mod F0WM {
/// Offset (24 bits)
pub const offset: u32 = 24;
/// Mask (8 bits: 0xff << 24)
pub const mask: u32 = 0xff << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
/// FDCAN Rx FIFO 0 Status Register
pub mod FDCAN_RXF0S {
/// Rx FIFO 0 Fill Level
pub mod F0FL {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (7 bits: 0x7f << 0)
pub const mask: u32 = 0x7f << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Rx FIFO 0 Get Index
pub mod F0G {
/// Offset (8 bits)
pub const offset: u32 = 8;
/// Mask (6 bits: 0x3f << 8)
pub const mask: u32 = 0x3f << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Rx FIFO 0 Put Index
pub mod F0P {
/// Offset (16 bits)
pub const offset: u32 = 16;
/// Mask (6 bits: 0x3f << 16)
pub const mask: u32 = 0x3f << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Rx FIFO 0 Full
pub mod F0F {
/// Offset (24 bits)
pub const offset: u32 = 24;
/// Mask (1 bit: 1 << 24)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Rx FIFO 0 Message Lost
pub mod RF0L {
/// Offset (25 bits)
pub const offset: u32 = 25;
/// Mask (1 bit: 1 << 25)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
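// Illustrative sketch, not generated code: decode the fill level and get index
// from a value read from FDCAN_RXF0S using the field constants above.
pub fn example_rxf0s_fill_and_get(rxf0s_value: u32) -> (u32, u32) {
    let fill_level = (rxf0s_value & FDCAN_RXF0S::F0FL::mask) >> FDCAN_RXF0S::F0FL::offset;
    let get_index = (rxf0s_value & FDCAN_RXF0S::F0G::mask) >> FDCAN_RXF0S::F0G::offset;
    (fill_level, get_index)
}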
/// FDCAN Rx FIFO 0 Acknowledge Register
pub mod FDCAN_RXF0A {
/// Rx FIFO 0 Acknowledge Index
pub mod FA01 {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (6 bits: 0x3f << 0)
pub const mask: u32 = 0x3f << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
/// FDCAN Rx Buffer Configuration Register
pub mod FDCAN_RXBC {
/// Rx Buffer Start Address
pub mod RBSA {
/// Offset (2 bits)
pub const offset: u32 = 2;
/// Mask (14 bits: 0x3fff << 2)
pub const mask: u32 = 0x3fff << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
/// FDCAN Rx FIFO 1 Configuration Register
pub mod FDCAN_RXF1C {
/// Rx FIFO 1 Start Address
pub mod F1SA {
/// Offset (2 bits)
pub const offset: u32 = 2;
/// Mask (14 bits: 0x3fff << 2)
pub const mask: u32 = 0x3fff << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Rx FIFO 1 Size
pub mod F1S {
/// Offset (16 bits)
pub const offset: u32 = 16;
/// Mask (7 bits: 0x7f << 16)
pub const mask: u32 = 0x7f << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Rx FIFO 1 Watermark
pub mod F1WM {
/// Offset (24 bits)
pub const offset: u32 = 24;
/// Mask (7 bits: 0x7f << 24)
pub const mask: u32 = 0x7f << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
/// FDCAN Rx FIFO 1 Status Register
pub mod FDCAN_RXF1S {
/// Rx FIFO 1 Fill Level
pub mod F1FL {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (7 bits: 0x7f << 0)
pub const mask: u32 = 0x7f << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Rx FIFO 1 Get Index
pub mod F1GI {
/// Offset (8 bits)
pub const offset: u32 = 8;
/// Mask (7 bits: 0x7f << 8)
pub const mask: u32 = 0x7f << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Rx FIFO 1 Put Index
pub mod F1PI {
/// Offset (16 bits)
pub const offset: u32 = 16;
/// Mask (7 bits: 0x7f << 16)
pub const mask: u32 = 0x7f << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Rx FIFO 1 Full
pub mod F1F {
/// Offset (24 bits)
pub const offset: u32 = 24;
/// Mask (1 bit: 1 << 24)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Rx FIFO 1 Message Lost
pub mod RF1L {
/// Offset (25 bits)
pub const offset: u32 = 25;
/// Mask (1 bit: 1 << 25)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Debug Message Status
pub mod DMS {
/// Offset (30 bits)
pub const offset: u32 = 30;
/// Mask (2 bits: 0b11 << 30)
pub const mask: u32 = 0b11 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
/// FDCAN Rx FIFO 1 Acknowledge Register
pub mod FDCAN_RXF1A {
/// Rx FIFO 1 Acknowledge Index
pub mod F1AI {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (6 bits: 0x3f << 0)
pub const mask: u32 = 0x3f << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
/// FDCAN Rx Buffer Element Size Configuration Register
pub mod FDCAN_RXESC {
/// Rx FIFO 0 Data Field Size:
pub mod F0DS {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (3 bits: 0b111 << 0)
pub const mask: u32 = 0b111 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Rx FIFO 1 Data Field Size:
pub mod F1DS {
/// Offset (4 bits)
pub const offset: u32 = 4;
/// Mask (3 bits: 0b111 << 4)
pub const mask: u32 = 0b111 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Rx Buffer Data Field Size:
pub mod RBDS {
/// Offset (8 bits)
pub const offset: u32 = 8;
/// Mask (3 bits: 0b111 << 8)
pub const mask: u32 = 0b111 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
/// FDCAN Tx Buffer Configuration Register
pub mod FDCAN_TXBC {
/// Tx Buffers Start Address
pub mod TBSA {
/// Offset (2 bits)
pub const offset: u32 = 2;
/// Mask (14 bits: 0x3fff << 2)
pub const mask: u32 = 0x3fff << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Number of Dedicated Transmit Buffers
pub mod NDTB {
/// Offset (16 bits)
pub const offset: u32 = 16;
/// Mask (6 bits: 0x3f << 16)
pub const mask: u32 = 0x3f << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Transmit FIFO/Queue Size
pub mod TFQS {
/// Offset (24 bits)
pub const offset: u32 = 24;
/// Mask (6 bits: 0x3f << 24)
pub const mask: u32 = 0x3f << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Tx FIFO/Queue Mode
pub mod TFQM {
/// Offset (30 bits)
pub const offset: u32 = 30;
/// Mask (1 bit: 1 << 30)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
/// FDCAN Tx FIFO/Queue Status Register
pub mod FDCAN_TXFQS {
/// Tx FIFO Free Level
pub mod TFFL {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (6 bits: 0x3f << 0)
pub const mask: u32 = 0x3f << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Tx FIFO Get Index
pub mod TFGI {
/// Offset (8 bits)
pub const offset: u32 = 8;
/// Mask (5 bits: 0b11111 << 8)
pub const mask: u32 = 0b11111 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Tx FIFO/Queue Put Index
pub mod TFQPI {
/// Offset (16 bits)
pub const offset: u32 = 16;
/// Mask (5 bits: 0b11111 << 16)
pub const mask: u32 = 0b11111 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Tx FIFO/Queue Full
pub mod TFQF {
/// Offset (21 bits)
pub const offset: u32 = 21;
/// Mask (1 bit: 1 << 21)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
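// Illustrative sketch, not generated code: check whether the Tx FIFO/queue is
// full and, if not, report the put index from a value read from FDCAN_TXFQS.
pub fn example_txfqs_put_index(txfqs_value: u32) -> Option<u32> {
    if (txfqs_value & FDCAN_TXFQS::TFQF::mask) != 0 {
        None // FIFO/queue full, no free slot
    } else {
        Some((txfqs_value & FDCAN_TXFQS::TFQPI::mask) >> FDCAN_TXFQS::TFQPI::offset)
    }
}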
/// FDCAN Tx Buffer Element Size Configuration Register
pub mod FDCAN_TXESC {
/// Tx Buffer Data Field Size:
pub mod TBDS {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (3 bits: 0b111 << 0)
pub const mask: u32 = 0b111 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
/// FDCAN Tx Buffer Request Pending Register
pub mod FDCAN_TXBRP {
/// Transmission Request Pending
pub mod TRP {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (32 bits: 0xffffffff << 0)
pub const mask: u32 = 0xffffffff << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
/// FDCAN Tx Buffer Add Request Register
pub mod FDCAN_TXBAR {
/// Add Request
pub mod AR {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (32 bits: 0xffffffff << 0)
pub const mask: u32 = 0xffffffff << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
/// FDCAN Tx Buffer Cancellation Request Register
pub mod FDCAN_TXBCR {
/// Cancellation Request
pub mod CR {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (32 bits: 0xffffffff << 0)
pub const mask: u32 = 0xffffffff << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
/// FDCAN Tx Buffer Transmission Occurred Register
pub mod FDCAN_TXBTO {
/// Transmission Occurred.
pub mod TO {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (32 bits: 0xffffffff << 0)
pub const mask: u32 = 0xffffffff << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
/// FDCAN Tx Buffer Cancellation Finished Register
pub mod FDCAN_TXBCF {
/// Cancellation Finished
pub mod CF {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (32 bits: 0xffffffff << 0)
pub const mask: u32 = 0xffffffff << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
/// FDCAN Tx Buffer Transmission Interrupt Enable Register
pub mod FDCAN_TXBTIE {
/// Transmission Interrupt Enable
pub mod TIE {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (32 bits: 0xffffffff << 0)
pub const mask: u32 = 0xffffffff << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
/// FDCAN Tx Buffer Cancellation Finished Interrupt Enable Register
pub mod FDCAN_TXBCIE {
/// Cancellation Finished Interrupt Enable
pub mod CF {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (32 bits: 0xffffffff << 0)
pub const mask: u32 = 0xffffffff << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
/// FDCAN Tx Event FIFO Configuration Register
pub mod FDCAN_TXEFC {
/// Event FIFO Start Address
pub mod EFSA {
/// Offset (2 bits)
pub const offset: u32 = 2;
/// Mask (14 bits: 0x3fff << 2)
pub const mask: u32 = 0x3fff << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Event FIFO Size
pub mod EFS {
/// Offset (16 bits)
pub const offset: u32 = 16;
/// Mask (6 bits: 0x3f << 16)
pub const mask: u32 = 0x3f << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Event FIFO Watermark
pub mod EFWM {
/// Offset (24 bits)
pub const offset: u32 = 24;
/// Mask (6 bits: 0x3f << 24)
pub const mask: u32 = 0x3f << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
/// FDCAN Tx Event FIFO Status Register
pub mod FDCAN_TXEFS {
/// Event FIFO Fill Level
pub mod EFFL {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (6 bits: 0x3f << 0)
pub const mask: u32 = 0x3f << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Event FIFO Get Index.
pub mod EFGI {
/// Offset (8 bits)
pub const offset: u32 = 8;
/// Mask (5 bits: 0b11111 << 8)
pub const mask: u32 = 0b11111 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Event FIFO Full.
pub mod EFF {
/// Offset (24 bits)
pub const offset: u32 = 24;
/// Mask (1 bit: 1 << 24)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Tx Event FIFO Element Lost.
pub mod TEFL {
/// Offset (25 bits)
pub const offset: u32 = 25;
/// Mask (1 bit: 1 << 25)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
/// FDCAN Tx Event FIFO Acknowledge Register
pub mod FDCAN_TXEFA {
/// Event FIFO Acknowledge Index
pub mod EFAI {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (5 bits: 0b11111 << 0)
pub const mask: u32 = 0b11111 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
/// FDCAN TT Trigger Memory Configuration Register
pub mod FDCAN_TTTMC {
/// Trigger Memory Start Address
pub mod TMSA {
/// Offset (2 bits)
pub const offset: u32 = 2;
/// Mask (14 bits: 0x3fff << 2)
pub const mask: u32 = 0x3fff << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Trigger Memory Elements
pub mod TME {
/// Offset (16 bits)
pub const offset: u32 = 16;
/// Mask (7 bits: 0x7f << 16)
pub const mask: u32 = 0x7f << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
/// FDCAN TT Reference Message Configuration Register
pub mod FDCAN_TTRMC {
/// Reference Identifier.
pub mod RID {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (29 bits: 0x1fffffff << 0)
pub const mask: u32 = 0x1fffffff << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Extended Identifier
pub mod XTD {
/// Offset (30 bits)
pub const offset: u32 = 30;
/// Mask (1 bit: 1 << 30)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Reference Message Payload Select
pub mod RMPS {
/// Offset (31 bits)
pub const offset: u32 = 31;
/// Mask (1 bit: 1 << 31)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
/// FDCAN TT Operation Configuration Register
pub mod FDCAN_TTOCF {
/// Operation Mode
pub mod OM {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (2 bits: 0b11 << 0)
pub const mask: u32 = 0b11 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Gap Enable
pub mod GEN {
/// Offset (3 bits)
pub const offset: u32 = 3;
/// Mask (1 bit: 1 << 3)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Time Master
pub mod TM {
/// Offset (4 bits)
pub const offset: u32 = 4;
/// Mask (1 bit: 1 << 4)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// LD of Synchronization Deviation Limit
pub mod LDSDL {
/// Offset (5 bits)
pub const offset: u32 = 5;
/// Mask (3 bits: 0b111 << 5)
pub const mask: u32 = 0b111 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Initial Reference Trigger Offset
pub mod IRTO {
/// Offset (8 bits)
pub const offset: u32 = 8;
/// Mask (7 bits: 0x7f << 8)
pub const mask: u32 = 0x7f << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Enable External Clock Synchronization
pub mod EECS {
/// Offset (15 bits)
pub const offset: u32 = 15;
/// Mask (1 bit: 1 << 15)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Application Watchdog Limit
pub mod AWL {
/// Offset (16 bits)
pub const offset: u32 = 16;
/// Mask (8 bits: 0xff << 16)
pub const mask: u32 = 0xff << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Enable Global Time Filtering
pub mod EGTF {
/// Offset (24 bits)
pub const offset: u32 = 24;
/// Mask (1 bit: 1 << 24)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Enable Clock Calibration
pub mod ECC {
/// Offset (25 bits)
pub const offset: u32 = 25;
/// Mask (1 bit: 1 << 25)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Event Trigger Polarity
pub mod EVTP {
/// Offset (26 bits)
pub const offset: u32 = 26;
/// Mask (1 bit: 1 << 26)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
/// FDCAN TT Matrix Limits Register
pub mod FDCAN_TTMLM {
/// Cycle Count Max
pub mod CCM {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (6 bits: 0x3f << 0)
pub const mask: u32 = 0x3f << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Cycle Start Synchronization
pub mod CSS {
/// Offset (6 bits)
pub const offset: u32 = 6;
/// Mask (2 bits: 0b11 << 6)
pub const mask: u32 = 0b11 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Tx Enable Window
pub mod TXEW {
/// Offset (8 bits)
pub const offset: u32 = 8;
/// Mask (4 bits: 0b1111 << 8)
pub const mask: u32 = 0b1111 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Expected Number of Tx Triggers
pub mod ENTT {
/// Offset (16 bits)
pub const offset: u32 = 16;
/// Mask (12 bits: 0xfff << 16)
pub const mask: u32 = 0xfff << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
/// FDCAN TUR Configuration Register
pub mod FDCAN_TURCF {
/// Numerator Configuration Low.
pub mod NCL {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (16 bits: 0xffff << 0)
pub const mask: u32 = 0xffff << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Denominator Configuration.
pub mod DC {
/// Offset (16 bits)
pub const offset: u32 = 16;
/// Mask (14 bits: 0x3fff << 16)
pub const mask: u32 = 0x3fff << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Enable Local Time
pub mod ELT {
/// Offset (31 bits)
pub const offset: u32 = 31;
/// Mask (1 bit: 1 << 31)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
/// FDCAN TT Operation Control Register
pub mod FDCAN_TTOCN {
/// Set Global time
pub mod SGT {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (1 bit: 1 << 0)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// External Clock Synchronization
pub mod ECS {
/// Offset (1 bits)
pub const offset: u32 = 1;
/// Mask (1 bit: 1 << 1)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Stop Watch Polarity
pub mod SWP {
/// Offset (2 bits)
pub const offset: u32 = 2;
/// Mask (1 bit: 1 << 2)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Stop Watch Source.
pub mod SWS {
/// Offset (3 bits)
pub const offset: u32 = 3;
/// Mask (2 bits: 0b11 << 3)
pub const mask: u32 = 0b11 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Register Time Mark Interrupt Pulse Enable
pub mod RTIE {
/// Offset (5 bits)
pub const offset: u32 = 5;
/// Mask (1 bit: 1 << 5)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Register Time Mark Compare
pub mod TMC {
/// Offset (6 bits)
pub const offset: u32 = 6;
/// Mask (2 bits: 0b11 << 6)
pub const mask: u32 = 0b11 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Trigger Time Mark Interrupt Pulse Enable
pub mod TTIE {
/// Offset (8 bits)
pub const offset: u32 = 8;
/// Mask (1 bit: 1 << 8)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Gap Control Select
pub mod GCS {
/// Offset (9 bits)
pub const offset: u32 = 9;
/// Mask (1 bit: 1 << 9)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Finish Gap.
pub mod FGP {
/// Offset (10 bits)
pub const offset: u32 = 10;
/// Mask (1 bit: 1 << 10)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Time Mark Gap
pub mod TMG {
/// Offset (11 bits)
pub const offset: u32 = 11;
/// Mask (1 bit: 1 << 11)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Next is Gap
pub mod NIG {
/// Offset (12 bits)
pub const offset: u32 = 12;
/// Mask (1 bit: 1 << 12)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// External Synchronization Control
pub mod ESCN {
/// Offset (13 bits)
pub const offset: u32 = 13;
/// Mask (1 bit: 1 << 13)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// TT Operation Control Register Locked
pub mod LCKC {
/// Offset (15 bits)
pub const offset: u32 = 15;
/// Mask (1 bit: 1 << 15)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
/// FDCAN TT Global Time Preset Register
pub mod CAN_TTGTP {
/// Time Preset
pub mod NCL {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (16 bits: 0xffff << 0)
pub const mask: u32 = 0xffff << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Cycle Time Target Phase
pub mod CTP {
/// Offset (16 bits)
pub const offset: u32 = 16;
/// Mask (16 bits: 0xffff << 16)
pub const mask: u32 = 0xffff << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
/// FDCAN TT Time Mark Register
pub mod FDCAN_TTTMK {
/// Time Mark
pub mod TM {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (16 bits: 0xffff << 0)
pub const mask: u32 = 0xffff << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Time Mark Cycle Code
pub mod TICC {
/// Offset (16 bits)
pub const offset: u32 = 16;
/// Mask (7 bits: 0x7f << 16)
pub const mask: u32 = 0x7f << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// TT Time Mark Register Locked
pub mod LCKM {
/// Offset (31 bits)
pub const offset: u32 = 31;
/// Mask (1 bit: 1 << 31)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
/// FDCAN TT Interrupt Register
pub mod FDCAN_TTIR {
/// Start of Basic Cycle
pub mod SBC {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (1 bit: 1 << 0)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Start of Matrix Cycle
pub mod SMC {
/// Offset (1 bits)
pub const offset: u32 = 1;
/// Mask (1 bit: 1 << 1)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Change of Synchronization Mode
pub mod CSM {
/// Offset (2 bits)
pub const offset: u32 = 2;
/// Mask (1 bit: 1 << 2)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Start of Gap
pub mod SOG {
/// Offset (3 bits)
pub const offset: u32 = 3;
/// Mask (1 bit: 1 << 3)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Register Time Mark Interrupt.
pub mod RTMI {
/// Offset (4 bits)
pub const offset: u32 = 4;
/// Mask (1 bit: 1 << 4)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Trigger Time Mark Event Internal
pub mod TTMI {
/// Offset (5 bits)
pub const offset: u32 = 5;
/// Mask (1 bit: 1 << 5)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Stop Watch Event
pub mod SWE {
/// Offset (6 bits)
pub const offset: u32 = 6;
/// Mask (1 bit: 1 << 6)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Global Time Wrap
pub mod GTW {
/// Offset (7 bits)
pub const offset: u32 = 7;
/// Mask (1 bit: 1 << 7)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Global Time Discontinuity
pub mod GTD {
/// Offset (8 bits)
pub const offset: u32 = 8;
/// Mask (1 bit: 1 << 8)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Global Time Error
pub mod GTE {
/// Offset (9 bits)
pub const offset: u32 = 9;
/// Mask (1 bit: 1 << 9)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Tx Count Underflow
pub mod TXU {
/// Offset (10 bits)
pub const offset: u32 = 10;
/// Mask (1 bit: 1 << 10)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Tx Count Overflow
pub mod TXO {
/// Offset (11 bits)
pub const offset: u32 = 11;
/// Mask (1 bit: 1 << 11)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Scheduling Error 1
pub mod SE1 {
/// Offset (12 bits)
pub const offset: u32 = 12;
/// Mask (1 bit: 1 << 12)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Scheduling Error 2
pub mod SE2 {
/// Offset (13 bits)
pub const offset: u32 = 13;
/// Mask (1 bit: 1 << 13)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Error Level Changed.
pub mod ELC {
/// Offset (14 bits)
pub const offset: u32 = 14;
/// Mask (1 bit: 1 << 14)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Initialization Watch Trigger
pub mod IWTG {
/// Offset (15 bits)
pub const offset: u32 = 15;
/// Mask (1 bit: 1 << 15)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Watch Trigger
pub mod WT {
/// Offset (16 bits)
pub const offset: u32 = 16;
/// Mask (1 bit: 1 << 16)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Application Watchdog
pub mod AW {
/// Offset (17 bits)
pub const offset: u32 = 17;
/// Mask (1 bit: 1 << 17)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Configuration Error
pub mod CER {
/// Offset (18 bits)
pub const offset: u32 = 18;
/// Mask (1 bit: 1 << 18)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
/// FDCAN TT Interrupt Enable Register
pub mod FDCAN_TTIE {
/// Start of Basic Cycle Interrupt Enable
pub mod SBCE {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (1 bit: 1 << 0)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Start of Matrix Cycle Interrupt Enable
pub mod SMCE {
/// Offset (1 bits)
pub const offset: u32 = 1;
/// Mask (1 bit: 1 << 1)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Change of Synchronization Mode Interrupt Enable
pub mod CSME {
/// Offset (2 bits)
pub const offset: u32 = 2;
/// Mask (1 bit: 1 << 2)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Start of Gap Interrupt Enable
pub mod SOGE {
/// Offset (3 bits)
pub const offset: u32 = 3;
/// Mask (1 bit: 1 << 3)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Register Time Mark Interrupt Enable
pub mod RTMIE {
/// Offset (4 bits)
pub const offset: u32 = 4;
/// Mask (1 bit: 1 << 4)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Trigger Time Mark Event Internal Interrupt Enable
pub mod TTMIE {
/// Offset (5 bits)
pub const offset: u32 = 5;
/// Mask (1 bit: 1 << 5)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Stop Watch Event Interrupt Enable
pub mod SWEE {
/// Offset (6 bits)
pub const offset: u32 = 6;
/// Mask (1 bit: 1 << 6)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Global Time Wrap Interrupt Enable
pub mod GTWE {
/// Offset (7 bits)
pub const offset: u32 = 7;
/// Mask (1 bit: 1 << 7)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Global Time Discontinuity Interrupt Enable
pub mod GTDE {
/// Offset (8 bits)
pub const offset: u32 = 8;
/// Mask (1 bit: 1 << 8)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Global Time Error Interrupt Enable
pub mod GTEE {
/// Offset (9 bits)
pub const offset: u32 = 9;
/// Mask (1 bit: 1 << 9)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Tx Count Underflow Interrupt Enable
pub mod TXUE {
/// Offset (10 bits)
pub const offset: u32 = 10;
/// Mask (1 bit: 1 << 10)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Tx Count Overflow Interrupt Enable
pub mod TXOE {
/// Offset (11 bits)
pub const offset: u32 = 11;
/// Mask (1 bit: 1 << 11)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Scheduling Error 1 Interrupt Enable
pub mod SE1E {
/// Offset (12 bits)
pub const offset: u32 = 12;
/// Mask (1 bit: 1 << 12)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Scheduling Error 2 Interrupt Enable
pub mod SE2E {
/// Offset (13 bits)
pub const offset: u32 = 13;
/// Mask (1 bit: 1 << 13)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Change Error Level Interrupt Enable
pub mod ELCE {
/// Offset (14 bits)
pub const offset: u32 = 14;
/// Mask (1 bit: 1 << 14)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Initialization Watch Trigger Interrupt Enable
pub mod IWTGE {
/// Offset (15 bits)
pub const offset: u32 = 15;
/// Mask (1 bit: 1 << 15)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Watch Trigger Interrupt Enable
pub mod WTE {
/// Offset (16 bits)
pub const offset: u32 = 16;
/// Mask (1 bit: 1 << 16)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Application Watchdog Interrupt Enable
pub mod AWE {
/// Offset (17 bits)
pub const offset: u32 = 17;
/// Mask (1 bit: 1 << 17)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Configuration Error Interrupt Enable
pub mod CERE {
/// Offset (18 bits)
pub const offset: u32 = 18;
/// Mask (1 bit: 1 << 18)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
/// FDCAN TT Interrupt Line Select Register
pub mod FDCAN_TTILS {
/// Start of Basic Cycle Interrupt Line
pub mod SBCL {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (1 bit: 1 << 0)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Start of Matrix Cycle Interrupt Line
pub mod SMCL {
/// Offset (1 bits)
pub const offset: u32 = 1;
/// Mask (1 bit: 1 << 1)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Change of Synchronization Mode Interrupt Line
pub mod CSML {
/// Offset (2 bits)
pub const offset: u32 = 2;
/// Mask (1 bit: 1 << 2)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Start of Gap Interrupt Line
pub mod SOGL {
/// Offset (3 bits)
pub const offset: u32 = 3;
/// Mask (1 bit: 1 << 3)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Register Time Mark Interrupt Line
pub mod RTMIL {
/// Offset (4 bits)
pub const offset: u32 = 4;
/// Mask (1 bit: 1 << 4)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Trigger Time Mark Event Internal Interrupt Line
pub mod TTMIL {
/// Offset (5 bits)
pub const offset: u32 = 5;
/// Mask (1 bit: 1 << 5)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Stop Watch Event Interrupt Line
pub mod SWEL {
/// Offset (6 bits)
pub const offset: u32 = 6;
/// Mask (1 bit: 1 << 6)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Global Time Wrap Interrupt Line
pub mod GTWL {
/// Offset (7 bits)
pub const offset: u32 = 7;
/// Mask (1 bit: 1 << 7)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Global Time Discontinuity Interrupt Line
pub mod GTDL {
/// Offset (8 bits)
pub const offset: u32 = 8;
/// Mask (1 bit: 1 << 8)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Global Time Error Interrupt Line
pub mod GTEL {
/// Offset (9 bits)
pub const offset: u32 = 9;
/// Mask (1 bit: 1 << 9)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Tx Count Underflow Interrupt Line
pub mod TXUL {
/// Offset (10 bits)
pub const offset: u32 = 10;
/// Mask (1 bit: 1 << 10)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Tx Count Overflow Interrupt Line
pub mod TXOL {
/// Offset (11 bits)
pub const offset: u32 = 11;
/// Mask (1 bit: 1 << 11)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Scheduling Error 1 Interrupt Line
pub mod SE1L {
/// Offset (12 bits)
pub const offset: u32 = 12;
/// Mask (1 bit: 1 << 12)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Scheduling Error 2 Interrupt Line
pub mod SE2L {
/// Offset (13 bits)
pub const offset: u32 = 13;
/// Mask (1 bit: 1 << 13)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Change Error Level Interrupt Line
pub mod ELCL {
/// Offset (14 bits)
pub const offset: u32 = 14;
/// Mask (1 bit: 1 << 14)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Initialization Watch Trigger Interrupt Line
pub mod IWTGL {
/// Offset (15 bits)
pub const offset: u32 = 15;
/// Mask (1 bit: 1 << 15)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Watch Trigger Interrupt Line
pub mod WTL {
/// Offset (16 bits)
pub const offset: u32 = 16;
/// Mask (1 bit: 1 << 16)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Application Watchdog Interrupt Line
pub mod AWL {
/// Offset (17 bits)
pub const offset: u32 = 17;
/// Mask (1 bit: 1 << 17)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Configuration Error Interrupt Line
pub mod CERL {
/// Offset (18 bits)
pub const offset: u32 = 18;
/// Mask (1 bit: 1 << 18)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
/// FDCAN TT Operation Status Register
pub mod FDCAN_TTOST {
/// Error Level
pub mod EL {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (2 bits: 0b11 << 0)
pub const mask: u32 = 0b11 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Master State.
pub mod MS {
/// Offset (2 bits)
pub const offset: u32 = 2;
/// Mask (2 bits: 0b11 << 2)
pub const mask: u32 = 0b11 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Synchronization State
pub mod SYS {
/// Offset (4 bits)
pub const offset: u32 = 4;
/// Mask (2 bits: 0b11 << 4)
pub const mask: u32 = 0b11 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Quality of Global Time Phase
pub mod GTP {
/// Offset (6 bits)
pub const offset: u32 = 6;
/// Mask (1 bit: 1 << 6)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Quality of Clock Speed
pub mod QCS {
/// Offset (7 bits)
pub const offset: u32 = 7;
/// Mask (1 bit: 1 << 7)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Reference Trigger Offset
pub mod RTO {
/// Offset (8 bits)
pub const offset: u32 = 8;
/// Mask (8 bits: 0xff << 8)
pub const mask: u32 = 0xff << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Wait for Global Time Discontinuity
pub mod WGTD {
/// Offset (22 bits)
pub const offset: u32 = 22;
/// Mask (1 bit: 1 << 22)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Gap Finished Indicator.
pub mod GFI {
/// Offset (23 bits)
pub const offset: u32 = 23;
/// Mask (1 bit: 1 << 23)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Time Master Priority
pub mod TMP {
/// Offset (24 bits)
pub const offset: u32 = 24;
/// Mask (3 bits: 0b111 << 24)
pub const mask: u32 = 0b111 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Gap Started Indicator.
pub mod GSI {
/// Offset (27 bits)
pub const offset: u32 = 27;
/// Mask (1 bit: 1 << 27)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Wait for Event
pub mod WFE {
/// Offset (28 bits)
pub const offset: u32 = 28;
/// Mask (1 bit: 1 << 28)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Application Watchdog Event
pub mod AWE {
/// Offset (29 bits)
pub const offset: u32 = 29;
/// Mask (1 bit: 1 << 29)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Wait for External Clock Synchronization
pub mod WECS {
/// Offset (30 bits)
pub const offset: u32 = 30;
/// Mask (1 bit: 1 << 30)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Schedule Phase Lock
pub mod SPL {
/// Offset (31 bits)
pub const offset: u32 = 31;
/// Mask (1 bit: 1 << 31)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
/// FDCAN TUR Numerator Actual Register
pub mod FDCAN_TURNA {
/// Numerator Actual Value
pub mod NAV {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (18 bits: 0x3ffff << 0)
pub const mask: u32 = 0x3ffff << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
/// FDCAN TT Local and Global Time Register
pub mod FDCAN_TTLGT {
/// Local Time
pub mod LT {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (16 bits: 0xffff << 0)
pub const mask: u32 = 0xffff << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Global Time
pub mod GT {
/// Offset (16 bits)
pub const offset: u32 = 16;
/// Mask (16 bits: 0xffff << 16)
pub const mask: u32 = 0xffff << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
/// FDCAN TT Cycle Time and Count Register
pub mod FDCAN_TTCTC {
/// Cycle Time
pub mod CT {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (16 bits: 0xffff << 0)
pub const mask: u32 = 0xffff << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Cycle Count
pub mod CC {
/// Offset (16 bits)
pub const offset: u32 = 16;
/// Mask (6 bits: 0x3f << 16)
pub const mask: u32 = 0x3f << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
/// FDCAN TT Capture Time Register
pub mod FDCAN_TTCPT {
/// Cycle Count Value
pub mod CT {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (6 bits: 0x3f << 0)
pub const mask: u32 = 0x3f << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Stop Watch Value
pub mod SWV {
/// Offset (16 bits)
pub const offset: u32 = 16;
/// Mask (16 bits: 0xffff << 16)
pub const mask: u32 = 0xffff << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
/// FDCAN TT Cycle Sync Mark Register
pub mod FDCAN_TTCSM {
/// Cycle Sync Mark
pub mod CSM {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (16 bits: 0xffff << 0)
pub const mask: u32 = 0xffff << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
/// FDCAN TT Trigger Select Register
pub mod FDCAN_TTTS {
/// Stop watch trigger input selection
pub mod SWTDEL {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (2 bits: 0b11 << 0)
pub const mask: u32 = 0b11 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Event trigger input selection
pub mod EVTSEL {
/// Offset (4 bits)
pub const offset: u32 = 4;
/// Mask (2 bits: 0b11 << 4)
pub const mask: u32 = 0b11 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
#[repr(C)]
pub struct RegisterBlock {
/// FDCAN Core Release Register
pub FDCAN_CREL: RORegister<u32>,
    /// FDCAN Endian Register
pub FDCAN_ENDN: RORegister<u32>,
_reserved1: [u32; 1],
/// FDCAN Data Bit Timing and Prescaler Register
pub FDCAN_DBTP: RORegister<u32>,
/// FDCAN Test Register
pub FDCAN_TEST: RORegister<u32>,
/// FDCAN RAM Watchdog Register
pub FDCAN_RWD: RORegister<u32>,
/// FDCAN CC Control Register
pub FDCAN_CCCR: RWRegister<u32>,
/// FDCAN Nominal Bit Timing and Prescaler Register
pub FDCAN_NBTP: RWRegister<u32>,
/// FDCAN Timestamp Counter Configuration Register
pub FDCAN_TSCC: RWRegister<u32>,
/// FDCAN Timestamp Counter Value Register
pub FDCAN_TSCV: RWRegister<u32>,
/// FDCAN Timeout Counter Configuration Register
pub FDCAN_TOCC: RWRegister<u32>,
/// FDCAN Timeout Counter Value Register
pub FDCAN_TOCV: RWRegister<u32>,
_reserved2: [u32; 4],
/// FDCAN Error Counter Register
pub FDCAN_ECR: RWRegister<u32>,
/// FDCAN Protocol Status Register
pub FDCAN_PSR: RWRegister<u32>,
/// FDCAN Transmitter Delay Compensation Register
pub FDCAN_TDCR: RORegister<u32>,
_reserved3: [u32; 1],
/// FDCAN Interrupt Register
pub FDCAN_IR: RORegister<u32>,
/// FDCAN Interrupt Enable Register
pub FDCAN_IE: RWRegister<u32>,
/// FDCAN Interrupt Line Select Register
pub FDCAN_ILS: RORegister<u32>,
/// FDCAN Interrupt Line Enable Register
pub FDCAN_ILE: RWRegister<u32>,
_reserved4: [u32; 8],
/// FDCAN Global Filter Configuration Register
pub FDCAN_GFC: RWRegister<u32>,
/// FDCAN Standard ID Filter Configuration Register
pub FDCAN_SIDFC: RWRegister<u32>,
/// FDCAN Extended ID Filter Configuration Register
pub FDCAN_XIDFC: RWRegister<u32>,
_reserved5: [u32; 1],
/// FDCAN Extended ID and Mask Register
pub FDCAN_XIDAM: RWRegister<u32>,
/// FDCAN High Priority Message Status Register
pub FDCAN_HPMS: RORegister<u32>,
/// FDCAN New Data 1 Register
pub FDCAN_NDAT1: RORegister<u32>,
/// FDCAN New Data 2 Register
pub FDCAN_NDAT2: RORegister<u32>,
/// FDCAN Rx FIFO 0 Configuration Register
pub FDCAN_RXF0C: RWRegister<u32>,
/// FDCAN Rx FIFO 0 Status Register
pub FDCAN_RXF0S: RWRegister<u32>,
/// CAN Rx FIFO 0 Acknowledge Register
pub FDCAN_RXF0A: RWRegister<u32>,
/// FDCAN Rx Buffer Configuration Register
pub FDCAN_RXBC: RWRegister<u32>,
/// FDCAN Rx FIFO 1 Configuration Register
pub FDCAN_RXF1C: RWRegister<u32>,
/// FDCAN Rx FIFO 1 Status Register
pub FDCAN_RXF1S: RWRegister<u32>,
/// FDCAN Rx FIFO 1 Acknowledge Register
pub FDCAN_RXF1A: RWRegister<u32>,
/// FDCAN Rx Buffer Element Size Configuration Register
pub FDCAN_RXESC: RWRegister<u32>,
/// FDCAN Tx Buffer Configuration Register
pub FDCAN_TXBC: RWRegister<u32>,
/// FDCAN Tx FIFO/Queue Status Register
pub FDCAN_TXFQS: RORegister<u32>,
/// FDCAN Tx Buffer Element Size Configuration Register
pub FDCAN_TXESC: RWRegister<u32>,
/// FDCAN Tx Buffer Request Pending Register
pub FDCAN_TXBRP: RORegister<u32>,
/// FDCAN Tx Buffer Add Request Register
pub FDCAN_TXBAR: RWRegister<u32>,
/// FDCAN Tx Buffer Cancellation Request Register
pub FDCAN_TXBCR: RWRegister<u32>,
/// FDCAN Tx Buffer Transmission Occurred Register
pub FDCAN_TXBTO: RWRegister<u32>,
/// FDCAN Tx Buffer Cancellation Finished Register
pub FDCAN_TXBCF: RORegister<u32>,
/// FDCAN Tx Buffer Transmission Interrupt Enable Register
pub FDCAN_TXBTIE: RWRegister<u32>,
/// FDCAN Tx Buffer Cancellation Finished Interrupt Enable Register
pub FDCAN_TXBCIE: RWRegister<u32>,
_reserved6: [u32; 2],
/// FDCAN Tx Event FIFO Configuration Register
pub FDCAN_TXEFC: RWRegister<u32>,
/// FDCAN Tx Event FIFO Status Register
pub FDCAN_TXEFS: RWRegister<u32>,
/// FDCAN Tx Event FIFO Acknowledge Register
pub FDCAN_TXEFA: RWRegister<u32>,
_reserved7: [u32; 1],
/// FDCAN TT Trigger Memory Configuration Register
pub FDCAN_TTTMC: RWRegister<u32>,
/// FDCAN TT Reference Message Configuration Register
pub FDCAN_TTRMC: RWRegister<u32>,
/// FDCAN TT Operation Configuration Register
pub FDCAN_TTOCF: RWRegister<u32>,
/// FDCAN TT Matrix Limits Register
pub FDCAN_TTMLM: RWRegister<u32>,
/// FDCAN TUR Configuration Register
pub FDCAN_TURCF: RWRegister<u32>,
/// FDCAN TT Operation Control Register
pub FDCAN_TTOCN: RWRegister<u32>,
/// FDCAN TT Global Time Preset Register
pub CAN_TTGTP: RWRegister<u32>,
/// FDCAN TT Time Mark Register
pub FDCAN_TTTMK: RWRegister<u32>,
/// FDCAN TT Interrupt Register
pub FDCAN_TTIR: RWRegister<u32>,
/// FDCAN TT Interrupt Enable Register
pub FDCAN_TTIE: RWRegister<u32>,
/// FDCAN TT Interrupt Line Select Register
pub FDCAN_TTILS: RWRegister<u32>,
/// FDCAN TT Operation Status Register
pub FDCAN_TTOST: RORegister<u32>,
/// FDCAN TUR Numerator Actual Register
pub FDCAN_TURNA: RORegister<u32>,
/// FDCAN TT Local and Global Time Register
pub FDCAN_TTLGT: RORegister<u32>,
/// FDCAN TT Cycle Time and Count Register
pub FDCAN_TTCTC: RORegister<u32>,
/// FDCAN TT Capture Time Register
pub FDCAN_TTCPT: RORegister<u32>,
/// FDCAN TT Cycle Sync Mark Register
pub FDCAN_TTCSM: RORegister<u32>,
_reserved8: [u32; 111],
/// FDCAN TT Trigger Select Register
pub FDCAN_TTTS: RWRegister<u32>,
}
pub struct ResetValues {
pub FDCAN_CREL: u32,
pub FDCAN_ENDN: u32,
pub FDCAN_DBTP: u32,
pub FDCAN_TEST: u32,
pub FDCAN_RWD: u32,
pub FDCAN_CCCR: u32,
pub FDCAN_NBTP: u32,
pub FDCAN_TSCC: u32,
pub FDCAN_TSCV: u32,
pub FDCAN_TOCC: u32,
pub FDCAN_TOCV: u32,
pub FDCAN_ECR: u32,
pub FDCAN_PSR: u32,
pub FDCAN_TDCR: u32,
pub FDCAN_IR: u32,
pub FDCAN_IE: u32,
pub FDCAN_ILS: u32,
pub FDCAN_ILE: u32,
pub FDCAN_GFC: u32,
pub FDCAN_SIDFC: u32,
pub FDCAN_XIDFC: u32,
pub FDCAN_XIDAM: u32,
pub FDCAN_HPMS: u32,
pub FDCAN_NDAT1: u32,
pub FDCAN_NDAT2: u32,
pub FDCAN_RXF0C: u32,
pub FDCAN_RXF0S: u32,
pub FDCAN_RXF0A: u32,
pub FDCAN_RXBC: u32,
pub FDCAN_RXF1C: u32,
pub FDCAN_RXF1S: u32,
pub FDCAN_RXF1A: u32,
pub FDCAN_RXESC: u32,
pub FDCAN_TXBC: u32,
pub FDCAN_TXFQS: u32,
pub FDCAN_TXESC: u32,
pub FDCAN_TXBRP: u32,
pub FDCAN_TXBAR: u32,
pub FDCAN_TXBCR: u32,
pub FDCAN_TXBTO: u32,
pub FDCAN_TXBCF: u32,
pub FDCAN_TXBTIE: u32,
pub FDCAN_TXBCIE: u32,
pub FDCAN_TXEFC: u32,
pub FDCAN_TXEFS: u32,
pub FDCAN_TXEFA: u32,
pub FDCAN_TTTMC: u32,
pub FDCAN_TTRMC: u32,
pub FDCAN_TTOCF: u32,
pub FDCAN_TTMLM: u32,
pub FDCAN_TURCF: u32,
pub FDCAN_TTOCN: u32,
pub CAN_TTGTP: u32,
pub FDCAN_TTTMK: u32,
pub FDCAN_TTIR: u32,
pub FDCAN_TTIE: u32,
pub FDCAN_TTILS: u32,
pub FDCAN_TTOST: u32,
pub FDCAN_TURNA: u32,
pub FDCAN_TTLGT: u32,
pub FDCAN_TTCTC: u32,
pub FDCAN_TTCPT: u32,
pub FDCAN_TTCSM: u32,
pub FDCAN_TTTS: u32,
}
#[cfg(not(feature = "nosync"))]
pub struct | {
pub(crate) addr: u32,
pub(crate) _marker: PhantomData<*const RegisterBlock>,
}
#[cfg(not(feature = "nosync"))]
impl ::core::ops::Deref for Instance {
type Target = RegisterBlock;
#[inline(always)]
fn deref(&self) -> &RegisterBlock {
unsafe { &*(self.addr as *const _) }
}
}
#[cfg(feature = "rtic")]
unsafe impl Send for Instance {}
/// Access functions for the CAN_CCU peripheral instance
pub mod CAN_CCU {
use super::ResetValues;
#[cfg(not(feature = "nosync"))]
use super::Instance;
#[cfg(not(feature = "nosync"))]
const INSTANCE: Instance = Instance {
addr: 0x4000a800,
_marker: ::core::marker::PhantomData,
};
/// Reset values for each field in CAN_CCU
pub const reset: ResetValues = ResetValues {
FDCAN_CREL: 0x32141218,
FDCAN_ENDN: 0x87654321,
FDCAN_DBTP: 0x00000A33,
FDCAN_TEST: 0x00000000,
FDCAN_RWD: 0x00000000,
FDCAN_CCCR: 0x00000001,
FDCAN_NBTP: 0x00000A33,
FDCAN_TSCC: 0x00000000,
FDCAN_TSCV: 0x00000000,
FDCAN_TOCC: 0xFFFF0000,
FDCAN_TOCV: 0x0000FFFF,
FDCAN_ECR: 0x00000000,
FDCAN_PSR: 0x00000707,
FDCAN_TDCR: 0x00000000,
FDCAN_IR: 0x00000000,
FDCAN_IE: 0x00000000,
FDCAN_ILS: 0x00000000,
FDCAN_ILE: 0x00000000,
FDCAN_GFC: 0x00000000,
FDCAN_SIDFC: 0x00000000,
FDCAN_XIDFC: 0x00000000,
FDCAN_XIDAM: 0x00000000,
FDCAN_HPMS: 0x00000000,
FDCAN_NDAT1: 0x00000000,
FDCAN_NDAT2: 0x00000000,
FDCAN_RXF0C: 0x00000000,
FDCAN_RXF0S: 0x00000000,
FDCAN_RXF0A: 0x00000000,
FDCAN_RXBC: 0x00000000,
FDCAN_RXF1C: 0x00000000,
FDCAN_RXF1S: 0x00000000,
FDCAN_RXF1A: 0x00000000,
FDCAN_RXESC: 0x00000000,
FDCAN_TXBC: 0x00000000,
FDCAN_TXFQS: 0x00000000,
FDCAN_TXESC: 0x00000000,
FDCAN_TXBRP: 0x00000000,
FDCAN_TXBAR: 0x00000000,
FDCAN_TXBCR: 0x00000000,
FDCAN_TXBTO: 0x00000000,
FDCAN_TXBCF: 0x00000000,
FDCAN_TXBTIE: 0x00000000,
FDCAN_TXBCIE: 0x00000000,
FDCAN_TXEFC: 0x00000000,
FDCAN_TXEFS: 0x00000000,
FDCAN_TXEFA: 0x00000000,
FDCAN_TTTMC: 0x00000000,
FDCAN_TTRMC: 0x00000000,
FDCAN_TTOCF: 0x00010000,
FDCAN_TTMLM: 0x00000000,
FDCAN_TURCF: 0x00000000,
FDCAN_TTOCN: 0x00000000,
CAN_TTGTP: 0x00000000,
FDCAN_TTTMK: 0x00000000,
FDCAN_TTIR: 0x00000000,
FDCAN_TTIE: 0x00000000,
FDCAN_TTILS: 0x00000000,
FDCAN_TTOST: 0x00000000,
FDCAN_TURNA: 0x00000000,
FDCAN_TTLGT: 0x00000000,
FDCAN_TTCTC: 0x00000000,
FDCAN_TTCPT: 0x00000000,
FDCAN_TTCSM: 0x00000000,
FDCAN_TTTS: 0x00000000,
};
#[cfg(not(feature = "nosync"))]
#[allow(renamed_and_removed_lints)]
#[allow(private_no_mangle_statics)]
#[no_mangle]
static mut CAN_CCU_TAKEN: bool = false;
/// Safe access to CAN_CCU
///
/// This function returns `Some(Instance)` if this instance is not
/// currently taken, and `None` if it is. This ensures that if you
/// do get `Some(Instance)`, you are ensured unique access to
/// the peripheral and there cannot be data races (unless other
/// code uses `unsafe`, of course). You can then pass the
/// `Instance` around to other functions as required. When you're
/// done with it, you can call `release(instance)` to return it.
///
/// `Instance` itself dereferences to a `RegisterBlock`, which
/// provides access to the peripheral's registers.
#[cfg(not(feature = "nosync"))]
#[inline]
pub fn take() -> Option<Instance> {
external_cortex_m::interrupt::free(|_| unsafe {
if CAN_CCU_TAKEN {
None
} else {
CAN_CCU_TAKEN = true;
Some(INSTANCE)
}
})
}
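    // A minimal usage sketch (hypothetical application code, not part of the
    // generated API; `read()` on `RORegister` is assumed from the underlying
    // register-access crate):
    //
    //     if let Some(ccu) = CAN_CCU::take() {
    //         let core_release = ccu.FDCAN_CREL.read(); // Instance derefs to RegisterBlock
    //         let _ = core_release;
    //         CAN_CCU::release(ccu); // hand exclusive access back
    //     }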
/// Release exclusive access to CAN_CCU
///
/// This function allows you to return an `Instance` so that it
/// is available to `take()` again. This function will panic if
/// you return a different `Instance` or if this instance is not
/// already taken.
#[cfg(not(feature = "nosync"))]
#[inline]
pub fn release(inst: Instance) {
external_cortex_m::interrupt::free(|_| unsafe {
if CAN_CCU_TAKEN && inst.addr == INSTANCE.addr {
CAN_CCU_TAKEN = false;
} else {
panic!("Released a peripheral which was not taken");
}
});
}
/// Unsafely steal CAN_CCU
///
/// This function is similar to take() but forcibly takes the
    /// Instance, marking it as taken regardless of its previous
/// state.
#[cfg(not(feature = "nosync"))]
#[inline]
pub unsafe fn steal() -> Instance {
CAN_CCU_TAKEN = true;
INSTANCE
}
}
/// Raw pointer to CAN_CCU
///
/// Dereferencing this is unsafe because you are not ensured unique
/// access to the peripheral, so you may encounter data races with
/// other users of this peripheral. It is up to you to ensure you
/// will not cause data races.
///
/// This constant is provided for ease of use in unsafe code: you can
/// simply call for example `write_reg!(gpio, GPIOA, ODR, 1);`.
pub const CAN_CCU: *const RegisterBlock = 0x4000a800 as *const _;
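// Hedged sketch of access through the raw pointer above (application code, not
// part of this module; `read()` is again assumed from the register-access crate):
//
//     let core_release = unsafe { (*CAN_CCU).FDCAN_CREL.read() };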
| Instance |
sentry.go | // Copyright © 2019 The Things Network Foundation, The Things Industries B.V.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package sentry implements a pkg/log.Handler that sends errors to Sentry
package sentry
import (
"github.com/getsentry/sentry-go"
sentryerrors "go.thethings.network/lorawan-stack/v3/pkg/errors/sentry"
"go.thethings.network/lorawan-stack/v3/pkg/log"
)
// Sentry is a log.Handler that sends errors to Sentry.
type Sentry struct{}
// New creates a new Sentry log middleware.
func N | ) log.Middleware {
return &Sentry{}
}
// Wrap an existing log handler with Sentry.
func (s *Sentry) Wrap(next log.Handler) log.Handler {
return log.HandlerFunc(func(entry log.Entry) (err error) {
if entry.Level() == log.ErrorLevel {
s.forward(entry)
}
err = next.HandleLog(entry)
return
})
}
func (s *Sentry) forward(e log.Entry) *sentry.EventID {
fields := e.Fields().Fields()
var err error
if namespaceField, ok := fields["namespace"]; ok {
switch namespaceField {
case "grpc", "web": // gRPC and web have their own Sentry integration.
return nil
}
}
if errField, ok := fields["error"]; ok {
if errField, ok := errField.(error); ok {
err = errField
}
}
evt := sentryerrors.NewEvent(err)
evt.Message = e.Message()
// Add log fields.
if fld, ok := err.(log.Fielder); ok {
errFields := fld.Fields()
for k, v := range fields {
// Filter out error fields.
if _, isErrField := errFields[k]; isErrField {
continue
}
evt.Extra[k] = v
}
} else {
for k, v := range fields {
evt.Extra[k] = v
}
}
return sentry.CaptureEvent(evt)
}
| ew( |
adaptersAtom.test.tsx | import {ThemeProvider} from '@material-ui/core/styles';
import Enzyme, {mount, render, shallow} from 'enzyme';
import React from 'react';
import renderer from 'react-test-renderer';
import theme from '~/themes/mui';
import {
Button,
Grid,
Icon,
Navigation,
Root,
Slider,
Typography,
} from '../core/adapters/atoms';
import AtomPane from '../core/atomPane';
import Adapter from 'enzyme-adapter-react-16';
Enzyme.configure({adapter: new Adapter()});
const ButtonSetup = () => {
const props = {
type: '1',
handleTabClick: jest.fn(),
};
const wrapper = render(
<ThemeProvider theme={theme}>
            <Button events={{}} properties={{'test': 1}}>Follow</Button>
</ThemeProvider>,
);
return {
props,
wrapper,
};
};
const AtomPaneSetup = () => {
const props = {
type: '1',
handleTabClick: jest.fn(),
};
const wrapper = render(
        <ThemeProvider theme={theme}> <AtomPane/></ThemeProvider>);
return {
props,
wrapper,
};
};
const RootPaneSetup = () => {
const props = {
children: `<div>123</div>`,
className: 'root_test',
properties: {
background: {
color: '#ccc',
image: 'https://pics6.baidu.com/feed/6609c93d70cf3bc743c03f66869e7ca4cf112a98.jpeg?token=02daea0ccacf3efb0bb8000691929b63&s=22D21CC58E63B355506DE597030000C3',
size: '50',
},
},
handleTabClick: jest.fn(),
};
const wrapper = renderer.create(
<Root
children={props.children}
className={props.className}
properties={props.properties}
/>,
).toJSON();
return {
props,
wrapper,
};
};
const SliderSetup = () => {
const props = {
type: '1', | };
const actions = {
onChange: jest.fn(),
handleTabClick: jest.fn(),
};
const wrapper = shallow(<Slider
properties={{}} events={{}}/>);
return {
props,
wrapper,
actions,
};
};
describe('core/adapters/atoms', () => {
describe('Grid testing', () => {
it('the Grid status is correct', () => {
const wrapper = render(
<Grid properties={{'test': 1}}>Follow</Grid>,
);
expect(wrapper).toMatchSnapshot();
});
});
// describe('Button testing', () => {
// const {wrapper} = ButtonSetup();
// it('the Button status is correct', () => {
// expect(wrapper).toMatchSnapshot();
// });
// });
// describe('AtomPane testing', () => {
// const {wrapper} = AtomPaneSetup();
// it('the AtomPane status is correct', () => {
// expect(wrapper).toMatchSnapshot();
// });
// });
describe('Icon testing', () => {
it('the Icon status is correct', () => {
const wrapper = mount(<Icon type="B" className="icon_test"/>);
// expect(wrapper.find('icon').prop('type')).toEqual('B');
expect(wrapper).toMatchSnapshot();
});
it('should support basic usage', () => {
const wrapper = renderer.create(
<div>
<Icon type="A" className="icon_test1"/>
<Icon type="B" className="icon_test2"/>
<Icon type="C" className="icon_test3 "/>
</div>,
).toJSON();
expect(wrapper).toMatchSnapshot();
});
});
describe('Navigation testing', () => {
it('the Navigation status is correct', () => {
const wrapper = renderer.create(<Navigation
properties={{}}/>).toJSON();
expect(wrapper).toMatchSnapshot();
});
});
describe('Root testing', () => {
it('the Root status is correct', () => {
const {wrapper} = RootPaneSetup();
expect(wrapper).toMatchSnapshot();
});
});
describe('Slider testing', () => {
it('the Slider status is correct', () => {
const value = '测试Slider==========onchange事件';
const {wrapper, actions} = SliderSetup();
expect(actions.onChange.mock.calls.length).toBe(0);
wrapper.find('#mui_slider').simulate('change', value);
});
});
describe('Typography testing', () => {
it('the Typography status is correct', () => {
const wrapper = renderer.create(<Typography
properties={{}}/>).toJSON();
expect(wrapper).toMatchSnapshot();
});
});
}); | |
where-clauses-not-parameter.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
fn equal<T>(_: &T, _: &T) -> bool where isize : Eq {
true //~^ ERROR cannot bound type `isize`, where clause bounds may only be attached
}
// This should be fine; it involves a type parameter.
fn test<T: Eq>() -> bool where Option<T> : Eq {}
// This should be rejected as well.
fn test2() -> bool where Option<isize> : Eq {}
//~^ ERROR cannot bound type `core::option::Option<isize>`, where clause bounds may
#[derive(PartialEq)]
//~^ ERROR cannot bound type `isize`, where clause bounds
enum Foo<T> where isize : Eq { MkFoo(T) }
//~^ ERROR cannot bound type `isize`, where clause bounds
fn test3<T: Eq>() -> bool where Option<Foo<T>> : Eq {}
fn test4() -> bool where Option<Foo<isize>> : Eq |
//~^ ERROR cannot bound type `core::option::Option<Foo<isize>>`, where clause bounds
trait Baz<T> where isize : Eq {
//~^ ERROR cannot bound type `isize`, where clause bounds may only
fn baz(&self, t: T) where String : Eq; //~ ERROR cannot bound type `collections::string::String`
//~^ ERROR cannot bound type `isize`, where clause
}
impl Baz<isize> for isize where isize : Eq {
//~^ ERROR cannot bound type `isize`, where clause bounds
fn baz() where String : Eq {}
}
fn main() {
equal(&0, &0);
}
| {} |
num1.7.py | # Task 7
for n in range(1, 101): | print(n, "Я не буду їсти палички Бобо на уроці") |
|
__main__.py | """The main entry point into this package when run as a script."""
# For more details, see also
# https://docs.python.org/3/library/runpy.html
# https://docs.python.org/3/reference/import.html#special-considerations-for-main
import os
import sys
from .something import Something
def main():
|
if __name__ == "__main__":
main()
sys.exit(os.EX_OK)
| """Execute the Something standalone command-line tool."""
_ = Something.do_something() |
file.go | package logger
import (
"bufio"
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
"regexp"
"strconv"
"strings"
"sync"
"time"
)
const (
bufferSize = 256 * 1024
)
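// getLastCheck encodes a time as a YYYYMMDDHH integer (e.g. 2016-07-11 14:00 -> 2016071114);
// shouldDel below parses the same tag back out of rotated file names.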
func getLastCheck(now time.Time) uint64 {
return uint64(now.Year())*1000000 + uint64(now.Month())*10000 + uint64(now.Day())*100 + uint64(now.Hour())
}
type syncBuffer struct {
*bufio.Writer
file *os.File
count uint64
cur int
filePath string
parent *FileBackend
}
func (self *syncBuffer) Sync() error {
return self.file.Sync()
}
func (self *syncBuffer) close() {
self.Flush()
self.Sync()
self.file.Close()
}
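// write appends to the buffer; with size-based rotation (hourly rotation off, maxSize and
// rotateNum set) it first renames the current file to a numbered ".NNN" suffix, cycling
// through rotateNum slots, before resetting the byte counter.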
func (self *syncBuffer) write(b []byte) {
if !self.parent.rotateByHour && self.parent.maxSize > 0 && self.parent.rotateNum > 0 && self.count+uint64(len(b)) >= self.parent.maxSize {
os.Rename(self.filePath, self.filePath+fmt.Sprintf(".%03d", self.cur)) | self.count = 0
}
self.count += uint64(len(b))
self.Writer.Write(b)
}
type FileBackend struct {
mu sync.Mutex
dir string //directory for log files
files [numSeverity]syncBuffer
flushInterval time.Duration
rotateNum int
maxSize uint64
fall bool
rotateByHour bool
lastCheck uint64
	reg            *regexp.Regexp // matches rotate-by-hour log files that are candidates for deletion
	keepHours      uint           // how many hours of rotated logs to keep; only meaningful when rotateByHour is true
}
func (self *FileBackend) Flush() {
self.mu.Lock()
defer self.mu.Unlock()
for i := 0; i < numSeverity; i++ {
self.files[i].Flush()
self.files[i].Sync()
}
}
func (self *FileBackend) close() {
self.Flush()
}
func (self *FileBackend) flushDaemon() {
for {
time.Sleep(self.flushInterval)
self.Flush()
}
}
func shouldDel(fileName string, left uint) bool {
// tag should be like 2016071114
tagInt, err := strconv.Atoi(strings.Split(fileName, ".")[2])
if err != nil {
return false
}
point := time.Now().Unix() - int64(left*3600)
if getLastCheck(time.Unix(point, 0)) > uint64(tagInt) {
return true
}
return false
}
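// rotateByHourDaemon wakes every second; at each hour boundary it renames the live log files
// with the previous hour's YYYYMMDDHH tag and deletes rotated files older than keepHours.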
func (self *FileBackend) rotateByHourDaemon() {
for {
time.Sleep(time.Second * 1)
if self.rotateByHour {
check := getLastCheck(time.Now())
if self.lastCheck < check {
for i := 0; i < numSeverity; i++ {
os.Rename(self.files[i].filePath, self.files[i].filePath+fmt.Sprintf(".%d", self.lastCheck))
}
self.lastCheck = check
}
// also check log dir to del overtime files
files, err := ioutil.ReadDir(self.dir)
if err == nil {
for _, file := range files {
					// delete files whose names exactly match the rotated-log pattern and are older than keepHours
if file.Name() == self.reg.FindString(file.Name()) &&
shouldDel(file.Name(), self.keepHours) {
os.Remove(filepath.Join(self.dir, file.Name()))
}
}
}
}
}
}
func (self *FileBackend) monitorFiles() {
for range time.NewTicker(time.Second * 5).C {
for i := 0; i < numSeverity; i++ {
fileName := path.Join(self.dir, severityName[i]+".log")
if _, err := os.Stat(fileName); err != nil && os.IsNotExist(err) {
if f, err := os.OpenFile(fileName, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644); err == nil {
self.mu.Lock()
self.files[i].close()
self.files[i].Writer = bufio.NewWriterSize(f, bufferSize)
self.files[i].file = f
self.mu.Unlock()
}
}
}
}
}
func (self *FileBackend) Log(s Severity, msg []byte) {
self.mu.Lock()
switch s {
case FATAL:
self.files[FATAL].write(msg)
case ERROR:
self.files[ERROR].write(msg)
case WARNING:
self.files[WARNING].write(msg)
case INFO:
self.files[INFO].write(msg)
case DEBUG:
self.files[DEBUG].write(msg)
}
if self.fall && s < INFO {
self.files[INFO].write(msg)
}
self.mu.Unlock()
if s == FATAL {
self.Flush()
}
}
func (self *FileBackend) Rotate(rotateNum1 int, maxSize1 uint64) {
self.rotateNum = rotateNum1
self.maxSize = maxSize1
}
func (self *FileBackend) SetRotateByHour(rotateByHour bool) {
self.rotateByHour = rotateByHour
if self.rotateByHour {
self.lastCheck = getLastCheck(time.Now())
} else {
self.lastCheck = 0
}
}
func (self *FileBackend) SetKeepHours(hours uint) {
self.keepHours = hours
}
func (self *FileBackend) Fall() {
self.fall = true
}
func (self *FileBackend) SetFlushDuration(t time.Duration) {
if t >= time.Second {
self.flushInterval = t
} else {
self.flushInterval = time.Second
}
}
func NewFileBackend(dir string) (*FileBackend, error) {
if err := os.MkdirAll(dir, 0755); err != nil {
return nil, err
}
var fb FileBackend
fb.dir = dir
for i := 0; i < numSeverity; i++ {
fileName := path.Join(dir, severityName[i]+".log")
f, err := os.OpenFile(fileName, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
return nil, err
}
count := uint64(0)
stat, err := f.Stat()
if err == nil {
count = uint64(stat.Size())
}
fb.files[i] = syncBuffer{
Writer: bufio.NewWriterSize(f, bufferSize),
file: f,
filePath: fileName,
parent: &fb,
count: count,
}
}
// default
fb.flushInterval = time.Second * 3
fb.rotateNum = 20
fb.maxSize = 1024 * 1024 * 1024
fb.rotateByHour = false
fb.lastCheck = 0
// init reg to match files
	// ONLY covers this century...
fb.reg = regexp.MustCompile("(INFO|ERROR|WARNING|DEBUG|FATAL)\\.log\\.20[0-9]{8}")
fb.keepHours = 24 * 7
go fb.flushDaemon()
go fb.monitorFiles()
go fb.rotateByHourDaemon()
return &fb, nil
}
func Rotate(rotateNum1 int, maxSize1 uint64) {
if fileback != nil {
fileback.Rotate(rotateNum1, maxSize1)
}
}
func Fall() {
if fileback != nil {
fileback.Fall()
}
}
func SetFlushDuration(t time.Duration) {
if fileback != nil {
fileback.SetFlushDuration(t)
}
}
func SetRotateByHour(rotateByHour bool) {
if fileback != nil {
fileback.SetRotateByHour(rotateByHour)
}
}
func SetKeepHours(hours uint) {
if fileback != nil {
fileback.SetKeepHours(hours)
}
} | self.cur++
if self.cur >= self.parent.rotateNum {
self.cur = 0
} |
optimizer_modules.py | """Configurable optimizers from JAX."""
import gin
from jax.example_libraries import optimizers
@gin.configurable
def | (value):
return value
gin.external_configurable(optimizers.adam)
gin.external_configurable(optimizers.momentum)
gin.external_configurable(optimizers.nesterov)
gin.external_configurable(optimizers.exponential_decay)
gin.external_configurable(optimizers.inverse_time_decay)
gin.external_configurable(optimizers.polynomial_decay)
gin.external_configurable(optimizers.piecewise_constant)
| optimizer |
main.rs | use std::io::prelude::*;
use std::net::TcpListener;
use std::net::TcpStream;
use std::env;
mod services;
fn main() {
let args: Vec<String> = env::args().collect();
let servicename = &args[1];
println!("{} up and running!", servicename);
if servicename == "server" {
let listener = TcpListener::bind("0.0.0.0:8080").unwrap();
for stream in listener.incoming() {
let stream = stream.unwrap();
handle_server(stream);
}
}
else if servicename == "providers" {
let listener = TcpListener::bind("0.0.0.0:8084").unwrap();
for stream in listener.incoming() {
let stream = stream.unwrap();
services::providers::rust::server::stuff(stream);
}
}
}
// todo: daniel + nithin, figure out rust modules lol
fn handle_server(mut stream: TcpStream) | {
let mut buffer = [0; 1024];
stream.read(&mut buffer).unwrap();
println!("Request: {}", String::from_utf8_lossy(&buffer[..]));
let response = "HTTP/1.1 200 OK\r\n\r\n";
stream.write(response.as_bytes()).unwrap();
stream.flush().unwrap();
} |
|
__init__.py | # lang provides a stock-trading programming language, focused on translating its syntax | # setPY provides the bridge between klang and python so functions and variables can be shared
from .kparse import *
from .mAST import setPY | # built on python's ply lex and yacc
attachment.go | // Copyright 2021 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package attachment
import (
"bytes"
"context"
"fmt"
"io"
"code.gitea.io/gitea/models"
"code.gitea.io/gitea/models/db"
"code.gitea.io/gitea/modules/storage"
"code.gitea.io/gitea/modules/upload"
"code.gitea.io/gitea/modules/util"
"github.com/google/uuid"
)
// NewAttachment creates a new attachment object, but does not verify it.
func NewAttachment(attach *models.Attachment, file io.Reader) (*models.Attachment, error) {
if attach.RepoID == 0 {
return nil, fmt.Errorf("attachment %s should belong to a repository", attach.Name)
}
err := db.WithTx(func(ctx context.Context) error {
attach.UUID = uuid.New().String()
size, err := storage.Attachments.Save(attach.RelativePath(), file, -1)
if err != nil |
attach.Size = size
return db.Insert(ctx, attach)
})
return attach, err
}
// UploadAttachment uploads a new attachment into storage and updates the database.
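// It sniffs the first 1KB of the reader to verify the file type, then stitches the buffered
// bytes back together with the remaining stream via io.MultiReader so no data is lost.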
func UploadAttachment(file io.Reader, actorID, repoID, releaseID int64, fileName string, allowedTypes string) (*models.Attachment, error) {
buf := make([]byte, 1024)
n, _ := util.ReadAtMost(file, buf)
buf = buf[:n]
if err := upload.Verify(buf, fileName, allowedTypes); err != nil {
return nil, err
}
return NewAttachment(&models.Attachment{
RepoID: repoID,
UploaderID: actorID,
ReleaseID: releaseID,
Name: fileName,
}, io.MultiReader(bytes.NewReader(buf), file))
}
| {
return fmt.Errorf("Create: %v", err)
} |
broker_test.go | package main
import (
"testing"
)
func TestPrintInput(t *testing.T) {
name := Input{"Lala"}
want := true
result := PrintInput(name)
if result != want |
}; | {
t.Fatalf(`Want %v get %v`, want, result)
} |
PostegresUsersRepository.ts | import { getRepository, Repository } from 'typeorm';
import { ICreateUserDTO } from '../../dtos/ICreateUserDTO';
import { User } from '../../entities/User';
import { IUsersRepository } from '../IUsersRepositories';
class PostgresUsersRepository implements IUsersRepository {
private repository: Repository<User>;
constructor() {
this.repository = getRepository(User);
}
async list(): Promise<User[]> {
const users = this.repository.find({
relations: ['address'],
});
return users;
}
async create({ id, name, password, phone, email, weight, age, ethnicity }: ICreateUserDTO): Promise<void> {
const user = this.repository.create({
id,
name,
password,
email,
phone,
weight, |
await this.repository.save(user);
}
async findByEmail(email: string): Promise<User | undefined> {
const user = await this.repository.findOne({ email });
return user;
}
async findByID(user_id: string): Promise<User | undefined> {
const user = await this.repository.findOne({ id: user_id });
return user;
}
async save(user: User): Promise<void> {
await this.repository.save(user);
}
async delete(user_id: string): Promise<void> {
await this.repository.delete(user_id);
}
}
export { PostgresUsersRepository }; | ethnicity,
age,
}); |
compile_shaders.py | #!/usr/bin/python -tt
import os
import argparse
import subprocess
import shutil
parser = argparse.ArgumentParser(description="Compile the Mount&Blade Warband shaders.")
parser.add_argument("-b", "--compile-b", action="store_true", help="compile the ps_2_b profile as well")
args = parser.parse_args()
if not os.access(os.path.join("shaders", "fxc.exe"), os.R_OK|os.X_OK):
print "You must copy fxc.exe from the TaleWorlds Warband shader package to the shaders subdirectory."
exit(1)
import module_info
def compile_profile(profile, name):
|
compile_profile("ps_2_a", "mb.fx")
if args.compile_b:
compile_profile("ps_2_b", "mb_2b.fx")
| command_list = ["./fxc.exe", "/nologo", "/T", "fx_2_0", "/D", "PS_2_X=%s" % profile, "/Fo", "mb.fxo", "mb.fx"]
exit_code = subprocess.call(command_list, cwd="shaders")
output_fxo = os.path.join("shaders", "mb.fxo")
if exit_code == 0:
module_fxo = module_info.export_path(name)
try:
os.remove(module_fxo)
except Exception:
pass
shutil.move(output_fxo, module_fxo)
else:
try:
os.remove(output_fxo)
except Exception:
pass
exit(exit_code) |
timeout.rs | use crate::Test;
use io_uring::{opcode, types, IoUring};
use std::time::Instant;
pub fn test_timeout(ring: &mut IoUring, test: &Test) -> anyhow::Result<()> {
require!(
test;
test.probe.is_supported(opcode::Timeout::CODE);
);
println!("test timeout");
// add timeout
let ts = types::Timespec::new().sec(1);
let timeout_e = opcode::Timeout::new(&ts);
unsafe {
let mut queue = ring.submission();
queue
.push(&timeout_e.build().user_data(0x09))
.expect("queue is full");
}
let start = Instant::now();
ring.submit_and_wait(1)?;
assert_eq!(start.elapsed().as_secs(), 1);
let cqes = ring.completion().collect::<Vec<_>>();
assert_eq!(cqes.len(), 1);
assert_eq!(cqes[0].user_data(), 0x09);
assert_eq!(cqes[0].result(), -libc::ETIME);
    // add a timeout together with a nop; the nop completes immediately, the timeout only fires later
let ts = types::Timespec::new().sec(1);
let timeout_e = opcode::Timeout::new(&ts);
let nop_e = opcode::Nop::new();
unsafe {
let mut queue = ring.submission();
queue
.push(&timeout_e.build().user_data(0x0a))
.expect("queue is full");
queue
.push(&nop_e.build().user_data(0x0b))
.expect("queue is full");
}
// nop
let start = Instant::now();
ring.submit_and_wait(1)?;
assert_eq!(start.elapsed().as_secs(), 0);
let cqes = ring.completion().collect::<Vec<_>>();
assert_eq!(cqes.len(), 1);
assert_eq!(cqes[0].user_data(), 0x0b);
assert_eq!(cqes[0].result(), 0);
// timeout
ring.submit_and_wait(1)?;
assert_eq!(start.elapsed().as_secs(), 1);
let cqes = ring.completion().collect::<Vec<_>>();
assert_eq!(cqes.len(), 1);
assert_eq!(cqes[0].user_data(), 0x0a);
assert_eq!(cqes[0].result(), -libc::ETIME);
Ok(())
}
pub fn test_timeout_count(ring: &mut IoUring, test: &Test) -> anyhow::Result<()> {
require!(
test;
test.probe.is_supported(opcode::Timeout::CODE);
);
println!("test timeout_count");
let ts = types::Timespec::new().sec(1);
let timeout_e = opcode::Timeout::new(&ts).count(1);
let nop_e = opcode::Nop::new();
unsafe {
let mut queue = ring.submission();
queue
.push(&timeout_e.build().user_data(0x0c))
.expect("queue is full");
queue
.push(&nop_e.build().user_data(0x0d))
.expect("queue is full");
}
let start = Instant::now();
ring.submit_and_wait(2)?;
assert_eq!(start.elapsed().as_secs(), 0);
let mut cqes = ring.completion().collect::<Vec<_>>();
cqes.sort_by_key(|cqe| cqe.user_data());
assert_eq!(cqes.len(), 2);
assert_eq!(cqes[0].user_data(), 0x0c);
assert_eq!(cqes[1].user_data(), 0x0d);
assert_eq!(cqes[0].result(), 0);
assert_eq!(cqes[1].result(), 0);
Ok(())
}
pub fn test_timeout_remove(ring: &mut IoUring, test: &Test) -> anyhow::Result<()> {
require!(
test;
test.probe.is_supported(opcode::Timeout::CODE);
test.probe.is_supported(opcode::TimeoutRemove::CODE);
);
println!("test timeout_remove");
// add timeout
let ts = types::Timespec::new().sec(1);
let timeout_e = opcode::Timeout::new(&ts);
unsafe {
let mut queue = ring.submission();
queue
.push(&timeout_e.build().user_data(0x10))
.expect("queue is full");
}
ring.submit()?;
// remove timeout
let timeout_e = opcode::TimeoutRemove::new(0x10);
unsafe {
let mut queue = ring.submission();
queue
.push(&timeout_e.build().user_data(0x11))
.expect("queue is full");
}
let start = Instant::now();
ring.submit_and_wait(2)?;
assert_eq!(start.elapsed().as_secs(), 0);
let mut cqes = ring.completion().collect::<Vec<_>>();
cqes.sort_by_key(|cqe| cqe.user_data());
assert_eq!(cqes.len(), 2);
assert_eq!(cqes[0].user_data(), 0x10);
assert_eq!(cqes[1].user_data(), 0x11);
assert_eq!(cqes[0].result(), -libc::ECANCELED);
assert_eq!(cqes[1].result(), 0);
Ok(())
}
pub fn test_timeout_cancel(ring: &mut IoUring, test: &Test) -> anyhow::Result<()> {
require!(
test;
test.probe.is_supported(opcode::Timeout::CODE);
test.probe.is_supported(opcode::AsyncCancel::CODE);
);
println!("test timeout_cancel");
// add timeout
let ts = types::Timespec::new().sec(1);
let timeout_e = opcode::Timeout::new(&ts);
unsafe {
let mut queue = ring.submission();
queue
.push(&timeout_e.build().user_data(0x10))
.expect("queue is full");
}
ring.submit()?;
    // cancel the timeout with an async cancel op
let timeout_e = opcode::AsyncCancel::new(0x10);
unsafe {
let mut queue = ring.submission();
queue
.push(&timeout_e.build().user_data(0x11))
.expect("queue is full");
}
let start = Instant::now();
ring.submit_and_wait(2)?;
assert_eq!(start.elapsed().as_secs(), 0);
let mut cqes = ring.completion().collect::<Vec<_>>();
cqes.sort_by_key(|cqe| cqe.user_data());
assert_eq!(cqes.len(), 2);
assert_eq!(cqes[0].user_data(), 0x10);
assert_eq!(cqes[1].user_data(), 0x11);
assert_eq!(cqes[0].result(), -libc::ECANCELED);
assert_eq!(cqes[1].result(), 0);
Ok(())
}
pub fn test_timeout_abs(ring: &mut IoUring, test: &Test) -> anyhow::Result<()> {
require!(
test;
test.probe.is_supported(opcode::Timeout::CODE);
);
println!("test timeout_abs");
let mut now = libc::timespec {
tv_sec: 0,
tv_nsec: 0,
};
let ret = unsafe { libc::clock_gettime(libc::CLOCK_MONOTONIC, &mut now) };
assert_eq!(ret, 0);
let ts = types::Timespec::new()
.sec(now.tv_sec as u64 + 2)
.nsec(now.tv_nsec as u32);
let timeout_e = opcode::Timeout::new(&ts).flags(types::TimeoutFlags::ABS);
unsafe {
let mut queue = ring.submission();
queue
.push(&timeout_e.build().user_data(0x19))
.expect("queue is full");
}
let start = Instant::now();
ring.submit_and_wait(1)?;
assert!(start.elapsed().as_secs() >= 1);
let cqes = ring.completion().collect::<Vec<_>>();
assert_eq!(cqes.len(), 1);
assert_eq!(cqes[0].user_data(), 0x19);
assert_eq!(cqes[0].result(), -libc::ETIME);
Ok(())
}
#[cfg(feature = "unstable")]
pub fn test_timeout_submit_args(ring: &mut IoUring, test: &Test) -> anyhow::Result<()> | {
require! {
test;
ring.params().is_feature_ext_arg();
};
println!("test timeout_submit_args");
let ts = types::Timespec::new().sec(1);
let args = types::SubmitArgs::new().timespec(&ts);
// timeout
let start = Instant::now();
match ring.submitter().submit_with_args(1, &args) {
Ok(_) => panic!(),
Err(ref err) if err.raw_os_error() == Some(libc::ETIME) => (),
Err(err) => return Err(err.into()),
}
assert_eq!(start.elapsed().as_secs(), 1);
let cqes = ring.completion().collect::<Vec<_>>();
assert!(cqes.is_empty());
// no timeout
let nop_e = opcode::Nop::new();
unsafe {
ring.submission()
.push(&nop_e.build().user_data(0x1c))
.expect("queue is full");
}
let start = Instant::now();
ring.submitter().submit_with_args(1, &args)?;
assert_eq!(start.elapsed().as_secs(), 0);
let cqes = ring.completion().collect::<Vec<_>>();
assert_eq!(cqes.len(), 1);
assert_eq!(cqes[0].user_data(), 0x1c);
assert_eq!(cqes[0].result(), 0);
Ok(())
} |
|
main.0bdae82f.chunk.js | (this["webpackJsonplogin-view"]=this["webpackJsonplogin-view"]||[]).push([[0],{76:function(e,t,a){e.exports=a(90)},81:function(e,t,a){},90:function(e,t,a){"use strict";a.r(t);var n=a(0),r=a.n(n),o=a(8),c=a.n(o),i=a(36),l=(a(81),a(10)),s=a(68),u=a(12),m=a(136),d=a(131),p=a(129),f=a(134),g=a(132),h=a(135),b=a(29),v=a.n(b),E=a(39),w=a(130),O=a(124),j=a(126),y=a(127),S=a(121),_=a(128),C=a(35),k=Object(S.a)((function(e){return{error:{color:"#ff0000"}}}));var x=function(e){var t,a=k();return r.a.createElement("div",{className:a.error,dangerouslySetInnerHTML:(t=e.error,{__html:t})})},T=a(65);var N=function(e){var t=e.onSuccess,a=Object(T.useGoogleLogin)({onSuccess:t,onFailure:function(e){console.log("Login failed: res:",e),alert("Failed to login. Please ping this to [email protected]")},clientId:"654131058807-15p8l5r4ddlusbeavvhiin9rt2cuglh6.apps.googleusercontent.com",accessType:"offline",responseType:"code"}).signIn;return r.a.createElement(C.GoogleLoginButton,{onClick:a})},P=a(66),W=a.n(P);var U=function(e){var t=e.onSuccess;return r.a.createElement(W.a,{appId:"603230757035427",autoLoad:!1,fields:"name,email",scope:"public_profile,email",callback:t,render:function(e){return r.a.createElement(C.FacebookLoginButton,{onClick:e.onClick})}})},D=Object(S.a)((function(e){return{"@global":{body:{backgroundColor:e.palette.common.white}},paper:{marginTop:e.spacing(8),display:"flex",flexDirection:"column",alignItems:"center"},avatar:{margin:e.spacing(1),backgroundColor:e.palette.secondary.main},form:{width:"100%",marginTop:e.spacing(1)},submit:{margin:e.spacing(3,0,2)},loginButtons:{width:"100%",marginTop:e.spacing(1)}}}));var L=function(){var e=D(),t=window.location.search,a=new URLSearchParams(t),o=Object(n.useState)(""),c=Object(u.a)(o,2),l=c[0],b=c[1],S=Object(n.useState)(""),C=Object(u.a)(S,2),k=C[0],T=C[1],P=Object(n.useState)(!1),W=Object(u.a)(P,2),L=W[0],R=W[1],I=Object(n.useState)(null==a.get("state")?"":a.get("state")),A=Object(u.a)(I,1)[0],F=Object(n.useState)(null==a.get("client_id")?"":a.get("client_id")),B=Object(u.a)(F,1)[0],q=Object(n.useState)(null==a.get("user_type")?"":a.get("user_type")),V=Object(u.a)(q,1)[0],H=Object(n.useState)(null==a.get("redirect_uri")?"":a.get("redirect_uri")),K=Object(u.a)(H,1)[0],J=Object(n.useState)(""),G=Object(u.a)(J,2),z=G[0],M=G[1],Y=Object(n.useState)(null),$=Object(u.a)(Y,2),Q=$[0],X=$[1],Z=Object(n.useState)(null),ee=Object(u.a)(Z,2),te=ee[0],ae=ee[1],ne=Object(n.useState)([]),re=Object(u.a)(ne,2),oe=re[0],ce=re[1],ie=function(e){e.preventDefault(),window.location.href=Q};function le(){return console.log("scopes =",oe),r.a.createElement(O.a,{component:"nav","aria-label":"secondary mailbox folders"},oe.map((function(e,t){return r.a.createElement(j.a,{key:t,button:!0},r.a.createElement(y.a,{primary:e}))})))}return null!==Q?r.a.createElement(_.a,{component:"main",maxWidth:"xs"},r.a.createElement(p.a,null),r.a.createElement("div",{className:e.paper},r.a.createElement("form",{className:e.form,noValidate:!0,onSubmit:ie},r.a.createElement(E.a,{component:"h1",variant:"h5"},"Consent"),"This application would like to access:",r.a.createElement(w.a,null),r.a.createElement(O.a,{component:"nav","aria-label":"secondary mailbox folders"},r.a.createElement(le,null)),r.a.createElement(w.a,null),r.a.createElement(d.a,{type:"submit",variant:"contained",onClick:function(e){e.preventDefault();var t=Q.split("/"),a=t[0]+"//"+t[2]+"/logout";fetch(a,{credentials:"include"}).then((function(e){if(!e.ok)throw 
Error(e.statusText);window.location.href=te})).catch((function(e){console.log("error=",e),M(e.toString())}))},className:e.submit},"Deny"),r.a.createElement(d.a,{type:"submit",variant:"contained",color:"primary",onClick:ie,className:e.submit},"Accept")))):r.a.createElement(_.a,{component:"main",maxWidth:"xs"},r.a.createElement(p.a,null),r.a.createElement("div",{className:e.paper},r.a.createElement(m.a,{className:e.avatar},r.a.createElement(v.a,null)),r.a.createElement(E.a,{component:"h1",variant:"h5"},"Sign in"),r.a.createElement(x,{error:z}),r.a.createElement("form",{className:e.form,noValidate:!0,onSubmit:function(e){e.preventDefault();var t={j_username:l,j_password:k,remember:L?"Y":"N",client_id:B};Object.assign(t,A&&{state:A},V&&{user_type:V},K&&{redirect_uri:K});var a=Object.keys(t).map((function(e){return encodeURIComponent(e)+"="+encodeURIComponent(t[e])})).join("&"),n=Object(s.a)({"Content-Type":"application/x-www-form-urlencoded"},Object({NODE_ENV:"production",PUBLIC_URL:"",WDS_SOCKET_HOST:void 0,WDS_SOCKET_PATH:void 0,WDS_SOCKET_PORT:void 0}).REACT_APP_SAAS_URL&&{service_url:Object({NODE_ENV:"production",PUBLIC_URL:"",WDS_SOCKET_HOST:void 0,WDS_SOCKET_PATH:void 0,WDS_SOCKET_PORT:void 0}).REACT_APP_SAAS_URL});fetch("/oauth2/code",{method:"POST",redirect:"follow",credentials:"include",headers:n,body:a}).then((function(e){if(!e.ok)throw e;return e.json()})).then((function(e){X(e.redirectUri),ae(e.denyUri),ce(e.scopes)})).catch((function(e){e.text().then((function(e){console.log("error=",e);var t={host:"lightapi.net",service:"user",action:"loginUser",version:"0.1.0",data:{email:l,password:k}},a="/portal/query?cmd="+encodeURIComponent(JSON.stringify(t));M('Login Failed! Click <a href="link">here</a> to identify root cause.'.replace("link",a))}))}))}},r.a.createElement(f.a,{variant:"outlined",margin:"normal",required:!0,fullWidth:!0,id:"j_username",label:"Email",name:"j_username",value:l,autoComplete:"username",autoFocus:!0,onChange:function(e){b(e.target.value)}}),r.a.createElement(f.a,{variant:"outlined",margin:"normal",required:!0,fullWidth:!0,name:"j_password",value:k,label:"Password",type:"password",id:"j_password",autoComplete:"password",onChange:function(e){T(e.target.value)}}),r.a.createElement(f.a,{name:"state",value:A,type:"hidden",id:"state"}),r.a.createElement(f.a,{name:"client_id",value:B,type:"hidden",id:"client_id"}),r.a.createElement(f.a,{name:"user_type",value:V,type:"hidden",id:"user_type"}),r.a.createElement(f.a,{name:"redirect_uri",value:K,type:"hidden",id:"redirect_uri"}),r.a.createElement(g.a,{control:r.a.createElement(h.a,{value:"remember",color:"primary"}),label:"Remember me",onChange:function(e){R(e.target.value)}}),r.a.createElement(d.a,{type:"submit",fullWidth:!0,variant:"contained",color:"primary",className:e.submit},"Sign In")),r.a.createElement("div",null,"Forget your password? 
",r.a.createElement(i.b,{to:"/forget"},"Reset Here")),r.a.createElement("div",{className:e.loginButtons},r.a.createElement(N,{onSuccess:function(e){console.log("Google Login Success: authorization code:",e.code),console.log("referrer: ",document.referrer);var t=document.referrer.split("/"),a=t[0]+"//"+t[2];console.log("host = ",a),fetch(a+"/google?code="+e.code,{redirect:"follow",credentials:"include"}).then((function(e){if(e.ok)return e.json();throw e})).then((function(e){console.log("data =",e),X(e.redirectUri),ae(e.denyUri),ce(e.scopes)})).catch((function(e){e.text().then((function(e){M(e)}))}))}}),r.a.createElement(U,{onSuccess:function(e){console.log("Login Success: accessToken:",e.accessToken),console.log("referrer: ",document.referrer);var t=document.referrer.split("/"),a=t[0]+"//"+t[2];console.log("host = ",a),fetch(a+"/facebook?accessToken="+e.accessToken,{redirect:"follow",credentials:"include"}).then((function(e){if(e.ok)return e.json();throw e})).then((function(e){console.log("data =",e),X(e.redirectUri),ae(e.denyUri),ce(e.scopes)})).catch((function(e){e.text().then((function(e){M(e)}))}))}}))))},R=a(33),I=a.n(R),A=a(50),F=Object(S.a)((function(e){return{"@global":{body:{backgroundColor:e.palette.common.white}},paper:{marginTop:e.spacing(8),display:"flex",flexDirection:"column",alignItems:"center"},avatar:{margin:e.spacing(1),backgroundColor:e.palette.secondary.main},form:{width:"100%",marginTop:e.spacing(1)},submit:{margin:e.spacing(3,0,2)}}}));var B=function(e){var t=new URLSearchParams(e.location.search),a=F(),o=Object(n.useState)(""),c=Object(u.a)(o,2),i=c[0],l=c[1],s=Object(n.useState)(""),g=Object(u.a)(s,2),h=g[0],b=g[1],w=Object(n.useState)(""),O=Object(u.a)(w,2),j=O[0],y=O[1],S=Object(n.useState)(null==t.get("email")?"":t.get("email")),C=Object(u.a)(S,1)[0],k=Object(n.useState)(null==t.get("token")?"":t.get("token")),T=Object(u.a)(k,1)[0],N=function(){var e=Object(A.a)(I.a.mark((function e(t,a,n){var r,o;return I.a.wrap((function(e){for(;;)switch(e.prev=e.next){case 0:return e.prev=0,e.next=3,fetch(t,{method:"POST",body:JSON.stringify(n),headers:a});case 3:if((r=e.sent).ok){e.next=6;break}throw r;case 6:return e.next=8,r.json();case 8:e.sent,y("The password has been reset."),e.next=19;break;case 12:return e.prev=12,e.t0=e.catch(0),e.next=16,e.t0.json();case 16:o=e.sent,console.log(o),y(o.description);case 19:case"end":return e.stop()}}),e,null,[[0,12]])})));return function(t,a,n){return e.apply(this,arguments)}}();return r.a.createElement(_.a,{component:"main",maxWidth:"xs"},r.a.createElement(p.a,null),r.a.createElement("div",{className:a.paper},r.a.createElement(m.a,{className:a.avatar},r.a.createElement(v.a,null)),r.a.createElement(E.a,{component:"h1",variant:"h5"},"Reset Password"),r.a.createElement(x,{error:j}),r.a.createElement("form",{className:a.form,noValidate:!0,onSubmit:function(e){e.preventDefault();N("/portal/command",{"Content-Type":"application/json"},{host:"lightapi.net",service:"user",action:"resetPassword",version:"0.1.0",data:{email:C,token:T,newPassword:i,passwordConfirm:h}})}},r.a.createElement(f.a,{variant:"outlined",margin:"normal",required:!0,disabled:!0,fullWidth:!0,id:"email",label:"Email",name:"email",value:C,autoComplete:"email",autoFocus:!0}),r.a.createElement(f.a,{variant:"outlined",margin:"normal",required:!0,fullWidth:!0,name:"newPassword",value:i,label:"New 
Password",type:"password",id:"newPassword",autoComplete:"newPassword",onChange:function(e){l(e.target.value)}}),r.a.createElement(f.a,{variant:"outlined",margin:"normal",required:!0,fullWidth:!0,name:"passwordConfirm",value:h,label:"Password Confirm",type:"password",id:"passwordConfirm",autoComplete:"passwordConfirm",onChange:function(e){b(e.target.value)}}),r.a.createElement(f.a,{name:"token",value:T,type:"hidden",id:"token"}),r.a.createElement(d.a,{type:"submit",fullWidth:!0,variant:"contained",color:"primary",className:a.submit},"Submit"))))},q=Object(S.a)((function(e){return{"@global":{body:{backgroundColor:e.palette.common.white}},paper:{marginTop:e.spacing(8),display:"flex",flexDirection:"column",alignItems:"center"},avatar:{margin:e.spacing(1),backgroundColor:e.palette.secondary.main},form:{width:"100%",marginTop:e.spacing(1)},submit:{margin:e.spacing(3,0,2)}}}));var V=function(){var e=q(),t=Object(n.useState)(""),a=Object(u.a)(t,2),o=a[0],c=a[1],i=Object(n.useState)(""),l=Object(u.a)(i,2),s=l[0],g=l[1],h=function(){var e=Object(A.a)(I.a.mark((function e(t,a,n){var r,o;return I.a.wrap((function(e){for(;;)switch(e.prev=e.next){case 0:return e.prev=0,e.next=3,fetch(t,{method:"POST",body:JSON.stringify(n),headers:a});case 3:if((r=e.sent).ok){e.next=6;break}throw r;case 6:return e.next=8,r.json();case 8:e.sent,g("An Email has been sent."),e.next=19;break;case 12:return e.prev=12,e.t0=e.catch(0),e.next=16,e.t0.json();case 16:o=e.sent,console.log(o),g(o.description);case 19:case"end":return e.stop()}}),e,null,[[0,12]])})));return function(t,a,n){return e.apply(this,arguments)}}();return r.a.createElement(_.a,{component:"main",maxWidth:"xs"},r.a.createElement(p.a,null),r.a.createElement("div",{className:e.paper},r.a.createElement(m.a,{className:e.avatar},r.a.createElement(v.a,null)),r.a.createElement(E.a,{component:"h1",variant:"h5"},"Forget Password"),r.a.createElement(x,{error:s}),r.a.createElement("form",{className:e.form,noValidate:!0,onSubmit:function(e){e.preventDefault();h("/portal/command",{"Content-Type":"application/json"},{host:"lightapi.net",service:"user",action:"forgetPassword",version:"0.1.0",data:{email:o}})}},r.a.createElement(f.a,{variant:"outlined",margin:"normal",required:!0,fullWidth:!0,id:"email",label:"Email",name:"email",value:o,autoComplete:"email",autoFocus:!0,onChange:function(e){c(e.target.value)}}),r.a.createElement(d.a,{type:"submit",fullWidth:!0,variant:"contained",color:"primary",className:e.submit},"Submit"))))};var H=function(){return r.a.createElement(l.c,null,r.a.createElement(l.a,{exact:!0,path:"/",component:L}),r.a.createElement(l.a,{exact:!0,path:"/reset",component:B}),r.a.createElement(l.a,{exact:!0,path:"/forget",component:V}))};Boolean("localhost"===window.location.hostname||"[::1]"===window.location.hostname||window.location.hostname.match(/^127(?:\.(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}$/));var K=a(67),J=a(133),G=Object(K.a)({typography:{useNextVariants:!0}});c.a.render(r.a.createElement(i.a,null,r.a.createElement(J.a,{theme:G},r.a.createElement(H,null))),document.getElementById("root")),"serviceWorker"in navigator&&navigator.serviceWorker.ready.then((function(e){e.unregister()}))}},[[76,1,2]]]); | //# sourceMappingURL=main.0bdae82f.chunk.js.map |
|
collection.js |
for (let grade = 1; grade < 4; grade++) {
for (let i = 0; i < classes.length; i++) {
document.write(grade + "年" + classes[i] + "<br>");
}
}
let a = ["あ", "い", "う", "え", "お"];
let ka = ["か", "き", "く", "け", "こ"];
for (let anum = 0; anum < a.length; anum++) {
for (let kanum = 0; kanum < ka.length; kanum++) {
document.write(a[anum] + ka[kanum] + "<br>");
document.write(ka[kanum] + a[anum] + "<br>");
}
}
/*
comment
*/
// comment | let classes = ["A組", "B組", "C組", "D組"]; |
|
main.py | import os
import time
import shutil
import argparse
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models.vgg as vgg
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from plain_cnn_cifar import ConvNetMaker, plane_cifar100_book
# used for logging to TensorBoard
from tensorboard_logger import configure, log_value
parser = argparse.ArgumentParser(description='PyTorch CNN or VGG Training')
parser.add_argument('--dataset', default='cifar100', type=str,
help='dataset cifar100')
parser.add_argument('--epochs', default=200, type=int,
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int,
help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=128, type=int,
help='mini-batch size (default: 128)')
parser.add_argument('--lr', '--learning-rate', default=0.02, type=float,
help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, help='momentum')
parser.add_argument('--nesterov', default=True, type=bool, help='nesterov momentum')
parser.add_argument('--weight-decay', '--wd', default=5e-4, type=float,
help='weight decay (default: 5e-4)')
parser.add_argument('--print-freq', '-p', default=10, type=int,
help='print frequency (default: 10)')
parser.add_argument('--droprate', default=0, type=float,
help='dropout probability (default: 0.0)')
parser.add_argument('--no-augment', dest='augment', action='store_false',
help='whether to use standard augmentation (default: True)')
parser.add_argument('--resume', default='', type=str,
help='path to latest checkpoint (default: none)')
parser.add_argument('--name', default='CNN-2', type=str,
help='name of experiment')
parser.add_argument('--student_type', default='CNN-2', type=str,
help='type of student model (CNN-2 [default] or VGG-8)')
parser.add_argument('--teacher_type', default='CNN-10', type=str,
help='type of teacher model (CNN-10 [default] or VGG-13)')
parser.add_argument('--teacher_model', default='runs/CNN-10/model_best.pth.tar', type=str,
help='path of teacher model')
parser.add_argument('--tensorboard',
help='Log progress to TensorBoard', action='store_true')
parser.add_argument("--seed", type=int, default=5143, help="A seed for reproducible training.")
parser.add_argument("--config", default='distillation.yaml', help="pruning config")
parser.add_argument("--temperature", default=1, type=float,
help='temperature parameter of distillation')
parser.add_argument("--loss_types", default=['CE', 'KL'], type=str, nargs='+',
help='loss types of distillation, should be a list of length 2, '
'first for student targets loss, second for teacher student loss.')
parser.add_argument("--loss_weights", default=[0.5, 0.5], type=float, nargs='+',
help='loss weights of distillation, should be a list of length 2, '
'and sum to 1.0, first for student targets loss weight, '
'second for teacher student loss weight.')
parser.set_defaults(augment=True)
def set_seed(seed):
|
def main():
global args, best_prec1
args, _ = parser.parse_known_args()
best_prec1 = 0
if args.seed is not None:
set_seed(args.seed)
if args.tensorboard: configure("runs/%s"%(args.name))
# Data loading code
normalize = transforms.Normalize(mean=[0.5071, 0.4866, 0.4409], std=[0.2675, 0.2565, 0.2761])
if args.augment:
transform_train = transforms.Compose([
transforms.ToTensor(),
transforms.Lambda(lambda x: F.pad(x.unsqueeze(0),
(4,4,4,4),mode='reflect').squeeze()),
transforms.ToPILImage(),
transforms.RandomCrop(32),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
])
else:
transform_train = transforms.Compose([
transforms.ToTensor(),
normalize,
])
transform_test = transforms.Compose([
transforms.ToTensor(),
normalize
])
# create teacher and student model
if args.teacher_type == 'CNN-10':
teacher_model = ConvNetMaker(plane_cifar100_book['10'])
elif args.teacher_type == 'VGG-13':
teacher_model = vgg.vgg13(num_classes=100)
else:
raise NotImplementedError('Unsupported teacher model type')
teacher_model.load_state_dict(torch.load(args.teacher_model)['state_dict'])
if args.student_type == 'CNN-2':
student_model = ConvNetMaker(plane_cifar100_book['2'])
elif args.student_type == 'VGG-8':
student_model = vgg.VGG(vgg.make_layers([64, 'M', 128, 'M', 256, 'M', 512, 'M', 512, 'M']), num_classes=100)
else:
raise NotImplementedError('Unsupported student model type')
# get the number of model parameters
print('Number of teacher model parameters: {}'.format(
sum([p.data.nelement() for p in teacher_model.parameters()])))
print('Number of student model parameters: {}'.format(
sum([p.data.nelement() for p in student_model.parameters()])))
kwargs = {'num_workers': 0, 'pin_memory': True}
assert(args.dataset == 'cifar100')
train_dataset = datasets.__dict__[args.dataset.upper()]('../data',
train=True, download=True,
transform=transform_train)
# get logits of teacher model
if args.loss_weights[1] > 0:
from tqdm import tqdm
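        # Teacher logits are computed once and cached to teacher_logits.npy next to the teacher
        # checkpoint; each target is then wrapped in a dict carrying both the label and the logits.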
def get_logits(teacher_model, train_dataset):
print("***** Getting logits of teacher model *****")
print(f" Num examples = {len(train_dataset) }")
logits_file = os.path.join(os.path.dirname(args.teacher_model), 'teacher_logits.npy')
if not os.path.exists(logits_file):
teacher_model.eval()
train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, **kwargs)
train_dataloader = tqdm(train_dataloader, desc="Evaluating")
teacher_logits = []
for step, (input, target) in enumerate(train_dataloader):
outputs = teacher_model(input)
teacher_logits += [x for x in outputs.numpy()]
np.save(logits_file, np.array(teacher_logits))
else:
teacher_logits = np.load(logits_file)
train_dataset.targets = [{'labels':l, 'teacher_logits':tl} \
for l, tl in zip(train_dataset.targets, teacher_logits)]
return train_dataset
with torch.no_grad():
train_dataset = get_logits(teacher_model, train_dataset)
train_loader = torch.utils.data.DataLoader(train_dataset,
batch_size=args.batch_size, shuffle=True, **kwargs)
val_loader = torch.utils.data.DataLoader(
datasets.__dict__[args.dataset.upper()]('../data', train=False, transform=transform_test),
batch_size=args.batch_size, shuffle=True, **kwargs)
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
args.start_epoch = checkpoint['epoch']
best_prec1 = checkpoint['best_prec1']
student_model.load_state_dict(checkpoint['state_dict'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
# define optimizer
optimizer = torch.optim.SGD(student_model.parameters(), args.lr,
momentum=args.momentum, nesterov = args.nesterov,
weight_decay=args.weight_decay)
# cosine learning rate
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, len(train_loader)*args.epochs)
def train_func(model):
return train(train_loader, model, scheduler, distiller, best_prec1)
def eval_func(model):
return validate(val_loader, model, distiller)
from neural_compressor.experimental import Distillation, common
from neural_compressor.experimental.common.criterion import PyTorchKnowledgeDistillationLoss
distiller = Distillation(args.config)
distiller.teacher_model = common.Model(teacher_model)
distiller.student_model = common.Model(student_model)
distiller.train_func = train_func
distiller.eval_func = eval_func
distiller.optimizer = optimizer
distiller.criterion = PyTorchKnowledgeDistillationLoss(
temperature=args.temperature,
loss_types=args.loss_types,
loss_weights=args.loss_weights)
model = distiller()
directory = "runs/%s/"%(args.name)
os.makedirs(directory, exist_ok=True)
model.save(directory)
# change to framework model for further use
model = model.model
def train(train_loader, model, scheduler, distiller, best_prec1):
distiller.pre_epoch_begin()
for epoch in range(args.start_epoch, args.epochs):
"""Train for one epoch on the training set"""
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
# switch to train mode
model.train()
end = time.time()
for i, (input, target) in enumerate(train_loader):
teacher_logits = None
if isinstance(target, dict):
teacher_logits = target['teacher_logits']
target = target['labels']
# compute output
output = model(input)
distiller.on_post_forward(input, teacher_logits)
loss = distiller.criterion(output, target)
# measure accuracy and record loss
prec1 = accuracy(output.data, target, topk=(1,))[0]
losses.update(loss.data.item(), input.size(0))
top1.update(prec1.item(), input.size(0))
# compute gradient and do SGD step
distiller.optimizer.zero_grad()
loss.backward()
distiller.optimizer.step()
scheduler.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'LR {scheduler._last_lr[0]:.6f}'.format(
epoch, i, len(train_loader), batch_time=batch_time,
loss=losses, top1=top1, scheduler=scheduler))
distiller.on_epoch_end()
# remember best prec@1 and save checkpoint
is_best = distiller.best_score > best_prec1
best_prec1 = max(distiller.best_score, best_prec1)
save_checkpoint({
'epoch': distiller._epoch_runned + 1,
'state_dict': model.state_dict(),
'best_prec1': best_prec1,
}, is_best)
# log to TensorBoard
if args.tensorboard:
log_value('train_loss', losses.avg, epoch)
log_value('train_acc', top1.avg, epoch)
log_value('learning_rate', scheduler._last_lr[0], epoch)
def validate(val_loader, model, distiller):
"""Perform validation on the validation set"""
batch_time = AverageMeter()
top1 = AverageMeter()
# switch to evaluate mode
model.eval()
end = time.time()
for i, (input, target) in enumerate(val_loader):
# compute output
with torch.no_grad():
output = model(input)
# measure accuracy
prec1 = accuracy(output.data, target, topk=(1,))[0]
top1.update(prec1.item(), input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
i, len(val_loader), batch_time=batch_time,
top1=top1))
print(' * Prec@1 {top1.avg:.3f}'.format(top1=top1))
# log to TensorBoard
if args.tensorboard:
log_value('val_acc', top1.avg, distiller._epoch_runned)
return top1.avg
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
"""Saves checkpoint to disk"""
directory = "runs/%s/"%(args.name)
if not os.path.exists(directory):
os.makedirs(directory)
filename = directory + filename
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, 'runs/%s/'%(args.name) + 'model_best.pth.tar')
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
if __name__ == '__main__':
main() | import random
import numpy as np
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed) |
mod.rs | // Copyright 2020 Nym Technologies SA
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use config::NymConfig;
use serde::{
de::{self, IntoDeserializer, Visitor},
Deserialize, Deserializer, Serialize,
};
use std::marker::PhantomData;
use std::path::PathBuf;
use std::time::Duration;
pub mod persistence;
pub const MISSING_VALUE: &str = "MISSING VALUE";
// 'CLIENT'
const DEFAULT_DIRECTORY_SERVER: &str = "https://directory.nymtech.net";
// 'DEBUG'
const DEFAULT_ACK_WAIT_MULTIPLIER: f64 = 1.5;
const DEFAULT_ACK_WAIT_ADDITION: Duration = Duration::from_millis(1_500);
const DEFAULT_LOOP_COVER_STREAM_AVERAGE_DELAY: Duration = Duration::from_millis(1000);
const DEFAULT_MESSAGE_STREAM_AVERAGE_DELAY: Duration = Duration::from_millis(100);
const DEFAULT_AVERAGE_PACKET_DELAY: Duration = Duration::from_millis(100);
const DEFAULT_TOPOLOGY_REFRESH_RATE: Duration = Duration::from_millis(30_000);
const DEFAULT_TOPOLOGY_RESOLUTION_TIMEOUT: Duration = Duration::from_millis(5_000);
const DEFAULT_GATEWAY_RESPONSE_TIMEOUT: Duration = Duration::from_millis(1_500);
const DEFAULT_VPN_KEY_REUSE_LIMIT: usize = 1000;
const ZERO_DELAY: Duration = Duration::from_nanos(0);
// custom function is defined to deserialize based on whether field contains a pre 0.9.0
// u64 interpreted as milliseconds or proper duration introduced in 0.9.0
//
// TODO: when we get to refactoring down the line, this code can just be removed
// and all Duration fields could just have #[serde(with = "humantime_serde")] instead
// reason for that is that we don't expect anyone to be upgrading from pre 0.9.0 when we have,
// for argument's sake, 0.11.0 out
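// For example, a pre-0.9.0 config may contain `average_packet_delay = 100` (a bare u64 meaning
// milliseconds), whereas 0.9.0+ writes a humantime string such as `average_packet_delay = '100ms'`;
// the visitor below accepts both.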
fn deserialize_duration<'de, D>(deserializer: D) -> Result<Duration, D::Error>
where
D: Deserializer<'de>,
{
struct DurationVisitor;
impl<'de> Visitor<'de> for DurationVisitor {
type Value = Duration;
fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
formatter.write_str("u64 or a duration")
}
fn visit_i64<E>(self, value: i64) -> Result<Duration, E>
where
E: de::Error,
{
self.visit_u64(value as u64)
}
fn visit_u64<E>(self, value: u64) -> Result<Duration, E>
where
E: de::Error,
{
Ok(Duration::from_millis(Deserialize::deserialize(
value.into_deserializer(),
)?))
}
fn visit_str<E>(self, value: &str) -> Result<Duration, E>
where
E: de::Error,
{
humantime_serde::deserialize(value.into_deserializer())
}
}
deserializer.deserialize_any(DurationVisitor)
}
pub fn missing_string_value() -> String {
MISSING_VALUE.to_string()
}
#[derive(Debug, Deserialize, PartialEq, Serialize)]
#[serde(deny_unknown_fields)]
pub struct Config<T> {
client: Client<T>,
#[serde(default)]
logging: Logging,
#[serde(default)]
debug: Debug,
}
impl<T: NymConfig> Config<T> {
pub fn new<S: Into<String>>(id: S) -> Self {
let mut cfg = Config::default();
cfg.with_id(id);
cfg
}
pub fn with_id<S: Into<String>>(&mut self, id: S) {
let id = id.into();
// identity key setting
if self.client.private_identity_key_file.as_os_str().is_empty() {
self.client.private_identity_key_file =
self::Client::<T>::default_private_identity_key_file(&id);
}
if self.client.public_identity_key_file.as_os_str().is_empty() {
self.client.public_identity_key_file =
self::Client::<T>::default_public_identity_key_file(&id);
}
// encryption key setting
if self
.client
.private_encryption_key_file
.as_os_str()
.is_empty()
{
self.client.private_encryption_key_file =
self::Client::<T>::default_private_encryption_key_file(&id);
}
if self
.client
.public_encryption_key_file
.as_os_str()
.is_empty()
{
self.client.public_encryption_key_file =
self::Client::<T>::default_public_encryption_key_file(&id);
}
// shared gateway key setting
if self.client.gateway_shared_key_file.as_os_str().is_empty() {
self.client.gateway_shared_key_file =
self::Client::<T>::default_gateway_shared_key_file(&id);
}
// ack key setting
if self.client.ack_key_file.as_os_str().is_empty() {
self.client.ack_key_file = self::Client::<T>::default_ack_key_file(&id);
}
if self
.client
.reply_encryption_key_store_path
.as_os_str()
.is_empty()
{
self.client.reply_encryption_key_store_path =
self::Client::<T>::default_reply_encryption_key_store_path(&id);
}
self.client.id = id;
}
pub fn with_gateway_id<S: Into<String>>(&mut self, id: S) {
self.client.gateway_id = id.into();
}
pub fn with_gateway_listener<S: Into<String>>(&mut self, gateway_listener: S) {
self.client.gateway_listener = gateway_listener.into();
}
pub fn with_custom_directory<S: Into<String>>(&mut self, directory_server: S) {
self.client.directory_server = directory_server.into();
}
pub fn set_high_default_traffic_volume(&mut self) {
self.debug.average_packet_delay = Duration::from_millis(10);
self.debug.loop_cover_traffic_average_delay = Duration::from_millis(100); // 10 cover messages / s
self.debug.message_sending_average_delay = Duration::from_millis(5); // 200 "real" messages / s
}
pub fn set_vpn_mode(&mut self, vpn_mode: bool) {
self.client.vpn_mode = vpn_mode;
}
pub fn set_vpn_key_reuse_limit(&mut self, reuse_limit: usize) {
self.debug.vpn_key_reuse_limit = Some(reuse_limit)
}
pub fn set_custom_version(&mut self, version: &str) {
self.client.version = version.to_string();
}
pub fn get_id(&self) -> String {
self.client.id.clone()
}
pub fn get_nym_root_directory(&self) -> PathBuf {
self.client.nym_root_directory.clone()
}
pub fn get_private_identity_key_file(&self) -> PathBuf {
self.client.private_identity_key_file.clone()
}
pub fn get_public_identity_key_file(&self) -> PathBuf {
self.client.public_identity_key_file.clone()
}
pub fn get_private_encryption_key_file(&self) -> PathBuf {
self.client.private_encryption_key_file.clone()
}
pub fn get_public_encryption_key_file(&self) -> PathBuf {
self.client.public_encryption_key_file.clone()
}
pub fn get_gateway_shared_key_file(&self) -> PathBuf {
self.client.gateway_shared_key_file.clone()
}
pub fn get_reply_encryption_key_store_path(&self) -> PathBuf {
self.client.reply_encryption_key_store_path.clone()
}
pub fn get_ack_key_file(&self) -> PathBuf {
self.client.ack_key_file.clone()
}
pub fn get_directory_server(&self) -> String {
self.client.directory_server.clone()
}
pub fn get_gateway_id(&self) -> String {
self.client.gateway_id.clone()
}
pub fn get_gateway_listener(&self) -> String {
self.client.gateway_listener.clone()
}
// Debug getters
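    // When vpn_mode is set, the packet, ack and message-sending delays below all collapse to
    // ZERO_DELAY, i.e. traffic is forwarded without Poisson mixing delays.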
pub fn get_average_packet_delay(&self) -> Duration {
if self.client.vpn_mode {
ZERO_DELAY
} else {
self.debug.average_packet_delay
}
}
pub fn get_average_ack_delay(&self) -> Duration {
if self.client.vpn_mode {
ZERO_DELAY | } else {
self.debug.average_ack_delay
}
}
pub fn get_ack_wait_multiplier(&self) -> f64 {
self.debug.ack_wait_multiplier
}
pub fn get_ack_wait_addition(&self) -> Duration {
self.debug.ack_wait_addition
}
pub fn get_loop_cover_traffic_average_delay(&self) -> Duration {
self.debug.loop_cover_traffic_average_delay
}
pub fn get_message_sending_average_delay(&self) -> Duration {
if self.client.vpn_mode {
ZERO_DELAY
} else {
self.debug.message_sending_average_delay
}
}
pub fn get_gateway_response_timeout(&self) -> Duration {
self.debug.gateway_response_timeout
}
pub fn get_topology_refresh_rate(&self) -> Duration {
self.debug.topology_refresh_rate
}
pub fn get_topology_resolution_timeout(&self) -> Duration {
self.debug.topology_resolution_timeout
}
pub fn get_vpn_mode(&self) -> bool {
self.client.vpn_mode
}
pub fn get_vpn_key_reuse_limit(&self) -> Option<usize> {
match self.get_vpn_mode() {
false => None,
true => Some(
self.debug
.vpn_key_reuse_limit
.unwrap_or_else(|| DEFAULT_VPN_KEY_REUSE_LIMIT),
),
}
}
pub fn get_version(&self) -> &str {
&self.client.version
}
}
impl<T: NymConfig> Default for Config<T> {
fn default() -> Self {
Config {
client: Client::<T>::default(),
logging: Default::default(),
debug: Default::default(),
}
}
}
#[derive(Debug, Deserialize, PartialEq, Serialize)]
#[serde(deny_unknown_fields)]
pub struct Client<T> {
/// Version of the client for which this configuration was created.
#[serde(default = "missing_string_value")]
version: String,
/// ID specifies the human readable ID of this particular client.
id: String,
/// URL to the directory server.
directory_server: String,
/// Special mode of the system such that all messages are sent as soon as they are received
/// and no cover traffic is generated. If set all message delays are set to 0 and overwriting
/// 'Debug' values will have no effect.
#[serde(default)]
vpn_mode: bool,
/// Path to file containing private identity key.
private_identity_key_file: PathBuf,
/// Path to file containing public identity key.
public_identity_key_file: PathBuf,
/// Path to file containing private encryption key.
private_encryption_key_file: PathBuf,
/// Path to file containing public encryption key.
public_encryption_key_file: PathBuf,
/// Path to file containing shared key derived with the specified gateway that is used
/// for all communication with it.
gateway_shared_key_file: PathBuf,
/// Path to file containing key used for encrypting and decrypting the content of an
/// acknowledgement so that nobody besides the client knows which packet it refers to.
ack_key_file: PathBuf,
/// Full path to file containing reply encryption keys of all reply-SURBs we have ever
/// sent but not received back.
reply_encryption_key_store_path: PathBuf,
/// gateway_id specifies ID of the gateway to which the client should send messages.
/// If initially omitted, a random gateway will be chosen from the available topology.
gateway_id: String,
/// Address of the gateway listener to which all client requests should be sent.
gateway_listener: String,
/// nym_home_directory specifies absolute path to the home nym Clients directory.
/// It is expected to use default value and hence .toml file should not redefine this field.
nym_root_directory: PathBuf,
#[serde(skip)]
super_struct: PhantomData<*const T>,
}
impl<T: NymConfig> Default for Client<T> {
fn default() -> Self {
// there must be explicit checks for whether id is not empty later
Client {
version: env!("CARGO_PKG_VERSION").to_string(),
id: "".to_string(),
directory_server: DEFAULT_DIRECTORY_SERVER.to_string(),
vpn_mode: false,
private_identity_key_file: Default::default(),
public_identity_key_file: Default::default(),
private_encryption_key_file: Default::default(),
public_encryption_key_file: Default::default(),
gateway_shared_key_file: Default::default(),
ack_key_file: Default::default(),
reply_encryption_key_store_path: Default::default(),
gateway_id: "".to_string(),
gateway_listener: "".to_string(),
nym_root_directory: T::default_root_directory(),
super_struct: Default::default(),
}
}
}
impl<T: NymConfig> Client<T> {
fn default_private_identity_key_file(id: &str) -> PathBuf {
T::default_data_directory(id).join("private_identity.pem")
}
fn default_public_identity_key_file(id: &str) -> PathBuf {
T::default_data_directory(id).join("public_identity.pem")
}
fn default_private_encryption_key_file(id: &str) -> PathBuf {
T::default_data_directory(id).join("private_encryption.pem")
}
fn default_public_encryption_key_file(id: &str) -> PathBuf {
T::default_data_directory(id).join("public_encryption.pem")
}
fn default_gateway_shared_key_file(id: &str) -> PathBuf {
T::default_data_directory(id).join("gateway_shared.pem")
}
fn default_ack_key_file(id: &str) -> PathBuf {
T::default_data_directory(id).join("ack_key.pem")
}
fn default_reply_encryption_key_store_path(id: &str) -> PathBuf {
T::default_data_directory(id).join("reply_key_store")
}
}
#[derive(Debug, Deserialize, PartialEq, Serialize)]
#[serde(deny_unknown_fields)]
pub struct Logging {}
impl Default for Logging {
fn default() -> Self {
Logging {}
}
}
#[derive(Debug, Deserialize, PartialEq, Serialize)]
#[serde(default, deny_unknown_fields)]
pub struct Debug {
/// The parameter of Poisson distribution determining how long, on average,
/// sent packet is going to be delayed at any given mix node.
/// So for a packet going through three mix nodes, on average, it will take three times this value
/// until the packet reaches its destination.
#[serde(
deserialize_with = "deserialize_duration",
serialize_with = "humantime_serde::serialize"
)]
average_packet_delay: Duration,
/// The parameter of Poisson distribution determining how long, on average,
/// sent acknowledgement is going to be delayed at any given mix node.
/// So for an ack going through three mix nodes, on average, it will take three times this value
/// until the ack reaches its destination.
#[serde(
deserialize_with = "deserialize_duration",
serialize_with = "humantime_serde::serialize"
)]
average_ack_delay: Duration,
/// Value multiplied with the expected round trip time of an acknowledgement packet before
/// it is assumed it was lost and retransmission of the data packet happens.
/// In an ideal network with 0 latency, this value would have been 1.
ack_wait_multiplier: f64,
/// Value added to the expected round trip time of an acknowledgement packet before
/// it is assumed it was lost and retransmission of the data packet happens.
/// In an ideal network with 0 latency, this value would have been 0.
#[serde(
deserialize_with = "deserialize_duration",
serialize_with = "humantime_serde::serialize"
)]
ack_wait_addition: Duration,
/// The parameter of Poisson distribution determining how long, on average,
/// it is going to take for another loop cover traffic message to be sent.
#[serde(
deserialize_with = "deserialize_duration",
serialize_with = "humantime_serde::serialize"
)]
loop_cover_traffic_average_delay: Duration,
/// The parameter of Poisson distribution determining how long, on average,
/// it is going to take for another 'real traffic stream' message to be sent.
/// If no real packets are available and cover traffic is enabled,
/// a loop cover message is sent instead in order to preserve the rate.
#[serde(
deserialize_with = "deserialize_duration",
serialize_with = "humantime_serde::serialize"
)]
message_sending_average_delay: Duration,
/// How long we're willing to wait for a response to a message sent to the gateway,
/// before giving up on it.
#[serde(
deserialize_with = "deserialize_duration",
serialize_with = "humantime_serde::serialize"
)]
gateway_response_timeout: Duration,
/// The uniform interval at which the client queries the directory server
/// to try to obtain a compatible network topology to send sphinx packets through.
#[serde(
deserialize_with = "deserialize_duration",
serialize_with = "humantime_serde::serialize"
)]
topology_refresh_rate: Duration,
/// During topology refresh, test packets are sent through every single possible network
/// path. This timeout determines the waiting period before it is decided that the packet
/// did not reach its destination.
#[serde(
deserialize_with = "deserialize_duration",
serialize_with = "humantime_serde::serialize"
)]
topology_resolution_timeout: Duration,
/// If the client is running in VPN mode, this specifies the number of packets created with the
/// same initial secret before it gets rotated.
vpn_key_reuse_limit: Option<usize>,
}
impl Default for Debug {
fn default() -> Self {
Debug {
average_packet_delay: DEFAULT_AVERAGE_PACKET_DELAY,
average_ack_delay: DEFAULT_AVERAGE_PACKET_DELAY,
ack_wait_multiplier: DEFAULT_ACK_WAIT_MULTIPLIER,
ack_wait_addition: DEFAULT_ACK_WAIT_ADDITION,
loop_cover_traffic_average_delay: DEFAULT_LOOP_COVER_STREAM_AVERAGE_DELAY,
message_sending_average_delay: DEFAULT_MESSAGE_STREAM_AVERAGE_DELAY,
gateway_response_timeout: DEFAULT_GATEWAY_RESPONSE_TIMEOUT,
topology_refresh_rate: DEFAULT_TOPOLOGY_REFRESH_RATE,
topology_resolution_timeout: DEFAULT_TOPOLOGY_RESOLUTION_TIMEOUT,
vpn_key_reuse_limit: None,
}
}
} | |
init.go | /*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package client
import (
"github.com/pkg/errors"
clusterctlv1 "sigs.k8s.io/cluster-api/cmd/clusterctl/api/v1alpha3"
"sigs.k8s.io/cluster-api/cmd/clusterctl/pkg/client/cluster"
"sigs.k8s.io/cluster-api/cmd/clusterctl/pkg/client/config"
logf "sigs.k8s.io/cluster-api/cmd/clusterctl/pkg/log"
)
const NoopProvider = "-"
// Init initializes a management cluster by adding the requested list of providers.
func (c *clusterctlClient) Init(options InitOptions) ([]Components, error) {
log := logf.Log
// gets access to the management cluster
cluster, err := c.clusterClientFactory(options.Kubeconfig)
if err != nil {
return nil, err
}
// ensure the custom resource definitions required by clusterctl are in place
if err := cluster.ProviderInventory().EnsureCustomResourceDefinitions(); err != nil { | }
// checks if the cluster already contains a Core provider.
// if not, we consider this the first time init is executed, and thus we enforce the installation of a core provider (if one was not already explicitly requested by the user)
firstRun := false
currentCoreProvider, err := cluster.ProviderInventory().GetDefaultProviderName(clusterctlv1.CoreProviderType)
if err != nil {
return nil, err
}
if currentCoreProvider == "" {
firstRun = true
if options.CoreProvider == "" {
options.CoreProvider = config.ClusterAPIProviderName
}
if len(options.BootstrapProviders) == 0 {
options.BootstrapProviders = append(options.BootstrapProviders, config.KubeadmBootstrapProviderName)
}
if len(options.ControlPlaneProviders) == 0 {
options.ControlPlaneProviders = append(options.ControlPlaneProviders, config.KubeadmControlPlaneProviderName)
}
}
// create an installer service, add the requested providers to the install queue (thus performing validation of the target state of the management cluster
// before starting the installation), and then perform the installation.
installer := cluster.ProviderInstaller()
addOptions := addToInstallerOptions{
installer: installer,
targetNamespace: options.TargetNamespace,
watchingNamespace: options.WatchingNamespace,
}
log.Info("Fetching providers")
if options.CoreProvider != "" {
if err := c.addToInstaller(addOptions, clusterctlv1.CoreProviderType, options.CoreProvider); err != nil {
return nil, err
}
}
if err := c.addToInstaller(addOptions, clusterctlv1.BootstrapProviderType, options.BootstrapProviders...); err != nil {
return nil, err
}
if err := c.addToInstaller(addOptions, clusterctlv1.ControlPlaneProviderType, options.ControlPlaneProviders...); err != nil {
return nil, err
}
if err := c.addToInstaller(addOptions, clusterctlv1.InfrastructureProviderType, options.InfrastructureProviders...); err != nil {
return nil, err
}
// Before installing the providers, ensure the cert-manager WebHook is in place.
if err := cluster.CertManager().EnsureWebHook(); err != nil {
return nil, err
}
components, err := installer.Install()
if err != nil {
return nil, err
}
// If this is the firstRun, then log the usage instructions.
if firstRun && options.LogUsageInstructions {
log.Info("")
log.Info("Your management cluster has been initialized successfully!")
log.Info("")
log.Info("You can now create your first workload cluster by running the following:")
log.Info("")
log.Info(" clusterctl config cluster [name] --kubernetes-version [version] | kubectl apply -f -")
log.Info("")
}
// Components is an alias for repository.Components; the loop below converts between the two types.
aliasComponents := make([]Components, len(components))
for i, components := range components {
aliasComponents[i] = components
}
return aliasComponents, nil
}
type addToInstallerOptions struct {
installer cluster.ProviderInstaller
targetNamespace string
watchingNamespace string
}
// addToInstaller adds the components to the install queue and checks that the actual provider type matches the target group
func (c *clusterctlClient) addToInstaller(options addToInstallerOptions, targetGroup clusterctlv1.ProviderType, providers ...string) error {
for _, provider := range providers {
// It is possible to opt-out from automatic installation of bootstrap/controlPlane providers using '-' as a provider name (NoopProvider).
if provider == NoopProvider {
if targetGroup == clusterctlv1.CoreProviderType {
return errors.New("the '-' value can not be used for the core provider")
}
continue
}
components, err := c.getComponentsByName(provider, options.targetNamespace, options.watchingNamespace)
if err != nil {
return errors.Wrapf(err, "failed to get provider components for the %q provider", provider)
}
if components.Type() != targetGroup {
return errors.Errorf("can't use %q provider as an %q, it is a %q", provider, targetGroup, components.Type())
}
if err := options.installer.Add(components); err != nil {
return errors.Wrapf(err, "failed to prepare for installing the %q provider", provider)
}
}
return nil
} | return nil, err |
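The Init flow above is driven entirely by the fields of InitOptions. The sketch below is a minimal, hypothetical usage of that flow, not part of clusterctl itself: the client.New constructor, the empty kubeconfig path, and the "docker" infrastructure provider are assumptions made for illustration, while the InitOptions fields mirror the ones Init actually reads.

// Hypothetical usage sketch for the Init flow above (illustration only).
// client.New, the empty kubeconfig path and the "docker" provider name are
// assumptions; the InitOptions fields match the ones consumed by Init.
package main

import (
    "log"

    "sigs.k8s.io/cluster-api/cmd/clusterctl/pkg/client"
)

func main() {
    c, err := client.New("") // assumed constructor for the clusterctl client
    if err != nil {
        log.Fatal(err)
    }
    // Leaving CoreProvider/BootstrapProviders/ControlPlaneProviders empty
    // exercises the first-run defaulting branch shown in Init.
    components, err := c.Init(client.InitOptions{
        Kubeconfig:              "",
        InfrastructureProviders: []string{"docker"},
        LogUsageInstructions:    true,
    })
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("installed %d provider components", len(components))
}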
aarch64_apple_tvos.rs | use super::apple_sdk_base::{opts, AppleOS, Arch};
use crate::spec::{LinkerFlavor, Target, TargetOptions, TargetResult};
pub fn | () -> TargetResult {
let base = opts(Arch::Arm64, AppleOS::tvOS)?;
Ok(Target {
llvm_target: "arm64-apple-tvos".to_string(),
target_endian: "little".to_string(),
target_pointer_width: "64".to_string(),
target_c_int_width: "32".to_string(),
data_layout: "e-m:o-i64:64-i128:128-n32:64-S128".to_string(),
arch: "aarch64".to_string(),
target_os: "tvos".to_string(),
target_env: String::new(),
target_vendor: "apple".to_string(),
linker_flavor: LinkerFlavor::Gcc,
options: TargetOptions {
features: "+neon,+fp-armv8,+cyclone".to_string(),
eliminate_frame_pointer: false,
max_atomic_width: Some(128),
abi_blacklist: super::arm_base::abi_blacklist(),
..base
},
})
}
| target |
adblock.d.ts | import { Page } from 'puppeteer-core';
declare const _default: (page: Page) => Promise<Page>;
/**
* Enables ad blocking in page.
* Requires `@cliqz/adblocker-puppeteer` package.
*
* @param page - Page to hook to. | */
export = _default; |
|
l1only.go | // Copyright 2015 Netflix, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package orcas
import (
"github.com/netflix/rend/common"
"github.com/netflix/rend/handlers"
"github.com/netflix/rend/metrics"
"github.com/netflix/rend/protocol"
"github.com/netflix/rend/timer"
)
type L1OnlyOrca struct {
l1 handlers.Handler
res protocol.Responder
}
func | (l1, l2 handlers.Handler, res protocol.Responder) Orca {
return &L1OnlyOrca{
l1: l1,
res: res,
}
}
func (l *L1OnlyOrca) Set(req common.SetRequest) error {
//log.Println("set", string(req.Key))
metrics.IncCounter(MetricCmdSetL1)
start := timer.Now()
err := l.l1.Set(req)
metrics.ObserveHist(HistSetL1, timer.Since(start))
if err == nil {
metrics.IncCounter(MetricCmdSetSuccessL1)
metrics.IncCounter(MetricCmdSetSuccess)
err = l.res.Set(req.Opaque, req.Quiet)
} else {
metrics.IncCounter(MetricCmdSetErrorsL1)
metrics.IncCounter(MetricCmdSetErrors)
}
return err
}
func (l *L1OnlyOrca) Add(req common.SetRequest) error {
//log.Println("add", string(req.Key))
metrics.IncCounter(MetricCmdAddL1)
start := timer.Now()
err := l.l1.Add(req)
metrics.ObserveHist(HistAddL1, timer.Since(start))
if err == nil {
metrics.IncCounter(MetricCmdAddStoredL1)
metrics.IncCounter(MetricCmdAddStored)
err = l.res.Add(req.Opaque, req.Quiet)
} else if err == common.ErrKeyExists {
metrics.IncCounter(MetricCmdAddNotStoredL1)
metrics.IncCounter(MetricCmdAddNotStored)
} else {
metrics.IncCounter(MetricCmdAddErrorsL1)
metrics.IncCounter(MetricCmdAddErrors)
}
return err
}
func (l *L1OnlyOrca) Replace(req common.SetRequest) error {
//log.Println("replace", string(req.Key))
metrics.IncCounter(MetricCmdReplaceL1)
start := timer.Now()
err := l.l1.Replace(req)
metrics.ObserveHist(HistReplaceL1, timer.Since(start))
if err == nil {
metrics.IncCounter(MetricCmdReplaceStoredL1)
metrics.IncCounter(MetricCmdReplaceStored)
err = l.res.Replace(req.Opaque, req.Quiet)
} else if err == common.ErrKeyNotFound {
metrics.IncCounter(MetricCmdReplaceNotStoredL1)
metrics.IncCounter(MetricCmdReplaceNotStored)
} else {
metrics.IncCounter(MetricCmdReplaceErrorsL1)
metrics.IncCounter(MetricCmdReplaceErrors)
}
return err
}
func (l *L1OnlyOrca) Append(req common.SetRequest) error {
//log.Println("append", string(req.Key))
metrics.IncCounter(MetricCmdAppendL1)
start := timer.Now()
err := l.l1.Append(req)
metrics.ObserveHist(HistAppendL1, timer.Since(start))
if err == nil {
metrics.IncCounter(MetricCmdAppendStoredL1)
metrics.IncCounter(MetricCmdAppendStored)
err = l.res.Append(req.Opaque, req.Quiet)
} else if err == common.ErrKeyNotFound {
metrics.IncCounter(MetricCmdAppendNotStoredL1)
metrics.IncCounter(MetricCmdAppendNotStored)
} else {
metrics.IncCounter(MetricCmdAppendErrorsL1)
metrics.IncCounter(MetricCmdAppendErrors)
}
return err
}
func (l *L1OnlyOrca) Prepend(req common.SetRequest) error {
//log.Println("prepend", string(req.Key))
metrics.IncCounter(MetricCmdPrependL1)
start := timer.Now()
err := l.l1.Prepend(req)
metrics.ObserveHist(HistPrependL1, timer.Since(start))
if err == nil {
metrics.IncCounter(MetricCmdPrependStoredL1)
metrics.IncCounter(MetricCmdPrependStored)
err = l.res.Prepend(req.Opaque, req.Quiet)
} else if err == common.ErrKeyNotFound {
metrics.IncCounter(MetricCmdPrependNotStoredL1)
metrics.IncCounter(MetricCmdPrependNotStored)
} else {
metrics.IncCounter(MetricCmdPrependErrorsL1)
metrics.IncCounter(MetricCmdPrependErrors)
}
return err
}
func (l *L1OnlyOrca) Delete(req common.DeleteRequest) error {
//log.Println("delete", string(req.Key))
metrics.IncCounter(MetricCmdDeleteL1)
start := timer.Now()
err := l.l1.Delete(req)
metrics.ObserveHist(HistDeleteL1, timer.Since(start))
if err == nil {
metrics.IncCounter(MetricCmdDeleteHits)
metrics.IncCounter(MetricCmdDeleteHitsL1)
l.res.Delete(req.Opaque)
} else if err == common.ErrKeyNotFound {
metrics.IncCounter(MetricCmdDeleteMissesL1)
metrics.IncCounter(MetricCmdDeleteMisses)
} else {
metrics.IncCounter(MetricCmdDeleteErrorsL1)
metrics.IncCounter(MetricCmdDeleteErrors)
}
return err
}
func (l *L1OnlyOrca) Touch(req common.TouchRequest) error {
//log.Println("touch", string(req.Key))
metrics.IncCounter(MetricCmdTouchL1)
start := timer.Now()
err := l.l1.Touch(req)
metrics.ObserveHist(HistTouchL1, timer.Since(start))
if err == nil {
metrics.IncCounter(MetricCmdTouchHitsL1)
metrics.IncCounter(MetricCmdTouchHits)
l.res.Touch(req.Opaque)
} else if err == common.ErrKeyNotFound {
metrics.IncCounter(MetricCmdTouchMissesL1)
metrics.IncCounter(MetricCmdTouchMisses)
} else {
metrics.IncCounter(MetricCmdTouchErrorsL1)
metrics.IncCounter(MetricCmdTouchErrors)
}
return err
}
func (l *L1OnlyOrca) Get(req common.GetRequest) error {
metrics.IncCounterBy(MetricCmdGetKeys, uint64(len(req.Keys)))
//debugString := "get"
//for _, k := range req.Keys {
// debugString += " "
// debugString += string(k)
//}
//println(debugString)
metrics.IncCounter(MetricCmdGetL1)
metrics.IncCounterBy(MetricCmdGetKeysL1, uint64(len(req.Keys)))
start := timer.Now()
resChan, errChan := l.l1.Get(req)
var err error
// Read all the responses back from l.l1.
// The contract is that the resChan will have GetResponse's for get hits and misses,
// and the errChan will have any other errors, such as an out of memory error from
// memcached. If any receive happens from errChan, there will be no more responses
// from resChan.
for {
select {
case res, ok := <-resChan:
if !ok {
resChan = nil
} else {
if res.Miss {
metrics.IncCounter(MetricCmdGetMissesL1)
metrics.IncCounter(MetricCmdGetMisses)
} else {
metrics.IncCounter(MetricCmdGetHits)
metrics.IncCounter(MetricCmdGetHitsL1)
}
l.res.Get(res)
}
case getErr, ok := <-errChan:
if !ok {
errChan = nil
} else {
metrics.IncCounter(MetricCmdGetErrors)
metrics.IncCounter(MetricCmdGetErrorsL1)
err = getErr
}
}
if resChan == nil && errChan == nil {
break
}
}
metrics.ObserveHist(HistGetL1, timer.Since(start))
if err == nil {
l.res.GetEnd(req.NoopOpaque, req.NoopEnd)
}
return err
}
func (l *L1OnlyOrca) GetE(req common.GetRequest) error {
// For an L1 only orchestrator, this will fail if the backend is memcached.
// It should be talking to another rend-based server, such as the L2 for the
// EVCache server project.
metrics.IncCounterBy(MetricCmdGetEKeys, uint64(len(req.Keys)))
//debugString := "gete"
//for _, k := range req.Keys {
// debugString += " "
// debugString += string(k)
//}
//println(debugString)
metrics.IncCounter(MetricCmdGetEL1)
metrics.IncCounterBy(MetricCmdGetEKeysL1, uint64(len(req.Keys)))
start := timer.Now()
resChan, errChan := l.l1.GetE(req)
var err error
// Read all the responses back from l.l1.
// The contract is that the resChan will have GetEResponse's for get hits and misses,
// and the errChan will have any other errors, such as an out of memory error from
// memcached. If any receive happens from errChan, there will be no more responses
// from resChan.
for {
select {
case res, ok := <-resChan:
if !ok {
resChan = nil
} else {
if res.Miss {
metrics.IncCounter(MetricCmdGetEMissesL1)
metrics.IncCounter(MetricCmdGetEMisses)
} else {
metrics.IncCounter(MetricCmdGetEHits)
metrics.IncCounter(MetricCmdGetEHitsL1)
}
l.res.GetE(res)
}
case getErr, ok := <-errChan:
if !ok {
errChan = nil
} else {
metrics.IncCounter(MetricCmdGetEErrors)
metrics.IncCounter(MetricCmdGetEErrorsL1)
err = getErr
}
}
if resChan == nil && errChan == nil {
break
}
}
metrics.ObserveHist(HistGetEL1, timer.Since(start))
if err == nil {
l.res.GetEnd(req.NoopOpaque, req.NoopEnd)
}
return err
}
func (l *L1OnlyOrca) Gat(req common.GATRequest) error {
//log.Println("gat", string(req.Key))
metrics.IncCounter(MetricCmdGatL1)
start := timer.Now()
res, err := l.l1.GAT(req)
metrics.ObserveHist(HistGatL1, timer.Since(start))
if err == nil {
if res.Miss {
metrics.IncCounter(MetricCmdGatMissesL1)
// TODO: Account for L2
metrics.IncCounter(MetricCmdGatMisses)
} else {
metrics.IncCounter(MetricCmdGatHits)
metrics.IncCounter(MetricCmdGatHitsL1)
}
l.res.GAT(res)
// There is no GetEnd call required here since this is only ever
// done in the binary protocol, where there's no END marker.
// Calling l.res.GetEnd was a no-op here and is just useless.
//l.res.GetEnd(0, false)
} else {
metrics.IncCounter(MetricCmdGatErrors)
metrics.IncCounter(MetricCmdGatErrorsL1)
}
return err
}
func (l *L1OnlyOrca) Noop(req common.NoopRequest) error {
return l.res.Noop(req.Opaque)
}
func (l *L1OnlyOrca) Quit(req common.QuitRequest) error {
return l.res.Quit(req.Opaque, req.Quiet)
}
func (l *L1OnlyOrca) Version(req common.VersionRequest) error {
return l.res.Version(req.Opaque)
}
func (l *L1OnlyOrca) Unknown(req common.Request) error {
return common.ErrUnknownCmd
}
func (l *L1OnlyOrca) Error(req common.Request, reqType common.RequestType, err error) {
var opaque uint32
var quiet bool
if req != nil {
opaque = req.GetOpaque()
quiet = req.IsQuiet()
}
l.res.Error(opaque, reqType, err, quiet)
}
| L1Only |
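Get and GetE above share one contract: drain resChan and errChan concurrently, set each channel variable to nil once it closes, and stop only when both are nil. The stand-alone sketch below reproduces just that drain loop with placeholder types; nothing in it is a rend API.

// Stand-alone illustration of the two-channel drain pattern used by Get/GetE.
// The response type and the channels built in main are placeholders, not rend APIs.
package main

import "fmt"

type response struct {
    Key  string
    Miss bool
}

func drain(resChan <-chan response, errChan <-chan error) error {
    var err error
    for {
        select {
        case res, ok := <-resChan:
            if !ok {
                // Closed: disable this case by nil-ing the channel out.
                resChan = nil
            } else {
                fmt.Printf("key=%s miss=%v\n", res.Key, res.Miss)
            }
        case e, ok := <-errChan:
            if !ok {
                errChan = nil
            } else {
                // Per the contract, no more responses will arrive after an error.
                err = e
            }
        }
        if resChan == nil && errChan == nil {
            return err
        }
    }
}

func main() {
    resChan := make(chan response, 2)
    errChan := make(chan error)
    resChan <- response{Key: "a"}
    resChan <- response{Key: "b", Miss: true}
    close(resChan)
    close(errChan)
    if err := drain(resChan, errChan); err != nil {
        fmt.Println("error:", err)
    }
}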
last_trigger_responses.go | // Code generated by go-swagger; DO NOT EDIT.
package webhook
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"fmt"
"io"
"github.com/go-openapi/runtime"
"github.com/go-openapi/strfmt"
"github.com/goharbor/go-client/pkg/sdk/v2.0/models"
)
// LastTriggerReader is a Reader for the LastTrigger structure.
type LastTriggerReader struct {
formats strfmt.Registry
}
// ReadResponse reads a server response into the received o.
func (o *LastTriggerReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
switch response.Code() {
case 200:
result := NewLastTriggerOK()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return result, nil
case 400:
result := NewLastTriggerBadRequest()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return nil, result
case 401:
result := NewLastTriggerUnauthorized()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return nil, result
case 403:
result := NewLastTriggerForbidden()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return nil, result
case 500:
result := NewLastTriggerInternalServerError()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return nil, result
default:
return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code())
}
}
// NewLastTriggerOK creates a LastTriggerOK with default headers values
func NewLastTriggerOK() *LastTriggerOK {
return &LastTriggerOK{}
}
/*LastTriggerOK handles this case with default header values.
Test webhook connection successfully.
*/
type LastTriggerOK struct {
Payload []*models.WebhookLastTrigger
}
func (o *LastTriggerOK) Error() string {
return fmt.Sprintf("[GET /projects/{project_name_or_id}/webhook/lasttrigger][%d] lastTriggerOK %+v", 200, o.Payload)
}
func (o *LastTriggerOK) GetPayload() []*models.WebhookLastTrigger {
return o.Payload
}
func (o *LastTriggerOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
// response payload
if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
// NewLastTriggerBadRequest creates a LastTriggerBadRequest with default headers values
func NewLastTriggerBadRequest() *LastTriggerBadRequest {
return &LastTriggerBadRequest{}
}
/*LastTriggerBadRequest handles this case with default header values.
Bad request
*/
type LastTriggerBadRequest struct {
/*The ID of the corresponding request for the response
*/
XRequestID string
Payload *models.Errors
}
func (o *LastTriggerBadRequest) Error() string {
return fmt.Sprintf("[GET /projects/{project_name_or_id}/webhook/lasttrigger][%d] lastTriggerBadRequest %+v", 400, o.Payload)
}
func (o *LastTriggerBadRequest) GetPayload() *models.Errors {
return o.Payload
} |
func (o *LastTriggerBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
// response header X-Request-Id
o.XRequestID = response.GetHeader("X-Request-Id")
o.Payload = new(models.Errors)
// response payload
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
// NewLastTriggerUnauthorized creates a LastTriggerUnauthorized with default headers values
func NewLastTriggerUnauthorized() *LastTriggerUnauthorized {
return &LastTriggerUnauthorized{}
}
/*LastTriggerUnauthorized handles this case with default header values.
Unauthorized
*/
type LastTriggerUnauthorized struct {
/*The ID of the corresponding request for the response
*/
XRequestID string
Payload *models.Errors
}
func (o *LastTriggerUnauthorized) Error() string {
return fmt.Sprintf("[GET /projects/{project_name_or_id}/webhook/lasttrigger][%d] lastTriggerUnauthorized %+v", 401, o.Payload)
}
func (o *LastTriggerUnauthorized) GetPayload() *models.Errors {
return o.Payload
}
func (o *LastTriggerUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
// response header X-Request-Id
o.XRequestID = response.GetHeader("X-Request-Id")
o.Payload = new(models.Errors)
// response payload
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
// NewLastTriggerForbidden creates a LastTriggerForbidden with default headers values
func NewLastTriggerForbidden() *LastTriggerForbidden {
return &LastTriggerForbidden{}
}
/*LastTriggerForbidden handles this case with default header values.
Forbidden
*/
type LastTriggerForbidden struct {
/*The ID of the corresponding request for the response
*/
XRequestID string
Payload *models.Errors
}
func (o *LastTriggerForbidden) Error() string {
return fmt.Sprintf("[GET /projects/{project_name_or_id}/webhook/lasttrigger][%d] lastTriggerForbidden %+v", 403, o.Payload)
}
func (o *LastTriggerForbidden) GetPayload() *models.Errors {
return o.Payload
}
func (o *LastTriggerForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
// response header X-Request-Id
o.XRequestID = response.GetHeader("X-Request-Id")
o.Payload = new(models.Errors)
// response payload
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
// NewLastTriggerInternalServerError creates a LastTriggerInternalServerError with default headers values
func NewLastTriggerInternalServerError() *LastTriggerInternalServerError {
return &LastTriggerInternalServerError{}
}
/*LastTriggerInternalServerError handles this case with default header values.
Internal server error
*/
type LastTriggerInternalServerError struct {
/*The ID of the corresponding request for the response
*/
XRequestID string
Payload *models.Errors
}
func (o *LastTriggerInternalServerError) Error() string {
return fmt.Sprintf("[GET /projects/{project_name_or_id}/webhook/lasttrigger][%d] lastTriggerInternalServerError %+v", 500, o.Payload)
}
func (o *LastTriggerInternalServerError) GetPayload() *models.Errors {
return o.Payload
}
func (o *LastTriggerInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
// response header X-Request-Id
o.XRequestID = response.GetHeader("X-Request-Id")
o.Payload = new(models.Errors)
// response payload
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
return err
}
return nil
} | |
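Every non-2xx branch above is returned to the caller as a value of a distinct error type, so callers usually branch on the concrete type rather than on raw status codes. The snippet below is an illustrative companion, not generated code; it is assumed to live in the same webhook package, and the function that actually performs the HTTP request is deliberately left out.

// Illustration only: branching on the typed errors produced by LastTriggerReader.
// Assumed to be a hand-written file in the same webhook package; the generated
// client method that performs the request is not shown here.
package webhook

import "log"

func handleLastTriggerErr(err error) {
    switch e := err.(type) {
    case nil:
        // 200 OK: the payload was already decoded into LastTriggerOK.
    case *LastTriggerUnauthorized:
        log.Printf("unauthorized (request id %s)", e.XRequestID)
    case *LastTriggerForbidden:
        log.Printf("forbidden (request id %s)", e.XRequestID)
    case *LastTriggerInternalServerError:
        log.Printf("server error: %+v", e.Payload)
    default:
        log.Printf("unexpected error: %v", err)
    }
}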
index.ts | export * from './carousel.component';
export * from './carousel.model';
export * from './carousel.module';
export * from './carousel.service'; | ||
create_user.py | # Copyright (c) 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
#
# Code generated. DO NOT EDIT!
# template file: justice_py_sdk_codegen/__main__.py
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
# justice-iam-service (5.10.1)
from __future__ import annotations
from typing import Any, Dict, List, Optional, Tuple, Union
from .....core import Operation
from .....core import HeaderStr
from .....core import HttpResponse
from ...models import ModelUserCreateRequest
from ...models import ModelUserCreateResponse
class CreateUser(Operation):
"""Create User (CreateUser)
Required permission 'NAMESPACE:{namespace}:USER [CREATE]'.
Available Authentication Types:
1. EMAILPASSWD : an authentication type used for new user registration through email.
2. PHONEPASSWD : an authentication type used for new user registration through phone number.
Country uses the ISO 3166-1 alpha-2 two-letter code, e.g. US.
Required Permission(s):
- NAMESPACE:{namespace}:USER [CREATE]
Properties:
url: /iam/namespaces/{namespace}/users
method: POST
tags: ["Users"]
consumes: ["application/json"]
produces: ["application/json"]
securities: [BEARER_AUTH]
body: (body) REQUIRED ModelUserCreateRequest in body
namespace: (namespace) REQUIRED str in path
Responses:
201: Created - ModelUserCreateResponse (Created)
400: Bad Request -
401: Unauthorized - (Unauthorized access)
403: Forbidden - (Forbidden)
409: Conflict -
"""
# region fields
_url: str = "/iam/namespaces/{namespace}/users"
_method: str = "POST"
_consumes: List[str] = ["application/json"]
_produces: List[str] = ["application/json"]
_securities: List[List[str]] = [["BEARER_AUTH"]]
_location_query: str = None
body: ModelUserCreateRequest # REQUIRED in [body]
namespace: str # REQUIRED in [path]
# endregion fields
# region properties
@property
def url(self) -> str:
return self._url
@property
def method(self) -> str:
return self._method
@property
def consumes(self) -> List[str]:
return self._consumes
@property
def produces(self) -> List[str]:
return self._produces
@property
def securities(self) -> List[List[str]]:
return self._securities
@property
def location_query(self) -> str:
return self._location_query
# endregion properties
# region get methods
# endregion get methods
# region get_x_params methods
def get_all_params(self) -> dict:
return {
"body": self.get_body_params(),
"path": self.get_path_params(),
}
def get_body_params(self) -> Any:
if not hasattr(self, "body") or self.body is None:
return None
return self.body.to_dict()
def | (self) -> dict:
result = {}
if hasattr(self, "namespace"):
result["namespace"] = self.namespace
return result
# endregion get_x_params methods
# region is/has methods
# endregion is/has methods
# region with_x methods
def with_body(self, value: ModelUserCreateRequest) -> CreateUser:
self.body = value
return self
def with_namespace(self, value: str) -> CreateUser:
self.namespace = value
return self
# endregion with_x methods
# region to methods
def to_dict(self, include_empty: bool = False) -> dict:
result: dict = {}
if hasattr(self, "body") and self.body:
result["body"] = self.body.to_dict(include_empty=include_empty)
elif include_empty:
result["body"] = ModelUserCreateRequest()
if hasattr(self, "namespace") and self.namespace:
result["namespace"] = str(self.namespace)
elif include_empty:
result["namespace"] = ""
return result
# endregion to methods
# region response methods
# noinspection PyMethodMayBeStatic
def parse_response(self, code: int, content_type: str, content: Any) -> Tuple[Union[None, ModelUserCreateResponse], Union[None, HttpResponse]]:
"""Parse the given response.
201: Created - ModelUserCreateResponse (Created)
400: Bad Request -
401: Unauthorized - (Unauthorized access)
403: Forbidden - (Forbidden)
409: Conflict -
---: HttpResponse (Undocumented Response)
---: HttpResponse (Unexpected Content-Type Error)
---: HttpResponse (Unhandled Error)
"""
pre_processed_response, error = self.pre_process_response(code=code, content_type=content_type, content=content)
if error is not None:
return None, None if error.is_no_content() else error
code, content_type, content = pre_processed_response
if code == 201:
return ModelUserCreateResponse.create_from_dict(content), None
if code == 400:
return None, HttpResponse.create(code, "Bad Request")
if code == 401:
return None, HttpResponse.create(code, "Unauthorized")
if code == 403:
return None, HttpResponse.create(code, "Forbidden")
if code == 409:
return None, HttpResponse.create(code, "Conflict")
return None, self.handle_undocumented_response(code=code, content_type=content_type, content=content)
# endregion response methods
# region static methods
@classmethod
def create(
cls,
body: ModelUserCreateRequest,
namespace: str,
) -> CreateUser:
instance = cls()
instance.body = body
instance.namespace = namespace
return instance
@classmethod
def create_from_dict(cls, dict_: dict, include_empty: bool = False) -> CreateUser:
instance = cls()
if "body" in dict_ and dict_["body"] is not None:
instance.body = ModelUserCreateRequest.create_from_dict(dict_["body"], include_empty=include_empty)
elif include_empty:
instance.body = ModelUserCreateRequest()
if "namespace" in dict_ and dict_["namespace"] is not None:
instance.namespace = str(dict_["namespace"])
elif include_empty:
instance.namespace = ""
return instance
@staticmethod
def get_field_info() -> Dict[str, str]:
return {
"body": "body",
"namespace": "namespace",
}
@staticmethod
def get_required_map() -> Dict[str, bool]:
return {
"body": True,
"namespace": True,
}
# endregion static methods
| get_path_params |
elastic.py | # (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
import time
from collections import defaultdict
import requests
from six import iteritems, itervalues
from six.moves.urllib.parse import urljoin, urlparse
from datadog_checks.base import AgentCheck, is_affirmative, to_string
from .config import from_instance
from .metrics import (
CLUSTER_PENDING_TASKS,
health_stats_for_version,
index_stats_for_version,
node_system_stats_for_version,
pshard_stats_for_version,
slm_stats_for_version,
stats_for_version,
)
class AuthenticationError(requests.exceptions.HTTPError):
"""Authentication Error, unable to reach server"""
class ESCheck(AgentCheck):
HTTP_CONFIG_REMAPPER = {
'aws_service': {'name': 'aws_service', 'default': 'es'},
'ssl_verify': {'name': 'tls_verify'},
'ssl_cert': {'name': 'tls_cert'},
'ssl_key': {'name': 'tls_private_key'},
}
SERVICE_CHECK_CONNECT_NAME = 'elasticsearch.can_connect'
SERVICE_CHECK_CLUSTER_STATUS = 'elasticsearch.cluster_health'
SOURCE_TYPE_NAME = 'elasticsearch'
def __init__(self, name, init_config, instances):
super(ESCheck, self).__init__(name, init_config, instances)
# Host status needs to persist across all checks
self.cluster_status = {}
if self.instance.get('auth_type') == 'aws' and self.instance.get('url'):
self.HTTP_CONFIG_REMAPPER = self.HTTP_CONFIG_REMAPPER.copy()
self.HTTP_CONFIG_REMAPPER['aws_host'] = {
'name': 'aws_host',
'default': urlparse(self.instance['url']).hostname,
}
self._config = from_instance(self.instance)
def check(self, _):
admin_forwarder = self._config.admin_forwarder
jvm_rate = self.instance.get('gc_collectors_as_rate', False)
base_tags = list(self._config.tags)
service_check_tags = list(self._config.service_check_tags)
# Check ES version for this instance and define parameters
# (URLs and metrics) accordingly
try:
version = self._get_es_version()
except AuthenticationError:
self.log.exception("The ElasticSearch credentials are incorrect")
raise
health_url, stats_url, pshard_stats_url, pending_tasks_url, slm_url = self._get_urls(version)
stats_metrics = stats_for_version(version, jvm_rate)
if self._config.cluster_stats:
# Include Node System metrics
stats_metrics.update(node_system_stats_for_version(version))
pshard_stats_metrics = pshard_stats_for_version(version)
# Load stats data.
# This must happen before other URL processing as the cluster name
# is retrieved here, and added to the tag list.
stats_url = self._join_url(stats_url, admin_forwarder)
stats_data = self._get_data(stats_url)
if stats_data.get('cluster_name'):
# retrieve the cluster name from the data, and append it to the
# master tag list.
cluster_tags = ["elastic_cluster:{}".format(stats_data['cluster_name'])]
if not is_affirmative(self.instance.get('disable_legacy_cluster_tag', False)):
cluster_tags.append("cluster_name:{}".format(stats_data['cluster_name']))
base_tags.extend(cluster_tags)
service_check_tags.extend(cluster_tags)
self._process_stats_data(stats_data, stats_metrics, base_tags)
# Load cluster-wise data
# Note: this is a cluster-wide query, so it might time out.
if self._config.pshard_stats:
send_sc = bubble_ex = not self._config.pshard_graceful_to
pshard_stats_url = self._join_url(pshard_stats_url, admin_forwarder)
try:
pshard_stats_data = self._get_data(pshard_stats_url, send_sc=send_sc)
self._process_pshard_stats_data(pshard_stats_data, pshard_stats_metrics, base_tags)
except requests.ReadTimeout as e:
if bubble_ex:
raise
self.log.warning("Timed out reading pshard-stats from servers (%s) - stats will be missing", e)
# Get Snapshot Lifecycle Management (SLM) policies
if slm_url is not None:
slm_url = self._join_url(slm_url, admin_forwarder)
policy_data = self._get_data(slm_url)
self._process_policy_data(policy_data, version, base_tags)
# Load the health data.
health_url = self._join_url(health_url, admin_forwarder)
health_data = self._get_data(health_url)
self._process_health_data(health_data, version, base_tags, service_check_tags)
if self._config.pending_task_stats:
# Load the pending_tasks data.
pending_tasks_url = self._join_url(pending_tasks_url, admin_forwarder)
pending_tasks_data = self._get_data(pending_tasks_url)
self._process_pending_tasks_data(pending_tasks_data, base_tags)
if self._config.index_stats and version >= [1, 0, 0]:
try:
self._get_index_metrics(admin_forwarder, version, base_tags)
except requests.ReadTimeout as e:
self.log.warning("Timed out reading index stats from servers (%s) - stats will be missing", e)
# If we're here we did not have any ES conn issues
self.service_check(self.SERVICE_CHECK_CONNECT_NAME, AgentCheck.OK, tags=self._config.service_check_tags)
def _get_es_version(self):
"""
Get the running version of elasticsearch.
"""
try:
data = self._get_data(self._config.url, send_sc=False)
raw_version = data['version']['number']
self.set_metadata('version', raw_version)
# pre-release versions of elasticsearch are suffixed with -rcX etc.
# peel that off so that the map below doesn't error out
raw_version = raw_version.split('-')[0]
version = [int(p) for p in raw_version.split('.')[0:3]]
except AuthenticationError:
raise
except Exception as e:
self.warning("Error while trying to get Elasticsearch version from %s %s", self._config.url, e)
version = [1, 0, 0]
self.log.debug("Elasticsearch version is %s", version)
return version
def _join_url(self, url, admin_forwarder=False):
"""
overrides `urlparse.urljoin` since it removes base url path
https://docs.python.org/2/library/urlparse.html#urlparse.urljoin
"""
if admin_forwarder:
return self._config.url + url
else:
return urljoin(self._config.url, url)
def _get_index_metrics(self, admin_forwarder, version, base_tags):
cat_url = '/_cat/indices?format=json&bytes=b'
index_url = self._join_url(cat_url, admin_forwarder)
index_resp = self._get_data(index_url)
index_stats_metrics = index_stats_for_version(version)
health_stat = {'green': 0, 'yellow': 1, 'red': 2}
reversed_health_stat = {'red': 0, 'yellow': 1, 'green': 2}
for idx in index_resp:
tags = base_tags + ['index_name:' + idx['index']]
# we need to remap metric names because the ones from elastic
# contain dots and that would confuse `_process_metric()` (sic)
index_data = {
'docs_count': idx.get('docs.count'),
'docs_deleted': idx.get('docs.deleted'),
'primary_shards': idx.get('pri'),
'replica_shards': idx.get('rep'),
'primary_store_size': idx.get('pri.store.size'),
'store_size': idx.get('store.size'),
'health': idx.get('health'),
}
# Convert the health status value
if index_data['health'] is not None:
status = index_data['health'].lower()
index_data['health'] = health_stat[status]
index_data['health_reverse'] = reversed_health_stat[status]
# Ensure that index_data does not contain None values
for key, value in list(iteritems(index_data)):
if value is None:
del index_data[key]
self.log.warning("The index %s has no metric data for %s", idx['index'], key)
for metric in index_stats_metrics:
# metric description
desc = index_stats_metrics[metric]
self._process_metric(index_data, metric, *desc, tags=tags)
def _get_urls(self, version):
"""
Compute the URLs we need to hit depending on the running ES version
"""
pshard_stats_url = "/_stats"
health_url = "/_cluster/health"
slm_url = None
if version >= [0, 90, 10]:
pending_tasks_url = "/_cluster/pending_tasks"
stats_url = "/_nodes/stats" if self._config.cluster_stats else "/_nodes/_local/stats"
if version < [5, 0, 0]:
# version 5 errors out if the `all` parameter is set
stats_url += "?all=true"
if version >= [7, 4, 0] and self._config.slm_stats:
slm_url = "/_slm/policy"
else:
# legacy
pending_tasks_url = None
stats_url = (
"/_cluster/nodes/stats?all=true"
if self._config.cluster_stats
else "/_cluster/nodes/_local/stats?all=true"
)
return health_url, stats_url, pshard_stats_url, pending_tasks_url, slm_url
def _get_data(self, url, send_sc=True):
"""
Hit a given URL and return the parsed json
"""
resp = None
try:
resp = self.http.get(url)
resp.raise_for_status()
except Exception as e:
# this means we've hit a particular kind of auth error that means the config is broken
if resp and resp.status_code == 400:
raise AuthenticationError("The ElasticSearch credentials are incorrect")
if send_sc:
self.service_check(
self.SERVICE_CHECK_CONNECT_NAME,
AgentCheck.CRITICAL,
message="Error {} when hitting {}".format(e, url),
tags=self._config.service_check_tags,
)
raise
self.log.debug("request to url %s returned: %s", url, resp)
return resp.json()
def _process_pending_tasks_data(self, data, base_tags):
p_tasks = defaultdict(int)
average_time_in_queue = 0
for task in data.get('tasks', []):
p_tasks[task.get('priority')] += 1
average_time_in_queue += task.get('time_in_queue_millis', 0)
total = sum(itervalues(p_tasks))
node_data = {
'pending_task_total': total,
'pending_tasks_priority_high': p_tasks['high'],
'pending_tasks_priority_urgent': p_tasks['urgent'],
# if total is 0 default to 1
'pending_tasks_time_in_queue': average_time_in_queue // (total or 1),
}
for metric in CLUSTER_PENDING_TASKS:
# metric description
desc = CLUSTER_PENDING_TASKS[metric]
self._process_metric(node_data, metric, *desc, tags=base_tags)
def _process_stats_data(self, data, stats_metrics, base_tags):
for node_data in itervalues(data.get('nodes', {})):
metric_hostname = None
metrics_tags = list(base_tags)
# Resolve the node's name
node_name = node_data.get('name')
if node_name:
metrics_tags.append('node_name:{}'.format(node_name))
# Resolve the node's hostname
if self._config.node_name_as_host:
if node_name:
metric_hostname = node_name
elif self._config.cluster_stats:
for k in ['hostname', 'host']:
if k in node_data:
metric_hostname = node_data[k]
break
for metric, desc in iteritems(stats_metrics):
self._process_metric(node_data, metric, *desc, tags=metrics_tags, hostname=metric_hostname)
def _process_pshard_stats_data(self, data, pshard_stats_metrics, base_tags):
for metric, desc in iteritems(pshard_stats_metrics):
self._process_metric(data, metric, *desc, tags=base_tags)
def _process_metric(self, data, metric, xtype, path, xform=None, tags=None, hostname=None):
"""
data: dictionary containing all the stats
metric: datadog metric
path: corresponding path in data, flattened, e.g. thread_pool.bulk.queue
xform: a lambda to apply to the numerical value
"""
value = data
# Traverse the nested dictionaries
for key in path.split('.'):
if value is not None:
value = value.get(key)
else:
break
if value is not None:
if xform:
value = xform(value)
if xtype == "gauge":
self.gauge(metric, value, tags=tags, hostname=hostname)
else:
self.rate(metric, value, tags=tags, hostname=hostname)
else:
self.log.debug("Metric not found: %s -> %s", path, metric)
def _process_health_data(self, data, version, base_tags, service_check_tags):
cluster_status = data.get('status')
if not self.cluster_status.get(self._config.url):
self.cluster_status[self._config.url] = cluster_status
if cluster_status in ["yellow", "red"]:
event = self._create_event(cluster_status, tags=base_tags)
self.event(event)
if cluster_status != self.cluster_status.get(self._config.url):
self.cluster_status[self._config.url] = cluster_status
event = self._create_event(cluster_status, tags=base_tags)
self.event(event)
cluster_health_metrics = health_stats_for_version(version)
for metric, desc in iteritems(cluster_health_metrics):
self._process_metric(data, metric, *desc, tags=base_tags)
# Process the service check
if cluster_status == 'green':
status = AgentCheck.OK
data['tag'] = "OK"
elif cluster_status == 'yellow':
status = AgentCheck.WARNING
data['tag'] = "WARN"
else:
status = AgentCheck.CRITICAL
data['tag'] = "ALERT"
msg = (
"{tag} on cluster \"{cluster_name}\" "
"| active_shards={active_shards} "
"| initializing_shards={initializing_shards} "
"| relocating_shards={relocating_shards} "
"| unassigned_shards={unassigned_shards} "
"| timed_out={timed_out}".format(
tag=data.get('tag'),
cluster_name=data.get('cluster_name'),
active_shards=data.get('active_shards'),
initializing_shards=data.get('initializing_shards'),
relocating_shards=data.get('relocating_shards'),
unassigned_shards=data.get('unassigned_shards'),
timed_out=data.get('timed_out'),
)
)
self.service_check(self.SERVICE_CHECK_CLUSTER_STATUS, status, message=msg, tags=service_check_tags)
def _process_policy_data(self, data, version, base_tags):
|
def _create_event(self, status, tags=None):
hostname = to_string(self.hostname)
if status == "red":
alert_type = "error"
msg_title = "{} is {}".format(hostname, status)
elif status == "yellow":
alert_type = "warning"
msg_title = "{} is {}".format(hostname, status)
else:
# then it should be green
alert_type = "success"
msg_title = "{} recovered as {}".format(hostname, status)
msg = "ElasticSearch: {} just reported as {}".format(hostname, status)
return {
'timestamp': int(time.time()),
'event_type': 'elasticsearch',
'host': hostname,
'msg_text': msg,
'msg_title': msg_title,
'alert_type': alert_type,
'source_type_name': "elasticsearch",
'event_object': hostname,
'tags': tags,
}
| for policy, policy_data in iteritems(data):
repo = policy_data.get('policy', {}).get('repository', 'unknown')
tags = base_tags + ['policy:{}'.format(policy), 'repository:{}'.format(repo)]
slm_stats = slm_stats_for_version(version)
for metric, desc in iteritems(slm_stats):
self._process_metric(policy_data, metric, *desc, tags=tags) |
server.go | package main
import (
"crypto/rand"
"fmt"
"log"
"net"
"time"
"github.com/yinghuocho/golibfq/mux"
"github.com/yinghuocho/golibfq/obf"
"github.com/yinghuocho/golibfq/sockstun"
"github.com/yinghuocho/gosocks"
)
func | () {
socksSvr := gosocks.NewBasicServer("127.0.0.1:1080", 5*time.Minute)
go socksSvr.ListenAndServe()
tunSvr, err := net.Listen("tcp", ":2000")
if err != nil {
log.Fatal(err)
}
defer tunSvr.Close()
auth := sockstun.NewTunnelAnonymousAuthenticator()
for {
// Wait for a connection.
conn, err := tunSvr.Accept()
if err != nil {
log.Fatal(err)
}
go func(c net.Conn) {
// add a layer of obfuscation
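// every accepted connection gets its own freshly generated random XOR mask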
var mask [obf.XorMaskLength]byte
rand.Read(mask[:])
obfedConn := obf.NewXorObfConn(c, mask)
muxServer := mux.NewServer(obfedConn)
for {
stream, err := muxServer.Accept()
if err != nil {
fmt.Println("error accepting Stream", err)
return
}
c, err := net.DialTimeout("tcp", "127.0.0.1:1080", socksSvr.GetTimeout())
if err != nil {
fmt.Println("error connecting SOCKS server", err)
stream.Close()
return
}
socks := &gosocks.SocksConn{c.(net.Conn), socksSvr.GetTimeout()}
if auth.ServerAuthenticate(stream, socks) != nil {
stream.Close()
socks.Close()
return
}
go sockstun.TunnelServer(stream, socks)
}
}(conn)
}
}
| main |
weather.ext.pb.micro.go | // Code generated by protoc-gen-micro. DO NOT EDIT.
// source: weather.ext.proto
package pb
import (
fmt "fmt"
proto "github.com/golang/protobuf/proto"
math "math"
)
import (
context "context"
api "github.com/micro/go-micro/v2/api"
client "github.com/micro/go-micro/v2/client"
server "github.com/micro/go-micro/v2/server"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
// Reference imports to suppress errors if they are not otherwise used.
var _ api.Endpoint
var _ context.Context
var _ client.Option
var _ server.Option
// Api Endpoints for WeatherService service
func NewWeatherServiceEndpoints() []*api.Endpoint {
return []*api.Endpoint{}
}
// Client API for WeatherService service
type WeatherService interface {
// Get real-time weather data
Now(ctx context.Context, in *DataReq, opts ...client.CallOption) (*NowData, error)
// Get weather forecast data
Forecast(ctx context.Context, in *DataReq, opts ...client.CallOption) (*ForecastData, error)
// Get offshore weather data
Seas(ctx context.Context, in *DataReq, opts ...client.CallOption) (*SeasData, error)
}
type weatherService struct {
c client.Client
name string
}
func NewWeatherService(name string, c client.Client) WeatherService {
return &weatherService{
c: c,
name: name,
}
}
func (c *weatherService) Now(ctx context.Context, in *DataReq, opts ...client.CallOption) (*NowData, error) {
req := c.c.NewRequest(c.name, "WeatherService.Now", in)
out := new(NowData)
err := c.c.Call(ctx, req, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c | cast(ctx context.Context, in *DataReq, opts ...client.CallOption) (*ForecastData, error) {
req := c.c.NewRequest(c.name, "WeatherService.Forecast", in)
out := new(ForecastData)
err := c.c.Call(ctx, req, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *weatherService) Seas(ctx context.Context, in *DataReq, opts ...client.CallOption) (*SeasData, error) {
req := c.c.NewRequest(c.name, "WeatherService.Seas", in)
out := new(SeasData)
err := c.c.Call(ctx, req, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// Server API for WeatherService service
type WeatherServiceHandler interface {
// Get real-time weather data
Now(context.Context, *DataReq, *NowData) error
// Get weather forecast data
Forecast(context.Context, *DataReq, *ForecastData) error
// Get offshore weather data
Seas(context.Context, *DataReq, *SeasData) error
}
func RegisterWeatherServiceHandler(s server.Server, hdlr WeatherServiceHandler, opts ...server.HandlerOption) error {
type weatherService interface {
Now(ctx context.Context, in *DataReq, out *NowData) error
Forecast(ctx context.Context, in *DataReq, out *ForecastData) error
Seas(ctx context.Context, in *DataReq, out *SeasData) error
}
type WeatherService struct {
weatherService
}
h := &weatherServiceHandler{hdlr}
return s.Handle(s.NewHandler(&WeatherService{h}, opts...))
}
type weatherServiceHandler struct {
WeatherServiceHandler
}
func (h *weatherServiceHandler) Now(ctx context.Context, in *DataReq, out *NowData) error {
return h.WeatherServiceHandler.Now(ctx, in, out)
}
func (h *weatherServiceHandler) Forecast(ctx context.Context, in *DataReq, out *ForecastData) error {
return h.WeatherServiceHandler.Forecast(ctx, in, out)
}
func (h *weatherServiceHandler) Seas(ctx context.Context, in *DataReq, out *SeasData) error {
return h.WeatherServiceHandler.Seas(ctx, in, out)
}
| *weatherService) Fore |
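A caller reaches the generated bindings above through a go-micro v2 client. The sketch below shows that wiring under stated assumptions: the import path of this generated pb package, the registered service name, and the contents of DataReq (defined in weather.ext.proto, which is not reproduced here) are all placeholders.

// Illustration only: calling the generated WeatherService client via go-micro v2.
// The pb import path, the "go.micro.srv.weather" service name and the empty
// DataReq are placeholders; the real request fields live in weather.ext.proto.
package main

import (
    "context"
    "log"

    "github.com/micro/go-micro/v2"

    pb "example.com/weather/pb" // placeholder path for the generated package
)

func main() {
    service := micro.NewService(micro.Name("weather.client"))
    service.Init()

    ws := pb.NewWeatherService("go.micro.srv.weather", service.Client())

    now, err := ws.Now(context.Background(), &pb.DataReq{})
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("current conditions: %+v", now)
}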
aggregates_histogram_response.py | # coding: utf-8
"""
ARTIK Cloud API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class AggregatesHistogramResponse(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, data=None, end_date=None, field=None, interval=None, sdid=None, size=None, start_date=None):
"""
AggregatesHistogramResponse - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'data': 'list[AggregatesHistogramData]',
'end_date': 'int',
'field': 'str',
'interval': 'str',
'sdid': 'str',
'size': 'int',
'start_date': 'int'
}
self.attribute_map = {
'data': 'data',
'end_date': 'endDate',
'field': 'field',
'interval': 'interval',
'sdid': 'sdid',
'size': 'size',
'start_date': 'startDate'
}
self._data = data
self._end_date = end_date
self._field = field
self._interval = interval
self._sdid = sdid
self._size = size
self._start_date = start_date
@property
def data(self):
"""
Gets the data of this AggregatesHistogramResponse.
:return: The data of this AggregatesHistogramResponse.
:rtype: list[AggregatesHistogramData]
"""
return self._data
@data.setter
def data(self, data):
"""
Sets the data of this AggregatesHistogramResponse.
:param data: The data of this AggregatesHistogramResponse.
:type: list[AggregatesHistogramData]
"""
self._data = data
@property
def end_date(self):
"""
Gets the end_date of this AggregatesHistogramResponse.
:return: The end_date of this AggregatesHistogramResponse.
:rtype: int
"""
return self._end_date
@end_date.setter
def end_date(self, end_date):
"""
Sets the end_date of this AggregatesHistogramResponse.
:param end_date: The end_date of this AggregatesHistogramResponse.
:type: int
"""
self._end_date = end_date
@property
def field(self):
"""
Gets the field of this AggregatesHistogramResponse.
:return: The field of this AggregatesHistogramResponse.
:rtype: str
"""
return self._field
@field.setter
def field(self, field):
"""
Sets the field of this AggregatesHistogramResponse.
:param field: The field of this AggregatesHistogramResponse.
:type: str
"""
self._field = field
@property
def interval(self):
"""
Gets the interval of this AggregatesHistogramResponse.
:return: The interval of this AggregatesHistogramResponse.
:rtype: str
"""
return self._interval
@interval.setter
def interval(self, interval):
"""
Sets the interval of this AggregatesHistogramResponse.
:param interval: The interval of this AggregatesHistogramResponse.
:type: str
"""
self._interval = interval
@property
def sdid(self):
"""
Gets the sdid of this AggregatesHistogramResponse.
:return: The sdid of this AggregatesHistogramResponse.
:rtype: str
"""
return self._sdid
@sdid.setter
def sdid(self, sdid):
"""
Sets the sdid of this AggregatesHistogramResponse.
:param sdid: The sdid of this AggregatesHistogramResponse.
:type: str
"""
self._sdid = sdid
@property
def size(self):
"""
Gets the size of this AggregatesHistogramResponse.
:return: The size of this AggregatesHistogramResponse.
:rtype: int
"""
return self._size
@size.setter
def size(self, size):
"""
Sets the size of this AggregatesHistogramResponse.
:param size: The size of this AggregatesHistogramResponse.
:type: int
"""
self._size = size
@property
def start_date(self):
"""
Gets the start_date of this AggregatesHistogramResponse.
:return: The start_date of this AggregatesHistogramResponse.
:rtype: int
"""
return self._start_date
@start_date.setter
def start_date(self, start_date):
"""
Sets the start_date of this AggregatesHistogramResponse.
:param start_date: The start_date of this AggregatesHistogramResponse.
:type: int
"""
self._start_date = start_date
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
|
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| """
Returns the string representation of the model
"""
return pformat(self.to_dict()) |
download.py | from .core import Core, Settings
class | (Core):
host = 'https://artifacts.elastic.co/downloads/beats/elastic-agent/{endpoint}'
endpoint = Settings.download_endpoint
kwargs = {
'stream': True
}
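# Streams the response body to Settings.download_path in 1 KiB chunks, leaving the content encoding untouched.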
def parse_response(self, response):
self.__logger.debug('Saving file to download path: {}'.format(Settings.download_path))
with open(Settings.download_path, 'wb+') as f:
for chunk in response.raw.stream(1024, decode_content=False):
if chunk:
f.write(chunk)
self.__logger.debug('File saved successfully')
| Download |
md5_digest.rs | use crate::common::*;
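/// A 16-byte MD5 digest, (de)serialized by serde as a fixed-length hex string via SerHex.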
#[serde(transparent)]
#[derive(Deserialize, Serialize, Debug, Eq, PartialEq, Copy, Clone)]
pub(crate) struct Md5Digest {
#[serde(with = "SerHex::<serde_hex::Strict>")]
bytes: [u8; 16],
}
impl Md5Digest {
#[cfg(test)]
pub(crate) fn from_hex(hex: &str) -> Self {
assert_eq!(hex.len(), 32);
| let mut bytes: [u8; 16] = [0; 16];
for n in 0..16 {
let i = n * 2;
bytes[n] = u8::from_str_radix(&hex[i..i + 2], 16).unwrap();
}
Self { bytes }
}
#[cfg(test)]
pub(crate) fn from_data(data: impl AsRef<[u8]>) -> Self {
md5::compute(data).into()
}
}
impl From<md5::Digest> for Md5Digest {
fn from(digest: md5::Digest) -> Self {
Self { bytes: digest.0 }
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn ser() {
let digest = Md5Digest {
bytes: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
};
let bytes = bendy::serde::ser::to_bytes(&digest).unwrap();
assert_eq!(
str::from_utf8(&bytes).unwrap(),
"32:000102030405060708090a0b0c0d0e0f"
);
let string_bytes = bendy::serde::ser::to_bytes(&"000102030405060708090a0b0c0d0e0f").unwrap();
assert_eq!(bytes, string_bytes);
}
} | |
dtypes_test.py | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import operator
import unittest
import six
if six.PY3:
import enum
from absl.testing import absltest
from absl.testing import parameterized
import numpy as onp
import jax
from jax import dtypes
from jax import numpy as np
from jax import test_util as jtu
from jax.config import config
config.parse_flags_with_absl()
FLAGS = config.FLAGS
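# Groupings of NumPy dtypes used below to exercise JAX's type-promotion rules.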
bool_dtypes = [onp.dtype('bool')]
signed_dtypes = [onp.dtype('int8'), onp.dtype('int16'), onp.dtype('int32'),
onp.dtype('int64')]
unsigned_dtypes = [onp.dtype('uint8'), onp.dtype('uint16'), onp.dtype('uint32'),
onp.dtype('uint64')]
onp_float_dtypes = [onp.dtype('float16'), onp.dtype('float32'),
onp.dtype('float64')]
float_dtypes = [onp.dtype(dtypes.bfloat16)] + onp_float_dtypes
complex_dtypes = [onp.dtype('complex64'), onp.dtype('complex128')]
all_dtypes = (bool_dtypes + signed_dtypes + unsigned_dtypes + float_dtypes +
complex_dtypes)
class DtypesTest(jtu.JaxTestCase):
@parameterized.named_parameters(
{"testcase_name": "_type={}".format(type.__name__), "type": type,
"dtype": dtype}
for type, dtype in [(bool, np.bool_), (int, np.int_), (float, np.float_),
(complex, np.complex_)])
def testDefaultTypes(self, type, dtype):
for f in [np.array, jax.jit(np.array), jax.jit(lambda x: x)]:
y = f(type(0))
self.assertTrue(isinstance(y, np.ndarray), msg=(f, y))
self.assertEqual(y.dtype, dtypes.canonicalize_dtype(dtype), msg=(f, y))
@parameterized.named_parameters(
{"testcase_name": "_swap={}_jit={}".format(swap, jit),
"swap": swap, "jit": jit}
for swap in [False, True] for jit in [False, True])
@jtu.skip_on_devices("tpu") # F16 not supported on TPU
def testBinaryPromotion(self, swap, jit):
testcases = [
(np.array(1.), 0., np.float_),
(np.array(1.), np.array(0.), np.float_),
(np.array(1.), np.array(0., dtype=np.float16), np.float_),
(np.array(1.), np.array(0., dtype=np.float32), np.float_),
(np.array(1.), np.array(0., dtype=np.float64), np.float64),
(np.array(1., dtype=np.float16), 0., np.float16),
(np.array(1., dtype=np.float32), 0., np.float32),
(np.array(1., dtype=np.float64), 0., np.float64),
(np.array(1., dtype=np.float16), np.array(0., dtype=np.float16), np.float16),
(np.array(1., dtype=np.float16), np.array(0., dtype=np.float32), np.float32),
(np.array(1., dtype=np.float16), np.array(0., dtype=np.float64), np.float64),
(np.array(1., dtype=np.float32), np.array(0., dtype=np.float32), np.float32),
(np.array(1., dtype=np.float32), np.array(0., dtype=np.float64), np.float64),
(np.array(1., dtype=np.float64), np.array(0., dtype=np.float64), np.float64),
(np.array([1.]), 0., np.float_),
(np.array([1.]), np.array(0.), np.float_),
(np.array([1.]), np.array(0., dtype=np.float16), np.float_),
(np.array([1.]), np.array(0., dtype=np.float32), np.float_),
(np.array([1.]), np.array(0., dtype=np.float64), np.float64),
(np.array([1.], dtype=np.float32), np.array(0., dtype=np.float16), np.float32),
(np.array([1.], dtype=np.float16), np.array(0., dtype=np.float32), np.float32),
(np.array([1.], dtype=np.float16), 0., np.float16),
]
op = jax.jit(operator.add) if jit else operator.add
for x, y, dtype in testcases:
x, y = (y, x) if swap else (x, y)
z = x + y
self.assertTrue(isinstance(z, np.ndarray), msg=(x, y, z))
self.assertEqual(z.dtype, dtypes.canonicalize_dtype(dtype), msg=(x, y, z))
def testPromoteDtypes(self):
for t1 in all_dtypes:
self.assertEqual(t1, dtypes.promote_types(t1, t1))
self.assertEqual(t1, dtypes.promote_types(t1, onp.bool_))
self.assertEqual(onp.dtype(onp.complex128),
dtypes.promote_types(t1, onp.complex128))
for t2 in all_dtypes:
# Symmetry
self.assertEqual(dtypes.promote_types(t1, t2),
dtypes.promote_types(t2, t1))
self.assertEqual(onp.dtype(onp.float32),
dtypes.promote_types(onp.float16, dtypes.bfloat16))
# Promotions of non-inexact types against inexact types always prefer
# the inexact types.
for t in float_dtypes + complex_dtypes:
for i in bool_dtypes + signed_dtypes + unsigned_dtypes:
self.assertEqual(t, dtypes.promote_types(t, i))
# Promotions between exact types, or between inexact types, match NumPy.
for groups in [bool_dtypes + signed_dtypes + unsigned_dtypes,
onp_float_dtypes + complex_dtypes]:
for t1, t2 in itertools.combinations(groups, 2):
self.assertEqual(onp.promote_types(t1, t2),
dtypes.promote_types(t1, t2))
@unittest.skipIf(six.PY2, "Test requires Python 3")
def | (self):
class AnEnum(enum.IntEnum):
A = 42
B = 101
onp.testing.assert_equal(onp.array(42), onp.array(AnEnum.A))
onp.testing.assert_equal(np.array(42), np.array(AnEnum.A))
onp.testing.assert_equal(onp.int32(101), onp.int32(AnEnum.B))
onp.testing.assert_equal(np.int32(101), np.int32(AnEnum.B))
if __name__ == "__main__":
absltest.main()
| testEnumPromotion |
authentication.go | // Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
"time"
core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
tls "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3"
"github.com/golang/protobuf/ptypes"
networking "istio.io/api/networking/v1alpha3"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/networking/util"
"istio.io/istio/pkg/spiffe"
)
const (
// SDSStatPrefix is the human readable prefix to use when emitting statistics for the SDS service.
SDSStatPrefix = "sdsstat"
// SDSClusterName is the name of the cluster for SDS connections
SDSClusterName = "sds-grpc"
// SDSDefaultResourceName is the default name in sdsconfig, used for fetching normal key/cert.
SDSDefaultResourceName = "default"
// SDSRootResourceName is the sdsconfig name for root CA, used for fetching root cert.
SDSRootResourceName = "ROOTCA"
// K8sSAJwtFileName is the token volume mount file name for k8s jwt token.
K8sSAJwtFileName = "/var/run/secrets/kubernetes.io/serviceaccount/token"
// K8sSATrustworthyJwtFileName is the token volume mount file name for k8s trustworthy jwt token.
K8sSATrustworthyJwtFileName = "/var/run/secrets/tokens/istio-token"
// K8sSAJwtTokenHeaderKey is the request header key for k8s jwt token.
// Binary header name must have suffix "-bin", according to https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md.
K8sSAJwtTokenHeaderKey = "istio_sds_credentials_header-bin"
// SdsCaSuffix is the suffix of the sds resource name for root CA.
SdsCaSuffix = "-cacert"
// EnvoyJwtFilterName is the name of the Envoy JWT filter. This should be the same as the name defined
// in https://github.com/envoyproxy/envoy/blob/v1.9.1/source/extensions/filters/http/well_known_names.h#L48
EnvoyJwtFilterName = "envoy.filters.http.jwt_authn"
// AuthnFilterName is the name for the Istio AuthN filter. This should be the same
// as the name defined in
// https://github.com/istio/proxy/blob/master/src/envoy/http/authn/http_filter_factory.cc#L30
AuthnFilterName = "istio_authn"
// KubernetesSecretType is the name of a SDS secret stored in Kubernetes
KubernetesSecretType = "kubernetes"
KubernetesSecretTypeURI = KubernetesSecretType + "://"
)
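// SDSAdsConfig is the config source used to fetch SDS secrets over ADS at resource API version V3.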
var SDSAdsConfig = &core.ConfigSource{
ConfigSourceSpecifier: &core.ConfigSource_Ads{
Ads: &core.AggregatedConfigSource{},
},
ResourceApiVersion: core.ApiVersion_V3,
}
// ConstructSdsSecretConfigForCredential constructs the SDS secret configuration used
// for certificates referenced by credentialName in a DestinationRule or Gateway.
// Currently this is served by a local SDS server, but it will be replaced by the
// Istiod SDS server in the future.
func ConstructSdsSecretConfigForCredential(name string) *tls.SdsSecretConfig {
if name == "" {
return nil
}
return &tls.SdsSecretConfig{
Name: KubernetesSecretTypeURI + name,
SdsConfig: SDSAdsConfig,
}
}
// Preconfigured SDS configs to avoid excessive memory allocations
var (
// set the fetch timeout to 0 here in legacyDefaultSDSConfig and rootSDSConfig
// because workload certs are guaranteed to exist.
legacyDefaultSDSConfig = &tls.SdsSecretConfig{
Name: SDSDefaultResourceName,
SdsConfig: &core.ConfigSource{
ConfigSourceSpecifier: &core.ConfigSource_ApiConfigSource{
ApiConfigSource: &core.ApiConfigSource{
ApiType: core.ApiConfigSource_GRPC,
TransportApiVersion: core.ApiVersion_V3,
GrpcServices: []*core.GrpcService{
{
TargetSpecifier: &core.GrpcService_EnvoyGrpc_{
EnvoyGrpc: &core.GrpcService_EnvoyGrpc{ClusterName: SDSClusterName},
},
},
},
},
},
ResourceApiVersion: core.ApiVersion_V3,
InitialFetchTimeout: ptypes.DurationProto(time.Second * 0),
},
}
legacyRootSDSConfig = &tls.SdsSecretConfig{
Name: SDSRootResourceName,
SdsConfig: &core.ConfigSource{
ConfigSourceSpecifier: &core.ConfigSource_ApiConfigSource{
ApiConfigSource: &core.ApiConfigSource{
ApiType: core.ApiConfigSource_GRPC,
TransportApiVersion: core.ApiVersion_V3,
GrpcServices: []*core.GrpcService{
{
TargetSpecifier: &core.GrpcService_EnvoyGrpc_{
EnvoyGrpc: &core.GrpcService_EnvoyGrpc{ClusterName: SDSClusterName},
},
},
},
},
},
ResourceApiVersion: core.ApiVersion_V3,
InitialFetchTimeout: ptypes.DurationProto(time.Second * 0),
},
}
defaultSDSConfig = &tls.SdsSecretConfig{
Name: SDSDefaultResourceName,
SdsConfig: &core.ConfigSource{
ConfigSourceSpecifier: &core.ConfigSource_ApiConfigSource{
ApiConfigSource: &core.ApiConfigSource{
ApiType: core.ApiConfigSource_GRPC,
SetNodeOnFirstMessageOnly: true,
TransportApiVersion: core.ApiVersion_V3,
GrpcServices: []*core.GrpcService{
{
TargetSpecifier: &core.GrpcService_EnvoyGrpc_{
EnvoyGrpc: &core.GrpcService_EnvoyGrpc{ClusterName: SDSClusterName},
},
},
},
},
},
ResourceApiVersion: core.ApiVersion_V3,
InitialFetchTimeout: ptypes.DurationProto(time.Second * 0),
},
}
rootSDSConfig = &tls.SdsSecretConfig{
Name: SDSRootResourceName,
SdsConfig: &core.ConfigSource{
ConfigSourceSpecifier: &core.ConfigSource_ApiConfigSource{
ApiConfigSource: &core.ApiConfigSource{
ApiType: core.ApiConfigSource_GRPC,
SetNodeOnFirstMessageOnly: true,
TransportApiVersion: core.ApiVersion_V3,
GrpcServices: []*core.GrpcService{
{
TargetSpecifier: &core.GrpcService_EnvoyGrpc_{
EnvoyGrpc: &core.GrpcService_EnvoyGrpc{ClusterName: SDSClusterName},
},
},
},
},
},
ResourceApiVersion: core.ApiVersion_V3,
InitialFetchTimeout: ptypes.DurationProto(time.Second * 0),
},
}
)
// ConstructSdsSecretConfig constructs SDS Secret Configuration for workload proxy.
func ConstructSdsSecretConfig(name string, node *model.Proxy) *tls.SdsSecretConfig {
if name == "" {
return nil
}
if name == SDSDefaultResourceName {
if util.IsIstioVersionGE19(node) {
return defaultSDSConfig
}
return legacyDefaultSDSConfig
}
if name == SDSRootResourceName {
if util.IsIstioVersionGE19(node) {
return rootSDSConfig
}
return legacyRootSDSConfig
}
cfg := &tls.SdsSecretConfig{
Name: name,
SdsConfig: &core.ConfigSource{
ConfigSourceSpecifier: &core.ConfigSource_ApiConfigSource{
ApiConfigSource: &core.ApiConfigSource{
ApiType: core.ApiConfigSource_GRPC,
TransportApiVersion: core.ApiVersion_V3,
GrpcServices: []*core.GrpcService{
{
TargetSpecifier: &core.GrpcService_EnvoyGrpc_{
EnvoyGrpc: &core.GrpcService_EnvoyGrpc{ClusterName: SDSClusterName},
},
},
},
},
},
ResourceApiVersion: core.ApiVersion_V3,
},
}
if util.IsIstioVersionGE19(node) {
cfg.SdsConfig.GetApiConfigSource().SetNodeOnFirstMessageOnly = true
}
return cfg
}
// ConstructValidationContext constructs ValidationContext in CommonTLSContext.
func | (rootCAFilePath string, subjectAltNames []string) *tls.CommonTlsContext_ValidationContext {
ret := &tls.CommonTlsContext_ValidationContext{
ValidationContext: &tls.CertificateValidationContext{
TrustedCa: &core.DataSource{
Specifier: &core.DataSource_Filename{
Filename: rootCAFilePath,
},
},
},
}
if len(subjectAltNames) > 0 {
ret.ValidationContext.MatchSubjectAltNames = util.StringToExactMatch(subjectAltNames)
}
return ret
}
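// appendURIPrefixToTrustDomain prefixes each trust domain alias with the SPIFFE URI scheme
// and appends a trailing "/", yielding prefixes suitable for SAN prefix matching.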
func appendURIPrefixToTrustDomain(trustDomainAliases []string) []string {
var res []string
for _, td := range trustDomainAliases {
res = append(res, spiffe.URIPrefix+td+"/")
}
return res
}
// ApplyToCommonTLSContext completes the commonTlsContext
func ApplyToCommonTLSContext(tlsContext *tls.CommonTlsContext, proxy *model.Proxy,
subjectAltNames []string, trustDomainAliases []string) {
// These are certs being mounted from within the pod. Rather than reading directly in Envoy,
// which does not support rotation, we will serve them over SDS by reading the files.
// We should check if these certs have values, if yes we should use them or otherwise fall back to defaults.
res := model.SdsCertificateConfig{
CertificatePath: proxy.Metadata.TLSServerCertChain,
PrivateKeyPath: proxy.Metadata.TLSServerKey,
CaCertificatePath: proxy.Metadata.TLSServerRootCert,
}
// TODO: if subjectAltName ends with *, create a prefix match as well.
// TODO: if the user explicitly specifies SANs - should we alter their explicit config by adding all spiffe aliases?
matchSAN := util.StringToExactMatch(subjectAltNames)
if len(trustDomainAliases) > 0 {
matchSAN = append(matchSAN, util.StringToPrefixMatch(appendURIPrefixToTrustDomain(trustDomainAliases))...)
}
// configure server listeners with SDS.
tlsContext.ValidationContextType = &tls.CommonTlsContext_CombinedValidationContext{
CombinedValidationContext: &tls.CommonTlsContext_CombinedCertificateValidationContext{
DefaultValidationContext: &tls.CertificateValidationContext{MatchSubjectAltNames: matchSAN},
ValidationContextSdsSecretConfig: ConstructSdsSecretConfig(model.GetOrDefault(res.GetRootResourceName(), SDSRootResourceName), proxy),
},
}
tlsContext.TlsCertificateSdsSecretConfigs = []*tls.SdsSecretConfig{
ConstructSdsSecretConfig(model.GetOrDefault(res.GetResourceName(), SDSDefaultResourceName), proxy),
}
}
// ApplyCustomSDSToClientCommonTLSContext applies the customized sds to CommonTlsContext
// Used for building upstream TLS context for egress gateway's TLS/mTLS origination
func ApplyCustomSDSToClientCommonTLSContext(tlsContext *tls.CommonTlsContext, tlsOpts *networking.ClientTLSSettings) {
if tlsOpts.Mode == networking.ClientTLSSettings_MUTUAL {
// create SDS config for gateway to fetch key/cert from agent.
tlsContext.TlsCertificateSdsSecretConfigs = []*tls.SdsSecretConfig{
ConstructSdsSecretConfigForCredential(tlsOpts.CredentialName),
}
}
// create SDS config for gateway to fetch certificate validation context
// at gateway agent.
defaultValidationContext := &tls.CertificateValidationContext{
MatchSubjectAltNames: util.StringToExactMatch(tlsOpts.SubjectAltNames),
}
tlsContext.ValidationContextType = &tls.CommonTlsContext_CombinedValidationContext{
CombinedValidationContext: &tls.CommonTlsContext_CombinedCertificateValidationContext{
DefaultValidationContext: defaultValidationContext,
ValidationContextSdsSecretConfig: ConstructSdsSecretConfigForCredential(tlsOpts.CredentialName + SdsCaSuffix),
},
}
}
// ApplyCredentialSDSToServerCommonTLSContext applies the credentialName sds (Gateway/DestinationRule) to CommonTlsContext
// Used for building both gateway/sidecar TLS context
func ApplyCredentialSDSToServerCommonTLSContext(tlsContext *tls.CommonTlsContext, tlsOpts *networking.ServerTLSSettings) {
// create SDS config for gateway/sidecar to fetch key/cert from agent.
tlsContext.TlsCertificateSdsSecretConfigs = []*tls.SdsSecretConfig{
ConstructSdsSecretConfigForCredential(tlsOpts.CredentialName),
}
// If tls mode is MUTUAL, create SDS config for gateway/sidecar to fetch certificate validation context
// at gateway agent. Otherwise, use the static certificate validation context config.
if tlsOpts.Mode == networking.ServerTLSSettings_MUTUAL {
defaultValidationContext := &tls.CertificateValidationContext{
MatchSubjectAltNames: util.StringToExactMatch(tlsOpts.SubjectAltNames),
VerifyCertificateSpki: tlsOpts.VerifyCertificateSpki,
VerifyCertificateHash: tlsOpts.VerifyCertificateHash,
}
tlsContext.ValidationContextType = &tls.CommonTlsContext_CombinedValidationContext{
CombinedValidationContext: &tls.CommonTlsContext_CombinedCertificateValidationContext{
DefaultValidationContext: defaultValidationContext,
ValidationContextSdsSecretConfig: ConstructSdsSecretConfigForCredential(tlsOpts.CredentialName + SdsCaSuffix),
},
}
} else if len(tlsOpts.SubjectAltNames) > 0 {
tlsContext.ValidationContextType = &tls.CommonTlsContext_ValidationContext{
ValidationContext: &tls.CertificateValidationContext{
MatchSubjectAltNames: util.StringToExactMatch(tlsOpts.SubjectAltNames),
},
}
}
}
| ConstructValidationContext |
lib.rs | // Copyright 2020 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#![cfg(test)]
use std::{convert::TryFrom as _, mem::size_of};
use fidl_fuchsia_net as net;
use fidl_fuchsia_net_stack as net_stack;
use fuchsia_async::{self as fasync, DurationExt as _, TimeoutExt as _};
use fuchsia_zircon as zx;
use anyhow::{self, Context as _};
use futures::{
future, Future, FutureExt as _, StreamExt as _, TryFutureExt as _, TryStreamExt as _,
};
use net_types::{
ethernet::Mac,
ip::{self as net_types_ip, Ip},
LinkLocalAddress as _, MulticastAddress as _, SpecifiedAddress as _, Witness as _,
};
use netstack_testing_common::{
constants::{eth as eth_consts, ipv6 as ipv6_consts},
realms::{constants, KnownServiceProvider, Netstack, Netstack2, NetstackVersion},
send_ra_with_router_lifetime, setup_network, setup_network_with, sleep, write_ndp_message,
ASYNC_EVENT_CHECK_INTERVAL, ASYNC_EVENT_NEGATIVE_CHECK_TIMEOUT,
ASYNC_EVENT_POSITIVE_CHECK_TIMEOUT, NDP_MESSAGE_TTL,
};
use netstack_testing_macros::variants_test;
use packet::ParsablePacket as _;
use packet_formats::{
ethernet::{EtherType, EthernetFrame, EthernetFrameLengthCheck},
icmp::{
mld::MldPacket,
ndp::{
options::{NdpOption, NdpOptionBuilder, PrefixInformation, RouteInformation},
NeighborAdvertisement, NeighborSolicitation, RoutePreference, RouterAdvertisement,
RouterSolicitation,
},
IcmpParseArgs, Icmpv6Packet,
},
ip::Ipv6Proto,
testutil::{parse_icmp_packet_in_ip_packet_in_ethernet_frame, parse_ip_packet},
};
use test_case::test_case;
/// The expected number of Router Solicitations sent by the netstack when an
/// interface is brought up as a host.
const EXPECTED_ROUTER_SOLICIATIONS: u8 = 3;
/// The expected interval between sending Router Solicitation messages when
/// soliciting IPv6 routers.
const EXPECTED_ROUTER_SOLICITATION_INTERVAL: zx::Duration = zx::Duration::from_seconds(4);
/// The expected number of Neighbor Solicitations sent by the netstack when
/// performing Duplicate Address Detection.
const EXPECTED_DUP_ADDR_DETECT_TRANSMITS: u8 = 1;
/// The expected interval between sending Neighbor Solicitation messages when
/// performing Duplicate Address Detection.
const EXPECTED_DAD_RETRANSMIT_TIMER: zx::Duration = zx::Duration::from_seconds(1);
/// As per [RFC 7217 section 6], hosts SHOULD introduce a random delay between 0 and
/// `IDGEN_DELAY` before trying a new tentative address.
///
/// [RFC 7217 section 6]: https://tools.ietf.org/html/rfc7217#section-6
const DAD_IDGEN_DELAY: zx::Duration = zx::Duration::from_seconds(1);
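/// Installs `endpoint` into `realm` under `name`, enables the interface, and waits until
/// at least one IPv6 address is observed, returning the interface's IPv6 addresses.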
async fn install_and_get_ipv6_addrs_for_endpoint<N: Netstack>(
realm: &netemul::TestRealm<'_>,
endpoint: &netemul::TestEndpoint<'_>,
name: String,
) -> Vec<net::Subnet> {
let (id, control, _device_control) =
endpoint.add_to_stack(realm, Some(name)).await.expect("installing interface");
let did_enable = control.enable().await.expect("calling enable").expect("enable failed");
assert!(did_enable);
let interface_state = realm
.connect_to_protocol::<fidl_fuchsia_net_interfaces::StateMarker>()
.expect("failed to connect to fuchsia.net.interfaces/State service");
let mut state = fidl_fuchsia_net_interfaces_ext::InterfaceState::Unknown(id.into());
let ipv6_addresses = fidl_fuchsia_net_interfaces_ext::wait_interface_with_id(
fidl_fuchsia_net_interfaces_ext::event_stream_from_state(&interface_state)
.expect("creating interface event stream"),
&mut state,
|fidl_fuchsia_net_interfaces_ext::Properties {
id: _,
name: _,
device_class: _,
online: _,
addresses,
has_default_ipv4_route: _,
has_default_ipv6_route: _,
}| {
let ipv6_addresses = addresses
.iter()
.map(|fidl_fuchsia_net_interfaces_ext::Address { addr, valid_until: _ }| addr)
.filter(|fidl_fuchsia_net::Subnet { addr, prefix_len: _ }| match addr {
net::IpAddress::Ipv4(net::Ipv4Address { addr: _ }) => false,
net::IpAddress::Ipv6(net::Ipv6Address { addr: _ }) => true,
})
.copied()
.collect::<Vec<_>>();
if ipv6_addresses.is_empty() {
None
} else {
Some(ipv6_addresses)
}
},
)
.await
.expect("failed to observe interface addition");
ipv6_addresses
}
/// Test that across netstack runs, a device will initially be assigned the same
/// IPv6 addresses.
#[variants_test]
async fn consistent_initial_ipv6_addrs<E: netemul::Endpoint>(name: &str) {
let sandbox = netemul::TestSandbox::new().expect("failed to create sandbox");
let realm = sandbox
.create_realm(
name,
&[
// This test exercises stash persistence. Netstack-debug, which
// is the default used by test helpers, does not use
// persistence.
KnownServiceProvider::Netstack(NetstackVersion::ProdNetstack2),
KnownServiceProvider::SecureStash,
],
)
.expect("failed to create realm");
let endpoint = sandbox.create_endpoint::<E, _>(name).await.expect("failed to create endpoint");
let () = endpoint.set_link_up(true).await.expect("failed to set link up");
// Make sure netstack uses the same addresses across runs for a device.
let first_run_addrs =
install_and_get_ipv6_addrs_for_endpoint::<Netstack2>(&realm, &endpoint, name.to_string())
.await;
// Stop the netstack.
let () = realm
.stop_child_component(constants::netstack::COMPONENT_NAME)
.await
.expect("failed to stop netstack");
let second_run_addrs =
install_and_get_ipv6_addrs_for_endpoint::<Netstack2>(&realm, &endpoint, name.to_string())
.await;
assert_eq!(first_run_addrs, second_run_addrs);
}
/// Tests that `EXPECTED_ROUTER_SOLICIATIONS` Router Solicitation messages are transmitted
/// when the interface is brought up.
#[variants_test]
#[test_case("host", false ; "host")]
#[test_case("router", true ; "router")]
async fn sends_router_solicitations<E: netemul::Endpoint>(
test_name: &str,
sub_test_name: &str,
forwarding: bool,
) {
let name = format!("{}_{}", test_name, sub_test_name);
let name = name.as_str();
let sandbox = netemul::TestSandbox::new().expect("failed to create sandbox");
let (_network, realm, _netstack, _iface, fake_ep) =
setup_network::<E>(&sandbox, name).await.expect("error setting up network");
if forwarding {
let stack = realm
.connect_to_protocol::<net_stack::StackMarker>()
.expect("failed to get stack proxy");
let () = stack.enable_ip_forwarding().await.expect("error enabling IP forwarding");
}
// Make sure exactly `EXPECTED_ROUTER_SOLICIATIONS` RS messages are transmitted
// by the netstack.
let mut observed_rs = 0;
loop {
// When we have already observed the expected number of RS messages, do a
// negative check to make sure that we don't send anymore.
let extra_timeout = if observed_rs == EXPECTED_ROUTER_SOLICIATIONS {
ASYNC_EVENT_NEGATIVE_CHECK_TIMEOUT
} else {
ASYNC_EVENT_POSITIVE_CHECK_TIMEOUT
};
let ret = fake_ep
.frame_stream()
.try_filter_map(|(data, dropped)| {
assert_eq!(dropped, 0);
let mut observed_slls = Vec::new();
future::ok(
parse_icmp_packet_in_ip_packet_in_ethernet_frame::<
net_types_ip::Ipv6,
_,
RouterSolicitation,
_,
>(&data, |p| {
for option in p.body().iter() {
if let NdpOption::SourceLinkLayerAddress(a) = option {
let mut mac_bytes = [0; 6];
mac_bytes.copy_from_slice(&a[..size_of::<Mac>()]);
observed_slls.push(Mac::new(mac_bytes));
} else {
// We should only ever have an NDP Source Link-Layer Address
// option in an RS.
panic!("unexpected option in RS = {:?}", option);
}
}
})
.map_or(
None,
|(_src_mac, dst_mac, src_ip, dst_ip, ttl, _message, _code)| {
Some((dst_mac, src_ip, dst_ip, ttl, observed_slls))
},
),
)
})
.try_next()
.map(|r| r.context("error getting OnData event"))
.on_timeout((EXPECTED_ROUTER_SOLICITATION_INTERVAL + extra_timeout).after_now(), || {
// If we already observed `EXPECTED_ROUTER_SOLICIATIONS` RS, then we shouldn't
// have gotten any more; the timeout is expected.
if observed_rs == EXPECTED_ROUTER_SOLICIATIONS {
return Ok(None);
}
return Err(anyhow::anyhow!("timed out waiting for the {}-th RS", observed_rs));
})
.await
.unwrap();
let (dst_mac, src_ip, dst_ip, ttl, observed_slls) = match ret {
Some((dst_mac, src_ip, dst_ip, ttl, observed_slls)) => {
(dst_mac, src_ip, dst_ip, ttl, observed_slls)
}
None => break,
};
assert_eq!(
dst_mac,
Mac::from(&net_types_ip::Ipv6::ALL_ROUTERS_LINK_LOCAL_MULTICAST_ADDRESS)
);
// DAD should have resolved for the link local IPv6 address that is assigned to
// the interface when it is first brought up. When a link local address is
// assigned to the interface, it should be used for transmitted RS messages.
if observed_rs > 0 {
assert!(src_ip.is_specified())
}
assert_eq!(dst_ip, net_types_ip::Ipv6::ALL_ROUTERS_LINK_LOCAL_MULTICAST_ADDRESS.get());
assert_eq!(ttl, NDP_MESSAGE_TTL);
// The Router Solicitation should only ever have at most one source
// link-layer address option.
assert!(observed_slls.len() <= 1);
let observed_sll = observed_slls.into_iter().nth(0);
if src_ip.is_specified() {
if observed_sll.is_none() {
panic!("expected source-link-layer address option if RS has a specified source IP address");
}
} else if observed_sll.is_some() {
panic!("unexpected source-link-layer address option for RS with unspecified source IP address");
}
observed_rs += 1;
}
assert_eq!(observed_rs, EXPECTED_ROUTER_SOLICIATIONS);
}
/// Tests that both stable and temporary SLAAC addresses are generated for a SLAAC prefix.
#[variants_test]
#[test_case("host", false ; "host")]
#[test_case("router", true ; "router")]
async fn slaac_with_privacy_extensions<E: netemul::Endpoint>(
test_name: &str,
sub_test_name: &str,
forwarding: bool,
) {
let name = format!("{}_{}", test_name, sub_test_name);
let name = name.as_str();
let sandbox = netemul::TestSandbox::new().expect("failed to create sandbox");
let (_network, realm, _netstack, iface, fake_ep) =
setup_network::<E>(&sandbox, name).await.expect("error setting up network");
if forwarding {
let stack = realm
.connect_to_protocol::<net_stack::StackMarker>()
.expect("failed to get stack proxy");
let () = stack.enable_ip_forwarding().await.expect("error enabling IP forwarding");
}
// Wait for a Router Solicitation.
//
// The first RS should be sent immediately.
let () = fake_ep
.frame_stream()
.try_filter_map(|(data, dropped)| {
assert_eq!(dropped, 0);
future::ok(
parse_icmp_packet_in_ip_packet_in_ethernet_frame::<
net_types_ip::Ipv6,
_,
RouterSolicitation,
_,
>(&data, |_| {})
.map_or(None, |_| Some(())),
)
})
.try_next()
.map(|r| r.context("error getting OnData event"))
.on_timeout(ASYNC_EVENT_POSITIVE_CHECK_TIMEOUT.after_now(), || {
Err(anyhow::anyhow!("timed out waiting for RS packet"))
})
.await
.unwrap()
.expect("failed to get next OnData event");
// Send a Router Advertisement with information for a SLAAC prefix.
let ra = RouterAdvertisement::new(
0, /* current_hop_limit */
false, /* managed_flag */
false, /* other_config_flag */
0, /* router_lifetime */
0, /* reachable_time */
0, /* retransmit_timer */
);
let pi = PrefixInformation::new(
ipv6_consts::PREFIX.prefix(), /* prefix_length */
false, /* on_link_flag */
true, /* autonomous_address_configuration_flag */
99999, /* valid_lifetime */
99999, /* preferred_lifetime */
ipv6_consts::PREFIX.network(), /* prefix */
);
let options = [NdpOptionBuilder::PrefixInformation(pi)];
let () = write_ndp_message::<&[u8], _>(
eth_consts::MAC_ADDR,
Mac::from(&net_types_ip::Ipv6::ALL_NODES_LINK_LOCAL_MULTICAST_ADDRESS),
ipv6_consts::LINK_LOCAL_ADDR,
net_types_ip::Ipv6::ALL_NODES_LINK_LOCAL_MULTICAST_ADDRESS.get(),
ra,
&options,
&fake_ep,
)
.await
.expect("failed to write NDP message");
// Wait for the SLAAC addresses to be generated.
//
// We expect two addresses for the SLAAC prefixes to be assigned to the NIC as the
// netstack should generate both a stable and temporary SLAAC address.
let interface_state = realm
.connect_to_protocol::<fidl_fuchsia_net_interfaces::StateMarker>()
.expect("failed to connect to fuchsia.net.interfaces/State");
let expected_addrs = 2;
fidl_fuchsia_net_interfaces_ext::wait_interface_with_id(
fidl_fuchsia_net_interfaces_ext::event_stream_from_state(&interface_state)
.expect("error getting interface state event stream"),
&mut fidl_fuchsia_net_interfaces_ext::InterfaceState::Unknown(iface.id()),
|fidl_fuchsia_net_interfaces_ext::Properties { addresses, .. }| {
if addresses
.iter()
.filter_map(
|&fidl_fuchsia_net_interfaces_ext::Address {
addr: fidl_fuchsia_net::Subnet { addr, prefix_len: _ },
valid_until: _,
}| {
match addr {
net::IpAddress::Ipv4(net::Ipv4Address { addr: _ }) => None,
net::IpAddress::Ipv6(net::Ipv6Address { addr }) => {
// TODO(https://github.com/rust-lang/rust/issues/80967): use bool::then_some.
ipv6_consts::PREFIX
.contains(&net_types_ip::Ipv6Addr::from_bytes(addr))
.then(|| ())
}
}
},
)
.count()
== expected_addrs as usize
{
Some(())
} else {
None
}
},
)
.map_err(anyhow::Error::from)
.on_timeout(
(EXPECTED_DAD_RETRANSMIT_TIMER * EXPECTED_DUP_ADDR_DETECT_TRANSMITS * expected_addrs
+ ASYNC_EVENT_POSITIVE_CHECK_TIMEOUT)
.after_now(),
|| Err(anyhow::anyhow!("timed out")),
)
.await
.expect("failed to wait for SLAAC addresses to be generated")
}
/// Tests that if the netstack attempts to assign an address to an interface, and a remote node
/// is already assigned the address or attempts to assign the address at the same time, DAD
/// fails on the local interface.
///
/// If no remote node has any interest in an address the netstack is attempting to assign to
/// an interface, DAD should succeed.
// TODO(https://fxbug.dev/82046): Rewrite this test using fuchsia.net.interfaces.admin. Add
// addresses using Control.AddAddress and watch for DAD_FAILED using
// AddressStateProvider.OnAddressRemoved instead of the timeouts below.
#[variants_test]
async fn duplicate_address_detection<E: netemul::Endpoint>(name: &str) {
/// Makes sure that `ipv6_consts::LINK_LOCAL_ADDR` is not assigned to the interface after the
/// DAD resolution time.
async fn check_address_failed_dad(iface: &netemul::TestInterface<'_>) {
// Clocks sometimes jump in infrastructure, which can cause a timer to expire prematurely.
// Fortunately such jumps are rarely seen in quick succession - if we repeatedly wait for
// shorter durations we can be reasonably sure that the intended amount of time truly did
// elapse. It is expected that at most one timer worth of time may be lost.
const STEP: zx::Duration = zx::Duration::from_millis(10);
let duration = EXPECTED_DAD_RETRANSMIT_TIMER * EXPECTED_DUP_ADDR_DETECT_TRANSMITS
+ ASYNC_EVENT_NEGATIVE_CHECK_TIMEOUT;
let iterations =
(duration + STEP - zx::Duration::from_nanos(1)).into_nanos() / STEP.into_nanos();
let iterations = usize::try_from(iterations).expect("integer conversion");
let () = fasync::Interval::new(STEP).take(iterations).collect().await;
let link_local_addr = net::Subnet {
addr: net::IpAddress::Ipv6(net::Ipv6Address {
addr: ipv6_consts::LINK_LOCAL_ADDR.ipv6_bytes(),
}),
prefix_len: 64,
};
let addrs = iface.get_addrs().await.expect("error getting interface addresses");
assert!(
!addrs
.iter()
.any(|fidl_fuchsia_net_interfaces_ext::Address { addr, valid_until: _ }| *addr
== link_local_addr),
"DAD did not fail, found {:?} in {:?}",
link_local_addr,
addrs
);
}
/// Transmits a Neighbor Solicitation message and expects `ipv6_consts::LINK_LOCAL_ADDR`
/// to not be assigned to the interface after the normal resolution time for DAD.
async fn fail_dad_with_ns(
iface: &netemul::TestInterface<'_>,
fake_ep: &netemul::TestFakeEndpoint<'_>,
) {
let snmc = ipv6_consts::LINK_LOCAL_ADDR.to_solicited_node_address();
let () = write_ndp_message::<&[u8], _>(
eth_consts::MAC_ADDR,
Mac::from(&snmc),
net_types_ip::Ipv6::UNSPECIFIED_ADDRESS,
snmc.get(),
NeighborSolicitation::new(ipv6_consts::LINK_LOCAL_ADDR),
&[],
fake_ep,
)
.await
.expect("failed to write NDP message");
check_address_failed_dad(iface).await
}
/// Transmits a Neighbor Advertisement message and expects `ipv6_consts::LINK_LOCAL_ADDR`
/// to not be assigned to the interface after the normal resolution time for DAD.
async fn fail_dad_with_na(
iface: &netemul::TestInterface<'_>,
fake_ep: &netemul::TestFakeEndpoint<'_>,
) {
let () = write_ndp_message::<&[u8], _>(
eth_consts::MAC_ADDR,
Mac::from(&net_types_ip::Ipv6::ALL_NODES_LINK_LOCAL_MULTICAST_ADDRESS),
ipv6_consts::LINK_LOCAL_ADDR,
net_types_ip::Ipv6::ALL_NODES_LINK_LOCAL_MULTICAST_ADDRESS.get(),
NeighborAdvertisement::new(
false, /* router_flag */
false, /* solicited_flag */
false, /* override_flag */
ipv6_consts::LINK_LOCAL_ADDR,
),
&[NdpOptionBuilder::TargetLinkLayerAddress(ð_consts::MAC_ADDR.bytes())],
fake_ep,
)
.await
.expect("failed to write NDP message");
check_address_failed_dad(iface).await
}
// Wait for and verify a NS message transmitted by netstack for DAD.
async fn expect_dad_neighbor_solicitation(fake_ep: &netemul::TestFakeEndpoint<'_>) {
let ret = fake_ep
.frame_stream()
.try_filter_map(|(data, dropped)| {
assert_eq!(dropped, 0);
future::ok(
parse_icmp_packet_in_ip_packet_in_ethernet_frame::<
net_types_ip::Ipv6,
_,
NeighborSolicitation,
_,
>(&data, |p| assert_eq!(p.body().iter().count(), 0))
.map_or(None, |(_src_mac, dst_mac, src_ip, dst_ip, ttl, message, _code)| {
// If the NS is not for the address we just added, this is for some
// other address. We ignore it as it is not relevant to our test.
if message.target_address() != &ipv6_consts::LINK_LOCAL_ADDR {
return None;
}
Some((dst_mac, src_ip, dst_ip, ttl))
}),
)
})
.try_next()
.map(|r| r.context("error getting OnData event"))
.on_timeout(ASYNC_EVENT_POSITIVE_CHECK_TIMEOUT.after_now(), || {
Err(anyhow::anyhow!(
"timed out waiting for a neighbor solicitation targetting {}",
ipv6_consts::LINK_LOCAL_ADDR
))
})
.await
.unwrap()
.expect("failed to get next OnData event");
let (dst_mac, src_ip, dst_ip, ttl) = ret;
let expected_dst = ipv6_consts::LINK_LOCAL_ADDR.to_solicited_node_address();
assert_eq!(src_ip, net_types_ip::Ipv6::UNSPECIFIED_ADDRESS);
assert_eq!(dst_ip, expected_dst.get());
assert_eq!(dst_mac, Mac::from(&expected_dst));
assert_eq!(ttl, NDP_MESSAGE_TTL);
}
/// Adds `ipv6_consts::LINK_LOCAL_ADDR` to the interface and makes sure a Neighbor Solicitation
/// message is transmitted by the netstack for DAD.
///
/// Calls `fail_dad_fn` after the DAD message is observed so callers can simulate a remote
/// node that has some interest in the same address.
async fn add_address_for_dad<
'a,
'b: 'a,
R: 'b + Future<Output = ()>,
FN: FnOnce(&'b netemul::TestInterface<'a>, &'b netemul::TestFakeEndpoint<'a>) -> R,
>(
iface: &'b netemul::TestInterface<'a>,
fake_ep: &'b netemul::TestFakeEndpoint<'a>,
control: &'b fidl_fuchsia_net_interfaces_admin::ControlProxy,
fail_dad_fn: FN,
want_state: fidl_fuchsia_net_interfaces_admin::AddressAssignmentState,
) -> Result<
fidl_fuchsia_net_interfaces_admin::AddressStateProviderProxy,
fidl_fuchsia_net_interfaces_ext::admin::AddressStateProviderError,
> {
let (address_state_provider, server) = fidl::endpoints::create_proxy::<
fidl_fuchsia_net_interfaces_admin::AddressStateProviderMarker,
>()
.expect("create AddressStateProvider proxy");
let () = control
.add_address(
&mut net::InterfaceAddress::Ipv6(net::Ipv6Address {
addr: ipv6_consts::LINK_LOCAL_ADDR.ipv6_bytes(),
}),
fidl_fuchsia_net_interfaces_admin::AddressParameters::EMPTY,
server,
)
.expect("Control.AddAddress FIDL error");
match want_state {
fidl_fuchsia_net_interfaces_admin::AddressAssignmentState::Assigned
| fidl_fuchsia_net_interfaces_admin::AddressAssignmentState::Tentative => {
// The first DAD message should be sent immediately.
expect_dad_neighbor_solicitation(fake_ep).await;
// Ensure that fuchsia.net.interfaces/Watcher doesn't erroneously report the
// address as added before DAD completes successfully or otherwise.
assert_eq!(
iface.get_addrs().await.expect("failed to get addresses").into_iter().find(
|fidl_fuchsia_net_interfaces_ext::Address {
addr: fidl_fuchsia_net::Subnet { addr, prefix_len: _ },
valid_until: _,
}| {
match addr {
fidl_fuchsia_net::IpAddress::Ipv4(
fidl_fuchsia_net::Ipv4Address { addr: _ },
) => false,
fidl_fuchsia_net::IpAddress::Ipv6(
fidl_fuchsia_net::Ipv6Address { addr },
) => *addr == ipv6_consts::LINK_LOCAL_ADDR.ipv6_bytes(),
}
}
),
None,
"added IPv6 LL address already present even though it is tentative"
);
}
fidl_fuchsia_net_interfaces_admin::AddressAssignmentState::Unavailable => {}
}
fail_dad_fn(iface, fake_ep).await;
{
let state_stream = fidl_fuchsia_net_interfaces_ext::admin::assignment_state_stream(
address_state_provider.clone(),
);
futures::pin_mut!(state_stream);
let () = fidl_fuchsia_net_interfaces_ext::admin::wait_assignment_state(
&mut state_stream,
want_state,
)
.on_timeout(
(EXPECTED_DAD_RETRANSMIT_TIMER * EXPECTED_DUP_ADDR_DETECT_TRANSMITS
+ ASYNC_EVENT_POSITIVE_CHECK_TIMEOUT)
.after_now(),
|| panic!("timed out waiting for address assignment state"),
)
.await?;
}
Ok(address_state_provider)
}
let sandbox = netemul::TestSandbox::new().expect("failed to create sandbox");
let (_network, realm, _netstack, iface, fake_ep) =
setup_network::<E>(&sandbox, name).await.expect("error setting up network");
let debug_control = realm
.connect_to_protocol::<fidl_fuchsia_net_debug::InterfacesMarker>()
.expect("failed to connect to fuchsia.net.debug/Interfaces");
let (control, server) =
fidl::endpoints::create_proxy::<fidl_fuchsia_net_interfaces_admin::ControlMarker>()
.expect("create proxy");
let () = debug_control
.get_admin(iface.id(), server)
.expect("fuchsia.net.debug/Interfaces.GetAdmin failed");
// Add an address and expect it to fail DAD because we simulate another node
// performing DAD at the same time.
assert_matches::assert_matches!(
add_address_for_dad(
&iface,
&fake_ep,
&control,
fail_dad_with_ns,
fidl_fuchsia_net_interfaces_admin::AddressAssignmentState::Assigned
)
.await,
Err(fidl_fuchsia_net_interfaces_ext::admin::AddressStateProviderError::AddressRemoved(
fidl_fuchsia_net_interfaces_admin::AddressRemovalReason::DadFailed
))
);
// Add an address and expect it to fail DAD because we simulate another node
// already owning the address.
assert_matches::assert_matches!(
add_address_for_dad(
&iface,
&fake_ep,
&control,
fail_dad_with_na,
fidl_fuchsia_net_interfaces_admin::AddressAssignmentState::Assigned
)
.await,
Err(fidl_fuchsia_net_interfaces_ext::admin::AddressStateProviderError::AddressRemoved(
fidl_fuchsia_net_interfaces_admin::AddressRemovalReason::DadFailed
))
);
{
// Add the address, and make sure it gets assigned.
let address_state_provider = add_address_for_dad(
&iface,
&fake_ep,
&control,
|_, _| async {},
fidl_fuchsia_net_interfaces_admin::AddressAssignmentState::Assigned,
)
.await
.expect("DAD should have succeeded");
// Disable the interface, ensure that the address becomes unavailable.
let did_disable = iface.control().disable().await.expect("send disable").expect("disable");
assert!(did_disable);
let state_stream = fidl_fuchsia_net_interfaces_ext::admin::assignment_state_stream(
address_state_provider.clone(),
);
futures::pin_mut!(state_stream);
fidl_fuchsia_net_interfaces_ext::admin::wait_assignment_state(
&mut state_stream,
fidl_fuchsia_net_interfaces_admin::AddressAssignmentState::Unavailable,
)
.await
.expect("failed to wait for address to be UNAVAILBALE");
let removed = control
.remove_address(&mut net::InterfaceAddress::Ipv6(net::Ipv6Address {
addr: ipv6_consts::LINK_LOCAL_ADDR.ipv6_bytes(),
}))
.await
.expect("FIDL error removing address")
.expect("failed to remove address");
assert!(removed);
}
// Add the address while the interface is down.
let address_state_provider = add_address_for_dad(
&iface,
&fake_ep,
&control,
|_, _| async {},
fidl_fuchsia_net_interfaces_admin::AddressAssignmentState::Unavailable,
)
.await
.expect("DAD should have succeeded");
// Re-enable the interface, DAD should run.
let did_enable = iface.control().enable().await.expect("send enable").expect("enable");
assert!(did_enable);
expect_dad_neighbor_solicitation(&fake_ep).await;
let state_stream = fidl_fuchsia_net_interfaces_ext::admin::assignment_state_stream(
address_state_provider.clone(),
);
futures::pin_mut!(state_stream);
fidl_fuchsia_net_interfaces_ext::admin::wait_assignment_state(
&mut state_stream,
fidl_fuchsia_net_interfaces_admin::AddressAssignmentState::Assigned,
)
.await
.expect("failed to wait for address to be ASSIGNED");
let interface_state = realm
.connect_to_protocol::<fidl_fuchsia_net_interfaces::StateMarker>()
.expect("failed to connect to fuchsia.net.interfaces/State");
fidl_fuchsia_net_interfaces_ext::wait_interface_with_id(
fidl_fuchsia_net_interfaces_ext::event_stream_from_state(&interface_state)
.expect("error getting interfaces state event stream"),
&mut fidl_fuchsia_net_interfaces_ext::InterfaceState::Unknown(iface.id()),
|fidl_fuchsia_net_interfaces_ext::Properties { addresses, .. }| {
addresses.iter().find_map(
|&fidl_fuchsia_net_interfaces_ext::Address {
addr: fidl_fuchsia_net::Subnet { addr, prefix_len: _ },
valid_until: _,
}| {
match addr {
net::IpAddress::Ipv6(net::Ipv6Address { addr }) => {
(addr == ipv6_consts::LINK_LOCAL_ADDR.ipv6_bytes()).then(|| ())
}
net::IpAddress::Ipv4(_) => None,
}
},
)
},
)
.map_err(anyhow::Error::from)
.on_timeout(ASYNC_EVENT_POSITIVE_CHECK_TIMEOUT.after_now(), || {
Err(anyhow::anyhow!("timed out"))
})
.await
.expect("error waiting for address to be assigned")
}
/// Tests to make sure default router discovery, prefix discovery and more-specific
/// route discovery works.
#[variants_test]
#[test_case("host", false ; "host")]
#[test_case("router", true ; "router")]
async fn on_and_off_link_route_discovery<E: netemul::Endpoint>(
test_name: &str,
sub_test_name: &str,
forwarding: bool,
) {
pub const SUBNET_WITH_MORE_SPECIFIC_ROUTE: net_types_ip::Subnet<net_types_ip::Ipv6Addr> = unsafe {
net_types_ip::Subnet::new_unchecked(
net_types_ip::Ipv6Addr::new([0xa001, 0xf1f0, 0x4060, 0x0001, 0, 0, 0, 0]),
64,
)
};
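/// Polls the netstack's forwarding table until every entry in `want_routes` is present,
/// panicking if they do not all appear within the positive-check timeout.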
async fn check_route_table(
stack: &net_stack::StackProxy,
want_routes: &[net_stack::ForwardingEntry],
) |
let name = format!("{}_{}", test_name, sub_test_name);
let name = name.as_str();
let sandbox = netemul::TestSandbox::new().expect("failed to create sandbox");
let (_network, realm, _netstack, iface, fake_ep) =
setup_network::<E>(&sandbox, name).await.expect("failed to setup network");
let stack =
realm.connect_to_protocol::<net_stack::StackMarker>().expect("failed to get stack proxy");
if forwarding {
let () = stack.enable_ip_forwarding().await.expect("error enabling IP forwarding");
}
let options = [
NdpOptionBuilder::PrefixInformation(PrefixInformation::new(
ipv6_consts::PREFIX.prefix(), /* prefix_length */
true, /* on_link_flag */
false, /* autonomous_address_configuration_flag */
6234, /* valid_lifetime */
0, /* preferred_lifetime */
ipv6_consts::PREFIX.network(), /* prefix */
)),
NdpOptionBuilder::RouteInformation(RouteInformation::new(
SUBNET_WITH_MORE_SPECIFIC_ROUTE,
1337, /* route_lifetime_seconds */
RoutePreference::default(),
)),
];
let () = send_ra_with_router_lifetime(&fake_ep, 1234, &options)
.await
.expect("failed to send router advertisement");
let nicid = iface.id();
check_route_table(
&stack,
&[
// Test that a default route through the router is installed.
net_stack::ForwardingEntry {
subnet: net::Subnet {
addr: net::IpAddress::Ipv6(net::Ipv6Address {
addr: net_types_ip::Ipv6::UNSPECIFIED_ADDRESS.ipv6_bytes(),
}),
prefix_len: 0,
},
device_id: nicid,
next_hop: Some(Box::new(net::IpAddress::Ipv6(net::Ipv6Address {
addr: ipv6_consts::LINK_LOCAL_ADDR.ipv6_bytes(),
}))),
metric: 0,
},
// Test that a route to `SUBNET_WITH_MORE_SPECIFIC_ROUTE` exists through the router.
net_stack::ForwardingEntry {
subnet: net::Subnet {
addr: net::IpAddress::Ipv6(net::Ipv6Address {
addr: SUBNET_WITH_MORE_SPECIFIC_ROUTE.network().ipv6_bytes(),
}),
prefix_len: SUBNET_WITH_MORE_SPECIFIC_ROUTE.prefix(),
},
device_id: nicid,
next_hop: Some(Box::new(net::IpAddress::Ipv6(net::Ipv6Address {
addr: ipv6_consts::LINK_LOCAL_ADDR.ipv6_bytes(),
}))),
metric: 0,
},
// Test that the prefix should be discovered after it is advertised.
net_stack::ForwardingEntry {
subnet: net::Subnet {
addr: net::IpAddress::Ipv6(net::Ipv6Address {
addr: ipv6_consts::PREFIX.network().ipv6_bytes(),
}),
prefix_len: ipv6_consts::PREFIX.prefix(),
},
device_id: nicid,
next_hop: None,
metric: 0,
},
][..],
)
.await
}
#[variants_test]
async fn slaac_regeneration_after_dad_failure<E: netemul::Endpoint>(name: &str) {
// Expects an NS message for DAD within timeout and returns the target address of the message.
async fn expect_ns_message_in(
fake_ep: &netemul::TestFakeEndpoint<'_>,
timeout: zx::Duration,
) -> net_types_ip::Ipv6Addr {
fake_ep
.frame_stream()
.try_filter_map(|(data, dropped)| {
assert_eq!(dropped, 0);
future::ok(
parse_icmp_packet_in_ip_packet_in_ethernet_frame::<
net_types_ip::Ipv6,
_,
NeighborSolicitation,
_,
>(&data, |p| assert_eq!(p.body().iter().count(), 0))
.map_or(None, |(_src_mac, _dst_mac, _src_ip, _dst_ip, _ttl, message, _code)| {
// If the NS target_address does not have the prefix we have advertised,
// this is for some other address. We ignore it as it is not relevant to
// our test.
if !ipv6_consts::PREFIX.contains(message.target_address()) {
return None;
}
Some(*message.target_address())
}),
)
})
.try_next()
.map(|r| r.context("error getting OnData event"))
.on_timeout(timeout.after_now(), || {
Err(anyhow::anyhow!(
"timed out waiting for a neighbor solicitation targetting address of prefix: {}",
ipv6_consts::PREFIX,
))
})
.await.unwrap().expect("failed to get next OnData event")
}
let sandbox = netemul::TestSandbox::new().expect("failed to create sandbox");
let (_network, realm, _netstack, iface, fake_ep) =
setup_network_with::<E, _>(&sandbox, name, &[KnownServiceProvider::SecureStash])
.await
.expect("error setting up network");
// Send a Router Advertisement with information for a SLAAC prefix.
let ra = RouterAdvertisement::new(
0, /* current_hop_limit */
false, /* managed_flag */
false, /* other_config_flag */
0, /* router_lifetime */
0, /* reachable_time */
0, /* retransmit_timer */
);
let pi = PrefixInformation::new(
ipv6_consts::PREFIX.prefix(), /* prefix_length */
false, /* on_link_flag */
true, /* autonomous_address_configuration_flag */
99999, /* valid_lifetime */
99999, /* preferred_lifetime */
ipv6_consts::PREFIX.network(), /* prefix */
);
let options = [NdpOptionBuilder::PrefixInformation(pi)];
let () = write_ndp_message::<&[u8], _>(
eth_consts::MAC_ADDR,
Mac::from(&net_types_ip::Ipv6::ALL_NODES_LINK_LOCAL_MULTICAST_ADDRESS),
ipv6_consts::LINK_LOCAL_ADDR,
net_types_ip::Ipv6::ALL_NODES_LINK_LOCAL_MULTICAST_ADDRESS.get(),
ra,
&options,
&fake_ep,
)
.await
.expect("failed to write RA message");
let tried_address = expect_ns_message_in(&fake_ep, ASYNC_EVENT_POSITIVE_CHECK_TIMEOUT).await;
// We pretend there is a duplicate address situation.
let snmc = tried_address.to_solicited_node_address();
let () = write_ndp_message::<&[u8], _>(
eth_consts::MAC_ADDR,
Mac::from(&snmc),
net_types_ip::Ipv6::UNSPECIFIED_ADDRESS,
snmc.get(),
NeighborSolicitation::new(tried_address),
&[],
&fake_ep,
)
.await
.expect("failed to write DAD message");
let target_address =
expect_ns_message_in(&fake_ep, DAD_IDGEN_DELAY + ASYNC_EVENT_POSITIVE_CHECK_TIMEOUT).await;
// We expect two addresses for the SLAAC prefixes to be assigned to the NIC as the
// netstack should generate both a stable and temporary SLAAC address.
let expected_addrs = 2;
let interface_state = realm
.connect_to_protocol::<fidl_fuchsia_net_interfaces::StateMarker>()
.expect("failed to connect to fuchsia.net.interfaces/State");
let () = fidl_fuchsia_net_interfaces_ext::wait_interface_with_id(
fidl_fuchsia_net_interfaces_ext::event_stream_from_state(&interface_state)
.expect("error getting interfaces state event stream"),
&mut fidl_fuchsia_net_interfaces_ext::InterfaceState::Unknown(iface.id()),
|fidl_fuchsia_net_interfaces_ext::Properties { addresses, .. }| {
// We have to make sure of two things:
// 1. We have `expected_addrs` addrs which have the advertised prefix for the
// interface.
// 2. The last tried address should be among the addresses for the interface.
let (slaac_addrs, has_target_addr) = addresses.iter().fold(
(0, false),
|(mut slaac_addrs, mut has_target_addr),
&fidl_fuchsia_net_interfaces_ext::Address {
addr: fidl_fuchsia_net::Subnet { addr, prefix_len: _ },
valid_until: _,
}| {
match addr {
net::IpAddress::Ipv6(net::Ipv6Address { addr }) => {
let configured_addr = net_types_ip::Ipv6Addr::from_bytes(addr);
assert_ne!(
configured_addr, tried_address,
"address which previously failed DAD was assigned"
);
if ipv6_consts::PREFIX.contains(&configured_addr) {
slaac_addrs += 1;
}
if configured_addr == target_address {
has_target_addr = true;
}
}
net::IpAddress::Ipv4(_) => {}
}
(slaac_addrs, has_target_addr)
},
);
assert!(
slaac_addrs <= expected_addrs,
"more addresses found than expected, found {}, expected {}",
slaac_addrs,
expected_addrs
);
if slaac_addrs == expected_addrs && has_target_addr {
Some(())
} else {
None
}
},
)
.map_err(anyhow::Error::from)
.on_timeout(
(EXPECTED_DAD_RETRANSMIT_TIMER * EXPECTED_DUP_ADDR_DETECT_TRANSMITS * expected_addrs
+ ASYNC_EVENT_POSITIVE_CHECK_TIMEOUT)
.after_now(),
|| Err(anyhow::anyhow!("timed out")),
)
.await
.expect("failed to wait for SLAAC addresses");
}
#[variants_test]
async fn sends_mld_reports<E: netemul::Endpoint>(name: &str) {
let sandbox = netemul::TestSandbox::new().expect("error creating sandbox");
let (_network, _realm, _netstack, iface, fake_ep) =
setup_network::<E>(&sandbox, name).await.expect("error setting up networking");
// Add an address so we join the address's solicited node multicast group.
let () = iface
.add_ip_addr(net::Subnet {
addr: net::IpAddress::Ipv6(net::Ipv6Address {
addr: ipv6_consts::LINK_LOCAL_ADDR.ipv6_bytes(),
}),
prefix_len: 64,
})
.await
.expect("error adding IP address");
let snmc = ipv6_consts::LINK_LOCAL_ADDR.to_solicited_node_address();
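// Filter the fake endpoint's frame stream down to the MLD report for the
// solicited-node multicast group joined above, asserting the RFC-mandated source
// address, destination, and hop limit along the way.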
let stream = fake_ep
.frame_stream()
.map(|r| r.context("error getting OnData event"))
.try_filter_map(|(data, dropped)| {
async move {
assert_eq!(dropped, 0);
let mut data = &data[..];
let eth = EthernetFrame::parse(&mut data, EthernetFrameLengthCheck::Check)
.expect("error parsing ethernet frame");
if eth.ethertype() != Some(EtherType::Ipv6) {
// Ignore non-IPv6 packets.
return Ok(None);
}
let (mut payload, src_ip, dst_ip, proto, ttl) =
parse_ip_packet::<net_types_ip::Ipv6>(&data)
.expect("error parsing IPv6 packet");
if proto != Ipv6Proto::Icmpv6 {
// Ignore non-ICMPv6 packets.
return Ok(None);
}
let icmp = Icmpv6Packet::parse(&mut payload, IcmpParseArgs::new(src_ip, dst_ip))
.expect("error parsing ICMPv6 packet");
let mld = if let Icmpv6Packet::Mld(mld) = icmp {
mld
} else {
// Ignore non-MLD packets.
return Ok(None);
};
// As per RFC 3590 section 4,
//
// MLD Report and Done messages are sent with a link-local address as
// the IPv6 source address, if a valid address is available on the
// interface. If a valid link-local address is not available (e.g., one
// has not been configured), the message is sent with the unspecified
// address (::) as the IPv6 source address.
assert!(!src_ip.is_specified() || src_ip.is_link_local(), "MLD messages must be sent from the unspecified or link local address; src_ip = {}", src_ip);
assert!(dst_ip.is_multicast(), "all MLD messages must be sent to a multicast address; dst_ip = {}", dst_ip);
// As per RFC 2710 section 3,
//
// All MLD messages described in this document are sent with a
// link-local IPv6 Source Address, an IPv6 Hop Limit of 1, ...
assert_eq!(ttl, 1, "MLD messages must have a hop limit of 1");
let report = if let MldPacket::MulticastListenerReport(report) = mld {
report
} else {
// Ignore non-report messages.
return Ok(None);
};
let group_addr = report.body().group_addr;
assert!(group_addr.is_multicast(), "MLD reports must only be sent for multicast addresses; group_addr = {}", group_addr);
if group_addr != *snmc {
// We are only interested in the report for the solicited node
// multicast group we joined.
return Ok(None);
}
assert_eq!(dst_ip, group_addr, "the destination of an MLD report should be the multicast group the report is for");
Ok(Some(()))
}
});
futures::pin_mut!(stream);
let () = stream
.try_next()
.on_timeout(ASYNC_EVENT_POSITIVE_CHECK_TIMEOUT.after_now(), || {
return Err(anyhow::anyhow!("timed out waiting for the MLD report"));
})
.await
.unwrap()
.expect("error getting our expected MLD report");
}
| {
let check_attempts = ASYNC_EVENT_POSITIVE_CHECK_TIMEOUT.into_seconds()
/ ASYNC_EVENT_CHECK_INTERVAL.into_seconds();
for attempt in 0..check_attempts {
let () = sleep(ASYNC_EVENT_CHECK_INTERVAL.into_seconds()).await;
let route_table =
stack.get_forwarding_table().await.expect("failed to get route table");
if want_routes.iter().all(|route| route_table.contains(route)) {
return;
}
println!("route table at attempt={}:\n{:?}", attempt, route_table);
}
panic!(
"timed out on waiting for a route table entry after {} seconds",
ASYNC_EVENT_POSITIVE_CHECK_TIMEOUT.into_seconds(),
)
} |
lib.rs | use std::thread;
use num_integer::Roots;
use num_cpus::get;
use indicatif::ProgressBar;
use ringbuf::RingBuffer;
use std::time::Duration;
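/// Trial-division primality check parallelized across `2 * num_cpus` worker threads.
/// Returns 0 when `number_to_check` is 1, 1 when no divisor up to its square root is
/// found (i.e. the number is prime), and otherwise a divisor reported by a worker.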
pub fn finds_if_number_is_prime(number_to_check:u128) -> u128 {
if number_to_check == 1 {
return 0;
}
else {
let number_of_threads:u128 = ((get()) as u128) * 2;
// println!("spinning up {} threads", number_of_threads);
let progress_bar = ProgressBar::new(number_to_check.sqrt().try_into().unwrap());
let mut count2:u128 = 1;
let mut thread_group_ring_buffers_divisor = vec![];
let mut thread_group_ring_buffers_stop = vec![];
let mut thread_group_ring_buffers_work = vec![];
let mut threads_group = vec![];
for thread_number in 0..number_of_threads {
// let progress_bar_clone = progress_bar.clone();
let this_thread_ring_buffer_divisor = RingBuffer::<u128>::new(1);
let this_thread_ring_buffer_stop = RingBuffer::<bool>::new(1);
let this_thread_ring_buffer_work = RingBuffer::<u128>::new(1024);
let (mut this_thread_ring_buffer_divisor_write, this_thread_ring_buffer_divisor_read) = this_thread_ring_buffer_divisor.split();
let (this_thread_ring_buffer_stop_write, this_thread_ring_buffer_stop_read) = this_thread_ring_buffer_stop.split();
let (mut this_thread_ring_buffer_work_write, this_thread_ring_buffer_work_read) = this_thread_ring_buffer_work.split();
thread_group_ring_buffers_divisor.push(this_thread_ring_buffer_divisor_read);
thread_group_ring_buffers_stop.push(this_thread_ring_buffer_stop_write);
thread_group_ring_buffers_work.push(this_thread_ring_buffer_work_read);
threads_group.push(thread::spawn(move || {
let root:u128 = number_to_check.sqrt().try_into().unwrap();
let mut count = 3 + (thread_number * 2);
if 2 > root {
this_thread_ring_buffer_divisor_write.push(1).unwrap();
return (true, 1);
}
if number_to_check % 2 == 0 {
this_thread_ring_buffer_divisor_write.push(2).unwrap();
return (false, 2);
}
loop {
if count > root {
this_thread_ring_buffer_work_write.push(count2 * 2).unwrap();
thread::sleep(Duration::from_millis(100));
this_thread_ring_buffer_divisor_write.push(1).unwrap();
return (true, 1);
}
if number_to_check % count == 0 {
this_thread_ring_buffer_divisor_write.push(count).unwrap();
return (false, count);
}
if count2 != 0 {
}
else {
if this_thread_ring_buffer_stop_read.is_empty() |
else {
println!("recieved stop command, thread {} stopping",thread_number);
return (false, 0);
}
this_thread_ring_buffer_work_write.push(2097152).unwrap();
//progress_bar_clone.inc(4194304);
}
count = count + (number_of_threads * 2);
count2 = (count2 + 1) & 1048575;
}
}));
}
// let mut andy:bool = true;
let mut divisor:u128 = 0;
let mut received = 0;
let mut done_threads = vec![];
// println!("threads started");
loop {
// thread::sleep(Duration::from_millis(10));
// let mut done_threads = vec![];
let mut all_done = false;
for this_thread_ring_buffer_divisor_read in &mut thread_group_ring_buffers_divisor {
if this_thread_ring_buffer_divisor_read.is_empty() {
// thread::sleep(Duration::from_millis(10))
}
else {
received = this_thread_ring_buffer_divisor_read.pop().unwrap();
if received > 1 {
break
}
if received == 1 {
done_threads.push(1);
// progress_bar.tick();
// thread::sleep(Duration::from_millis(10));
let mut count = 0;
for &mut thread in &mut done_threads {
count = count + thread
}
if count == number_of_threads {
all_done = true;
break
}
}
}
}
if received > 1 {
break
}
for this_thread_ring_buffer_work_read in &mut thread_group_ring_buffers_work {
if this_thread_ring_buffer_work_read.is_empty() {
// thread::sleep(Duration::from_millis(10));
// progress_bar.tick();
}
else {
let work_progress = this_thread_ring_buffer_work_read.pop().unwrap();
progress_bar.inc(work_progress.try_into().unwrap())
}
}
if all_done {
// progress_bar.finish();
break
}
}
if received != 0 {
for this_thread_ring_buffer_stop_write in &mut thread_group_ring_buffers_stop {
match this_thread_ring_buffer_stop_write.push(true) {
Ok(_) => {}
Err(_) => {
println!("there was a problem telling a thread to stop!")
}
};
}
divisor = received;
//andy = false;
}
else {
println!("there was an issue recieving the divisor from a thread");
}
//}
return divisor
}
}
pub fn collect_primes(list_of_primes:&Vec<u128>) -> Vec<u128> {
let number_of_threads = get() as u128 * 2;
//println!("number of threads is {}",number_of_threads);
let next_number = list_of_primes.last().unwrap() + 2;
let mut threads_group = vec![];
//let mut threads_group_ring_buffer_worker_send = vec![];
//let mut threads_group_ring_buffer_worker_receive = vec![];
for this_thread in 0..number_of_threads {
//let this_thread_ring_buffer_worker_receive = RingBuffer::<Vec>::new
let next_number = next_number.clone();
let mut list_of_new_primes = vec![];
let og_list_of_primes = list_of_primes.clone();
threads_group.push(thread::spawn (move || {
let mut this_thread_next_number = next_number + this_thread * 2;
//println!("this is thread {} starting at {}",this_thread,this_thread_next_number);
loop {
let root = (this_thread_next_number).sqrt();
if *og_list_of_primes.last().unwrap() >= root + 1 {
let mut prime:bool = true;
let mut place = 0;
let mut checking = 2;
loop {
//println!("cheching is {} and next number is {}",checking,this_thread_next_number);
checking = og_list_of_primes[place];
if checking <= root {
}
else {
prime = true;
break
}
if this_thread_next_number % checking != 0 {
}
else {
prime = false;
break
}
place = place + 1
}
if prime {
list_of_new_primes.push(this_thread_next_number);
//println!("{}",this_thread_next_number);
}
this_thread_next_number = this_thread_next_number + number_of_threads * 2
}
else {
break
}
}
return list_of_new_primes
}))
}
let mut list = vec![];
for this_thread in threads_group {
let this_thread_list = this_thread.join().unwrap();
for prime in this_thread_list {
list.push(prime)
}
}
list.sort();
return list
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_smalls() {
let smalls_prime_chart:Vec<bool> = vec![false, true, true, false, true, false, true, false, false, false, true, false, true, false, false, false, true, false, true, false, false, false, true, false, false, false, false, false, true, false, true, false, false, false, false, false, true, false, false, false, true, false, true, false, false, false, true, false, false, false, false, false, true, false, false, false, false, false, true, false, true, false, false, false, false, false, true, false, false, false, true, false, true, false, false, false, false, false, true, false, false, false, true, false, false, false, false, false, true, false, false, false, false, false, false, false, true, false, false, false];
for checking in 1..((smalls_prime_chart.len())+1) {
let divisor = finds_if_number_is_prime(checking.try_into().unwrap());
let primeiness = divisor == 1;
println!("{}", checking);
assert_eq!(smalls_prime_chart[(checking - 1)], primeiness);
}
}
#[test]
fn test_problems(){
let problems_list:Vec<u128> = vec![29873456,1145627248201741];
let problems_prime_chart:Vec<bool> = vec![false, true];
for checking in 1..((problems_prime_chart.len())+1) {
let divisor = finds_if_number_is_prime((problems_list[(checking - 1)]).try_into().unwrap());
let primeiness = divisor == 1;
println!("{}", checking);
assert_eq!(problems_prime_chart[(checking - 1)], primeiness);
}
}
}
| {
} |
errors_test.go | /*
Copyright 2019 VMware, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cli_test
import (
"errors"
"fmt"
"testing"
cli "github.com/vmware-tanzu/apps-cli-plugin/pkg/cli-runtime"
)
func TestSilenceError(t *testing.T) | {
err := fmt.Errorf("test error")
silentErr := cli.SilenceError(err)
if errors.Is(err, cli.SilentError) {
t.Errorf("expected error to not be silent, got %#v", err)
}
if !errors.Is(silentErr, cli.SilentError) {
t.Errorf("expected error to be silent, got %#v", err)
}
if expected, actual := err, errors.Unwrap(silentErr); expected != actual {
t.Errorf("errors expected to match, expected %v, actually %v", expected, actual)
}
if expected, actual := err.Error(), silentErr.Error(); expected != actual {
t.Errorf("errors expected to match, expected %q, actually %q", expected, actual)
}
} |
|
__init__.py | #!/usr/bin/env python3.7
import logging
from obfuscapk import obfuscator_category
from obfuscapk.obfuscation import Obfuscation
class NewSignature(obfuscator_category.ITrivialObfuscator):
def __init__(self):
self.logger = logging.getLogger('{0}.{1}'.format(__name__, self.__class__.__name__))
super().__init__()
def obfuscate(self, obfuscation_info: Obfuscation):
self.logger.info('Running "{0}" obfuscator'.format(self.__class__.__name__))
try:
obfuscation_info.sign_obfuscated_apk() | self.logger.error('Error during execution of "{0}" obfuscator: {1}'.format(self.__class__.__name__, e))
raise
finally:
obfuscation_info.used_obfuscators.append(self.__class__.__name__) | except Exception as e: |
zkp.rs | //! Implementation of the Unit Vector ZK argument presented by
//! Zhang, Oliynykov and Balogun in
//! ["A Treasury System for Cryptocurrencies: Enabling Better Collaborative Intelligence"](https://www.ndss-symposium.org/wp-content/uploads/2019/02/ndss2019_02A-2_Zhang_paper.pdf).
//! We use the notation presented in the technical
//! [spec](https://github.com/input-output-hk/treasury-crypto/blob/master/docs/voting_protocol_spec/Treasury_voting_protocol_spec.pdf),
//! written by Dmytro Kaidalov.
use crate::{GroupElement, Scalar};
use chain_core::mempack::{ReadBuf, ReadError};
use rand_core::{CryptoRng, RngCore};
use {rand::thread_rng, std::iter};
use super::challenge_context::ChallengeContext;
use super::messages::{generate_polys, Announcement, BlindingRandomness, ResponseRandomness};
use crate::cryptography::CommitmentKey;
use crate::cryptography::{Ciphertext, PublicKey};
use crate::encrypted_vote::{binrep, Ptp, UnitVector};
use crate::tally::Crs;
#[derive(Clone, Debug, Eq, PartialEq, Hash)]
pub struct Zkp {
/// Commitment to the proof randomness and bits of binary representation of `i`
ibas: Vec<Announcement>,
/// Encryption to the polynomial coefficients used in the proof
ds: Vec<Ciphertext>,
/// Response related to the randomness committed in `ibas`
zwvs: Vec<ResponseRandomness>,
/// Final response
r: Scalar,
}
#[allow(clippy::len_without_is_empty)]
impl Zkp {
/// Generate a unit vector proof. In this proof, a prover encrypts each entry of a
/// vector `unit_vector`, and proves
/// that the vector is a unit vector. In particular, it proves that it is the `i`th unit
/// vector without disclosing `i`.
/// Common Reference String (`Crs`): Pedersen Commitment Key
/// Statement: public key `pk`, and ciphertexts `ciphertexts`
/// C_0=Enc_pk(r_0; v_0), ..., C_{m-1}=Enc_pk(r_{m-1}; v_{m-1})
/// Witness: the unit vector `unit_vector`, and randomness used for
/// encryption `encryption_randomness`.
///
/// The proof communication complexity is logarithmic with respect to the size of
/// the encrypted tuple. Description of the proof available in Figure 8.
pub(crate) fn generate<R: RngCore + CryptoRng>(
rng: &mut R,
crs: &Crs,
public_key: &PublicKey,
unit_vector: &UnitVector,
encryption_randomness: &[Scalar],
ciphertexts: &[Ciphertext],
) -> Self {
let ck = CommitmentKey::from(crs.clone());
let ciphers = Ptp::new(ciphertexts.to_vec(), Ciphertext::zero);
let cipher_randoms = Ptp::new(encryption_randomness.to_vec(), Scalar::zero);
assert_eq!(ciphers.bits(), cipher_randoms.bits());
let bits = ciphers.bits();
let mut blinding_randomness_vec = Vec::with_capacity(bits);
let mut first_announcement_vec = Vec::with_capacity(bits);
let idx_binary_rep = binrep(unit_vector.ith(), bits as u32);
for &i in idx_binary_rep.iter() {
let (b_rand, ann) = BlindingRandomness::gen_and_commit(&ck, i, rng);
blinding_randomness_vec.push(b_rand);
first_announcement_vec.push(ann);
}
// Generate First verifier challenge
let mut cc = ChallengeContext::new(&ck, public_key, ciphers.as_ref());
let cy = cc.first_challenge(&first_announcement_vec);
let (poly_coeff_enc, rs) = {
let pjs = generate_polys(
ciphers.len(),
&idx_binary_rep,
bits,
&blinding_randomness_vec,
);
// Generate new Rs for Ds
let mut rs = Vec::with_capacity(bits);
let mut ds = Vec::with_capacity(bits);
for i in 0..bits {
let sum =
cy.exp_iter()
.zip(pjs.iter())
.fold(Scalar::zero(), |sum, (c_pows, pj)| {
let s = sum + c_pows * pj.get_coefficient_at(i);
s
});
let (d, r) = public_key.encrypt_return_r(&sum, rng);
ds.push(d);
rs.push(r);
}
(ds, rs)
};
// Generate second verifier challenge
let cx = cc.second_challenge(&poly_coeff_enc);
// Compute ZWVs
let randomness_response_vec = blinding_randomness_vec
.iter()
.zip(idx_binary_rep.iter())
.map(|(abcd, index)| abcd.gen_response(&cx, index))
.collect::<Vec<_>>();
// Compute R
let response = {
let cx_pow = cx.power(cipher_randoms.bits());
let p1 = cipher_randoms.iter().zip(cy.exp_iter()).fold(
Scalar::zero(),
|acc, (r, cy_pows)| {
let el = r * &cx_pow * cy_pows;
el + acc
},
);
let p2 = rs
.iter()
.zip(cx.exp_iter())
.fold(Scalar::zero(), |acc, (r, cx_pows)| {
let el = r * cx_pows;
el + acc
});
p1 + p2
};
Zkp {
ibas: first_announcement_vec,
ds: poly_coeff_enc,
zwvs: randomness_response_vec,
r: response,
}
}
/// Verify a unit vector proof. The verifier checks that the plaintexts encrypted in `ciphertexts`,
/// under `public_key` represent a unit vector.
/// Common Reference String (`crs`): Pedersen Commitment Key
/// Statement: public key `pk`, and ciphertexts `ciphertexts`
/// C_0=Enc_pk(r_0; v_0), ..., C_{m-1}=Enc_pk(r_{m-1}; v_{m-1})
///
/// Description of the verification procedure available in Figure 9.
pub fn verify(&self, crs: &Crs, public_key: &PublicKey, ciphertexts: &[Ciphertext]) -> bool {
let ck = CommitmentKey::from(crs.clone());
let ciphertexts = Ptp::new(ciphertexts.to_vec(), Ciphertext::zero);
let bits = ciphertexts.bits();
let mut cc = ChallengeContext::new(&ck, public_key, ciphertexts.as_ref());
let cy = cc.first_challenge(&self.ibas);
let cx = cc.second_challenge(&self.ds);
if self.ibas.len() != bits {
return false;
}
if self.zwvs.len() != bits {
return false;
}
self.verify_statements(public_key, &ck, &ciphertexts, &cx, &cy)
}
/// Final verification of the proof, that we compute in a single vartime multiscalar
/// multiplication.
fn verify_statements(
&self,
public_key: &PublicKey,
commitment_key: &CommitmentKey,
ciphertexts: &Ptp<Ciphertext>,
challenge_x: &Scalar,
challenge_y: &Scalar,
) -> bool {
let bits = ciphertexts.bits();
let length = ciphertexts.len();
let cx_pow = challenge_x.power(bits);
let powers_cx = challenge_x.exp_iter();
let powers_cy = challenge_y.exp_iter();
let powers_z_iterator = powers_z_encs_iter(&self.zwvs, challenge_x, &(bits as u32));
let zero = public_key.encrypt_with_r(&Scalar::zero(), &self.r);
// Challenge value for batching two equations into a single multiscalar mult.
let batch_challenge = Scalar::random(&mut thread_rng());
for (zwv, iba) in self.zwvs.iter().zip(self.ibas.iter()) {
if GroupElement::vartime_multiscalar_multiplication(
iter::once(zwv.z.clone())
.chain(iter::once(&zwv.w + &batch_challenge * &zwv.v))
.chain(iter::once(
&batch_challenge * (&zwv.z - challenge_x) - challenge_x,
))
.chain(iter::once(Scalar::one().negate()))
.chain(iter::once(batch_challenge.negate())),
iter::once(GroupElement::generator())
.chain(iter::once(commitment_key.h.clone()))
.chain(iter::once(iba.i.clone()))
.chain(iter::once(iba.b.clone()))
.chain(iter::once(iba.a.clone())),
) != GroupElement::zero()
{
return false;
}
}
let mega_check = GroupElement::vartime_multiscalar_multiplication(
powers_cy
.clone()
.take(length)
.map(|s| s * &cx_pow)
.chain(powers_cy.clone().take(length).map(|s| s * &cx_pow))
.chain(powers_cy.take(length))
.chain(powers_cx.clone().take(bits))
.chain(powers_cx.take(bits))
.chain(iter::once(Scalar::one().negate()))
.chain(iter::once(Scalar::one().negate())),
ciphertexts
.iter()
.map(|ctxt| ctxt.e2.clone())
.chain(ciphertexts.iter().map(|ctxt| ctxt.e1.clone()))
.chain(powers_z_iterator.take(length))
.chain(self.ds.iter().map(|ctxt| ctxt.e1.clone()))
.chain(self.ds.iter().map(|ctxt| ctxt.e2.clone()))
.chain(iter::once(zero.e1.clone()))
.chain(iter::once(zero.e2)),
);
mega_check == GroupElement::zero()
}
/// Try to generate a `Proof` from a buffer
pub fn from_buffer(buf: &mut ReadBuf) -> Result<Self, ReadError> {
let bits = buf.get_u8()? as usize;
let mut ibas = Vec::with_capacity(bits);
for _ in 0..bits {
let elem_buf = buf.get_slice(Announcement::BYTES_LEN)?;
let iba = Announcement::from_bytes(elem_buf)
.ok_or_else(|| ReadError::StructureInvalid("Invalid IBA component".to_string()))?;
ibas.push(iba);
}
let mut bs = Vec::with_capacity(bits);
for _ in 0..bits {
let elem_buf = buf.get_slice(Ciphertext::BYTES_LEN)?;
let ciphertext = Ciphertext::from_bytes(elem_buf).ok_or_else(|| {
ReadError::StructureInvalid("Invalid encoded ciphertext".to_string())
})?;
bs.push(ciphertext);
}
let mut zwvs = Vec::with_capacity(bits);
for _ in 0..bits {
let elem_buf = buf.get_slice(ResponseRandomness::BYTES_LEN)?;
let zwv = ResponseRandomness::from_bytes(elem_buf)
.ok_or_else(|| ReadError::StructureInvalid("Invalid ZWV component".to_string()))?;
zwvs.push(zwv);
}
let r_buf = buf.get_slice(Scalar::BYTES_LEN)?;
let r = Scalar::from_bytes(r_buf).ok_or_else(|| {
ReadError::StructureInvalid("Invalid Proof encoded R scalar".to_string())
})?;
Ok(Self::from_parts(ibas, bs, zwvs, r))
}
/// Constructs the proof structure from constituent parts.
///
/// # Panics
///
/// The `ibas`, `ds`, and `zwvs` must have the same length, otherwise the function will panic.
pub fn from_parts(
ibas: Vec<Announcement>,
ds: Vec<Ciphertext>,
zwvs: Vec<ResponseRandomness>,
r: Scalar,
) -> Self {
assert_eq!(ibas.len(), ds.len());
assert_eq!(ibas.len(), zwvs.len());
Zkp { ibas, ds, zwvs, r }
}
/// Returns the length of the size of the witness vector
pub fn len(&self) -> usize {
self.ibas.len()
}
/// Return an iterator of the announcement commitments
pub fn ibas(&self) -> impl Iterator<Item = &Announcement> {
self.ibas.iter()
}
/// Return an iterator of the encryptions of the polynomial coefficients
pub fn ds(&self) -> impl Iterator<Item = &Ciphertext> {
self.ds.iter()
}
/// Return an iterator of the response related to the randomness
pub fn zwvs(&self) -> impl Iterator<Item = &ResponseRandomness> {
self.zwvs.iter()
}
/// Return R
pub fn r(&self) -> &Scalar {
&self.r
}
}
// Computes the product of the powers of `z` given the `challenge_x`, `index` and a `bit_size`
fn powers_z_encs(
z: &[ResponseRandomness],
challenge_x: Scalar,
index: usize,
bit_size: u32,
) -> Scalar {
let idx = binrep(index, bit_size as u32);
let multz = z.iter().enumerate().fold(Scalar::one(), |acc, (j, zwv)| {
let m = if idx[j] {
zwv.z.clone()
} else {
&challenge_x - &zwv.z
};
&acc * m
});
multz
}
/// Provides an iterator over the encryptions of the product of the powers of `z`.
///
/// This struct is created by the `powers_z_encs_iter` function.
struct ZPowExp {
index: usize,
bit_size: u32,
z: Vec<ResponseRandomness>,
challenge_x: Scalar,
}
impl Iterator for ZPowExp {
type Item = GroupElement;
fn next(&mut self) -> Option<GroupElement> {
let z_pow = powers_z_encs(&self.z, self.challenge_x.clone(), self.index, self.bit_size);
self.index += 1;
Some(z_pow.negate() * GroupElement::generator())
}
fn size_hint(&self) -> (usize, Option<usize>) {
(usize::MAX, None)
}
}
// Return an iterator of the powers of `ZPowExp`.
#[allow(dead_code)] // can be removed if the default flag is ristretto instead of sec2
fn powers_z_encs_iter(z: &[ResponseRandomness], challenge_x: &Scalar, bit_size: &u32) -> ZPowExp {
ZPowExp {
index: 0,
bit_size: *bit_size,
z: z.to_vec(),
challenge_x: challenge_x.clone(),
}
}
#[cfg(test)]
mod tests {
use super::*;
use rand_chacha::ChaCha20Rng;
use rand_core::SeedableRng;
#[test]
fn prove_verify1() {
let mut r = ChaCha20Rng::from_seed([0u8; 32]);
let public_key = PublicKey {
pk: GroupElement::from_hash(&[1u8]),
};
let unit_vector = UnitVector::new(2, 0);
let encryption_randomness = vec![Scalar::random(&mut r); unit_vector.len()];
let ciphertexts: Vec<Ciphertext> = unit_vector
.iter()
.zip(encryption_randomness.iter())
.map(|(i, r)| public_key.encrypt_with_r(&Scalar::from(i), r))
.collect();
let mut shared_string =
b"Example of a shared string. This could be the latest block hash".to_owned();
let crs = Crs::from_hash(&mut shared_string);
let proof = Zkp::generate(
&mut r,
&crs,
&public_key,
&unit_vector,
&encryption_randomness,
&ciphertexts,
);
assert!(proof.verify(&crs, &public_key, &ciphertexts))
}
#[test]
fn prove_verify() {
let mut r = ChaCha20Rng::from_seed([0u8; 32]);
let public_key = PublicKey {
pk: GroupElement::from_hash(&[1u8]),
};
let unit_vector = UnitVector::new(2, 0);
let encryption_randomness = vec![Scalar::random(&mut r); unit_vector.len()];
let ciphertexts: Vec<Ciphertext> = unit_vector
.iter()
.zip(encryption_randomness.iter())
.map(|(i, r)| public_key.encrypt_with_r(&Scalar::from(i), r))
.collect();
let mut shared_string =
b"Example of a shared string. This could be the latest block hash".to_owned();
let crs = Crs::from_hash(&mut shared_string);
let proof = Zkp::generate(
&mut r,
&crs,
&public_key,
&unit_vector,
&encryption_randomness,
&ciphertexts,
);
assert!(proof.verify(&crs, &public_key, &ciphertexts))
}
#[test]
fn false_proof() {
let mut r = ChaCha20Rng::from_seed([0u8; 32]);
let public_key = PublicKey {
pk: GroupElement::from_hash(&[1u8]),
};
let unit_vector = UnitVector::new(2, 0);
let encryption_randomness = vec![Scalar::random(&mut r); unit_vector.len()];
let ciphertexts: Vec<Ciphertext> = unit_vector
.iter()
.zip(encryption_randomness.iter())
.map(|(i, r)| public_key.encrypt_with_r(&Scalar::from(i), r))
.collect();
let mut shared_string =
b"Example of a shared string. This could be the latest block hash".to_owned();
let crs = Crs::from_hash(&mut shared_string);
let proof = Zkp::generate(
&mut r,
&crs,
&public_key,
&unit_vector,
&encryption_randomness,
&ciphertexts,
);
let fake_encryption = [
Ciphertext::zero(),
Ciphertext::zero(),
Ciphertext::zero(),
Ciphertext::zero(),
Ciphertext::zero(),
];
assert!(!proof.verify(&crs, &public_key, &fake_encryption))
}
#[test]
fn challenge_context() |
}
| {
let mut r = ChaCha20Rng::from_seed([0u8; 32]);
let public_key = PublicKey {
pk: GroupElement::from_hash(&[1u8]),
};
let unit_vector = UnitVector::new(2, 0);
let encryption_randomness = vec![Scalar::random(&mut r); unit_vector.len()];
let ciphertexts: Vec<Ciphertext> = unit_vector
.iter()
.zip(encryption_randomness.iter())
.map(|(i, r)| public_key.encrypt_with_r(&Scalar::from(i), r))
.collect();
let crs = GroupElement::from_hash(&[0u8]);
let ck = CommitmentKey::from(crs.clone());
let proof = Zkp::generate(
&mut r,
&crs,
&public_key,
&unit_vector,
&encryption_randomness,
&ciphertexts,
);
let mut cc1 = ChallengeContext::new(&ck, &public_key, &ciphertexts);
let cy1 = cc1.first_challenge(&proof.ibas);
let cx1 = cc1.second_challenge(&proof.ds);
// if we set up a new challenge context, the results should be equal
let mut cc2 = ChallengeContext::new(&ck, &public_key, &ciphertexts);
let cy2 = cc2.first_challenge(&proof.ibas);
let cx2 = cc2.second_challenge(&proof.ds);
assert_eq!(cy1, cy2);
assert_eq!(cx1, cx2);
// if we set up a new challenge with incorrect initialisation, results should differ
let crs_diff = GroupElement::from_hash(&[1u8]);
let ck_diff = CommitmentKey::from(crs_diff.clone());
let mut cc3 = ChallengeContext::new(&ck_diff, &public_key, &ciphertexts);
let cy3 = cc3.first_challenge(&proof.ibas);
let cx3 = cc3.second_challenge(&proof.ds);
assert_ne!(cy1, cy3);
assert_ne!(cx1, cx3);
// if we generate a new challenge with different IBAs, but same Ds, both results should differ
let proof_diff = Zkp::generate(
&mut r,
&crs,
&public_key,
&unit_vector,
&encryption_randomness,
&ciphertexts,
);
let mut cc4 = ChallengeContext::new(&ck, &public_key, &ciphertexts);
let cy4 = cc4.first_challenge(&proof_diff.ibas);
let cx4 = cc4.second_challenge(&proof.ds);
assert_ne!(cy1, cy4);
assert_ne!(cx1, cx4);
// if we generate a challenge with different Ds, only the second scalar should differ
let mut cc5 = ChallengeContext::new(&ck, &public_key, &ciphertexts);
let cy5 = cc5.first_challenge(&proof.ibas);
let cx5 = cc5.second_challenge(&proof_diff.ds);
assert_eq!(cy1, cy5);
assert_ne!(cx1, cx5);
} |
manager.go | package controllers
import (
"fmt"
"net/http"
"github.com/kooinam/fab.io/views"
socketio "github.com/googollee/go-socket.io"
"github.com/googollee/go-socket.io/engineio"
"github.com/googollee/go-socket.io/engineio/transport"
"github.com/googollee/go-socket.io/engineio/transport/polling"
"github.com/googollee/go-socket.io/engineio/transport/websocket"
"github.com/kooinam/fab.io/helpers"
"github.com/kooinam/fab.io/logger"
)
// Manager is singleton manager for controller module
type Manager struct {
viewsManager *views.Manager
server *socketio.Server
controllerHandlers map[string]*ControllerHandler
}
// Setup used to set up the controller manager
func (manager *Manager) Setup(viewsManager *views.Manager) {
manager.viewsManager = viewsManager
manager.controllerHandlers = make(map[string]*ControllerHandler)
// transporter := websocket.Default
// transporter.CheckOrigin = func(req *http.Request) bool {
// return true
// }
server, err := socketio.NewServer(&engineio.Options{
Transports: []transport.Transport{
polling.Default,
&websocket.Transport{
CheckOrigin: func(r *http.Request) bool {
return true
},
},
},
})
if err != nil {
logger.Debug("socket.io error %v", err)
}
manager.server = server
server.OnConnect("/", func(conn socketio.Conn) error {
logger.Debug("connected: %v%v - %v - %v", conn.URL().Path, conn.URL().RawQuery, conn.URL().RawPath, conn.ID())
return nil
})
server.OnDisconnect("/", func(conn socketio.Conn, reason string) {
logger.Debug("disconnected: %v - %v, %v ", conn.Namespace(), conn.ID(), reason)
})
}
// RegisterController used to register controller
func (manager *Manager) RegisterController(nsp string, controllable Controllable) {
formattedNsp := fmt.Sprintf("/%v", nsp)
manager.controllerHandlers[formattedNsp] = makeControllerHandler(manager, manager.server, formattedNsp, controllable)
manager.server.OnError(formattedNsp, func(conn socketio.Conn, e error) {
logger.Debug("%v", e)
})
}
// Serve used to serve
func (manager *Manager) Serve(port string, httpHandler func()) {
logger.Debug("Initializing fab.io...")
server := manager.server
http.Handle("/socket.io/", corsMiddleware(server))
if httpHandler != nil {
httpHandler()
}
go server.Serve()
logger.Debug("Starting Socket Server @ %v...", port)
http.ListenAndServe(fmt.Sprintf(":%v", port), nil)
}
// BroadcastEvent used to broadcast event
func (manager *Manager) BroadcastEvent(nsp string, room string, eventName string, view interface{}, parameters helpers.H) {
event := makeEvent(nsp, room, eventName, view, parameters)
event.Broadcast(manager.server)
}
func corsMiddleware(next http.Handler) http.Handler | {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
allowHeaders := "Accept, Content-Type, Content-Length, Accept-Encoding, X-CSRF-Token, Authorization"
w.Header().Set("Content-Type", "application/json")
w.Header().Set("Access-Control-Allow-Origin", "*")
w.Header().Set("Access-Control-Allow-Methods", "POST, PUT, PATCH, GET, DELETE")
w.Header().Set("Access-Control-Allow-Headers", "Content-Type, Authorization")
w.Header().Set("Access-Control-Allow-Credentials", "true")
w.Header().Set("Access-Control-Allow-Headers", allowHeaders)
next.ServeHTTP(w, r)
})
} |
|
error.go | // Copyright 2017 Northern.tech AS
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package s3
import (
"encoding/xml"
"net/http"
"github.com/pkg/errors"
)
// getS3Error tries to extract S3 error information from an HTTP response. The response
// body is partially consumed. Returns an error with whatever error information was returned
// by S3, or just a generic description of the problem in case the response is not
// a correct error response.
//
// See https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html for
// example error response returned by S3.
func getS3Error(r *http.Response) error | {
s3rsp := struct {
XMLName xml.Name `xml:"Error"`
Code string `xml:"Code"`
Message string `xml:"Message"`
RequestId string `xml:"RequestId"`
Resource string `xml:"Resource"`
}{}
if r.StatusCode < 300 ||
r.Header.Get("Content-Type") != "application/xml" {
return errors.Errorf("unexpected S3 error response, status: %v, type: %s",
r.StatusCode, r.Header.Get("Content-Type"))
}
dec := xml.NewDecoder(r.Body)
err := dec.Decode(&s3rsp)
if err != nil {
return errors.Wrap(err, "failed to decode XML encoded error response")
}
return errors.Errorf("S3 request failed with code %s: %s, request ID: %s",
s3rsp.Code, s3rsp.Message, s3rsp.RequestId)
} |
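// Illustrative usage sketch: a caller in this package might hand any non-2xx response
// to getS3Error to turn the S3 XML body into a descriptive error. The req variable and
// surrounding request setup are assumed here, not taken from this file.
//
//  rsp, err := http.DefaultClient.Do(req)
//  if err != nil {
//      return err
//  }
//  defer rsp.Body.Close()
//  if rsp.StatusCode >= 300 {
//      return getS3Error(rsp)
//  }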
|
utils.py | # -*- coding: utf-8 -*-
"""Standard utility functions used throughout AlphaGradient"""
# Standard Imports
from __future__ import annotations
from abc import ABC, abstractmethod
import builtins
from datetime import (
date,
datetime,
time,
timedelta,
)
import math
from pathlib import Path
# Third Party Imports
import numpy as np
import pandas as pd
# Typing
from typing import ( | Literal,
Generator,
Generic,
Iterable,
Optional,
TypeVar,
Union,
)
T = TypeVar("T")
class PropertyType(Generic[T]):
"""A Type class for property objects themselves, before being bound to a class instance"""
def fget(self, *args: Any) -> T:
...
Property = builtins.property
"""A Type for builtin properties that have been bound to a class instance"""
PyNumber = Union[int, float]
"""Numeric type that does not include complex numbers (only native python types)"""
Number = Union[PyNumber, np.number, pd.core.arrays.numeric.NumericDtype]
"""Numeric type that does not include complex numbers"""
DatetimeLike = Union[pd.Timestamp, np.datetime64, date, datetime, str]
"""Objects convertable to python datetimes"""
TimeLike = Union[time, str]
"""Objects convertable to python time objects"""
DateOrTime = Union[DatetimeLike, time]
"""Objects that are either DatetimeLike or TimeLike in nature"""
if TYPE_CHECKING:
from typeshed import SupportsLessThanT as SLTT
_global_persistent_path: PropertyType[Path]
def auto_batch(iterable: Iterable) -> Generator:
"""
Returns a generator which yields automatically sized batches
Given a sized iterable, determines an optimal batch size to be used for
multiprocessing purposes. Using this batch size, returns a generator which
yields batches of the iterable with the optimal size
Parameters:
iterable: An iterable from which to create a batch generator
Returns:
The batch generator of the iterable input
"""
return get_batches(iterable, auto_batch_size(iterable))
def auto_batch_size(iterable: Iterable) -> int:
"""
Returns a multiprocessing-optimal batch size for an iterable
Given an iterable, returns an integer value representing an optimal batch
size for use in python's multiprocessing library
Parameters:
iterable (Iterable): Sized iterable to determine optimal batch size for
Returns:
The optimal batch size for multiprocessing
"""
# Converting to a sized iterable to guarantee __len__ functionality
iterable = list(iterable)
# Output Parameters
horizontal_offset = 10000
horizontal_stretch = 70 / 100_000_000
vertical_offset = 100
# Building the quadratic
output: Number
output = len(iterable) - horizontal_offset
output = output**2
output *= -1
output *= horizontal_stretch
output += vertical_offset
# Output bounded between 30 and 100
return bounded(int(output), lower=30, upper=100)
def bounded(
to_bound: SLTT, lower: Optional[SLTT] = None, upper: Optional[SLTT] = None
) -> SLTT:
"""
Bounds an object between a lower and upper bound
Given an object that defines behavior for comparison (__lt__, __gt__),
returns the object bounded between the lower and upper bounds. Boundaries
will be omitted if they are not provided (None). If lower and upper are not
None, they must be of the same type as to_bound.
Type Explanation:
SLTT (SupportsLessThanT): A TypeVar which implements the __lt__ method.
Parameters:
to_bound (SLTT): the object to be bounded
lower (Optional[SLTT]): the lower boundary of the operation
upper (Optional[SLTT]): the upper boundary of the operation
Returns:
The bounded object
"""
if lower is None and upper is None:
raise ValueError(
"Of the parameters 'lower' and 'upper', at least one must be" "specified"
)
if lower:
to_bound = max(to_bound, lower)
if upper:
to_bound = min(to_bound, upper)
return to_bound
def deconstruct_dt(dt: DateOrTime) -> dict[str, float]:
"""
Returns a dictionary of datetime attribute values on object 'dt'
Given a DatetimeLike object, returns a dictionary where keys are the
object's date and time related attribute names, and values are the object's
associated attribute values.
Parameters:
dt (DateOrTime): the dt to deconstruct
Returns:
A dictionary of attributes and their associated values on dt
Raises:
TypeError: Raised if dt is not a datetime-like object, as it wont have
the proper attributes.
"""
# The potential attributes to be accessed
d = ["year", "month", "day"]
t = ["hour", "minute", "second", "microsecond"]
attrs = []
# Accept string arguments to convert to datetime
if isinstance(dt, str):
dt = read_timestring(dt)
# Determine which elements should be accessed on the dt
if isinstance(dt, datetime):
attrs = d + t
elif isinstance(dt, time):
attrs = t
elif isinstance(dt, date):
attrs = d
else:
raise TypeError(f"{dt=} is not a valid datetime object")
# Collecting the attributes
dtdict = {}
for attr in attrs:
dtdict[attr] = getattr(dt, attr)
return dtdict
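# Illustrative sketch of deconstruct_dt's output (the inputs below are hypothetical):
# >>> deconstruct_dt(datetime(2021, 6, 1, 9, 30))
# {'year': 2021, 'month': 6, 'day': 1, 'hour': 9, 'minute': 30, 'second': 0, 'microsecond': 0}
# >>> deconstruct_dt(time(16, 0))
# {'hour': 16, 'minute': 0, 'second': 0, 'microsecond': 0}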
def get_batches(iterable: Iterable, size: int = 100) -> Generator:
"""
Returns a generator of the iterable which yields batches of the given size
Given an iterable, uses the size parameter to create a generator which
yields batches of the iterable of the given size.
Parameters:
iterable: The iterable to yield batches of
size: The batch size of the returned generator
Returns:
A generator which yields batches of size 'size' of the iterable
"""
# Because we will be indexing the iterable, we must instantiate the entire
# thing in memory in case it isn't already (i.e. generators)
iterable = list(iterable)
last = len(iterable)
for i in range(math.ceil(last / size)):
start = i * size
end = start + size
end = end if end < last else last
yield iterable[start:end]
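# Illustrative sketch: get_batches splits a sized iterable into fixed-size chunks,
# while auto_batch (above) derives the chunk size automatically, bounded to 30-100.
# >>> list(get_batches(range(10), size=4))
# [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]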
def get_time(t: DateOrTime) -> time:
"""
Given a timestring or datetime-like object, returns a datetime.time object
Given an object t which represents a time or a datetime, returns a native
python datetime.time object of the appropriate time. t can be an isoformat
time string or datetime string, or a datetime-like object
Parameters:
dt (DateOrTime): The time object to convert
Returns:
The converted datetime.time object
"""
if isinstance(t, (time, str)):
return to_time(t)
return to_datetime(t).time()
def get_weekday(dt: DatetimeLike) -> str:
"""
Returns the day of the week on which a DatetimeLike object falls
Parameters:
dt (DatetimeLike): The object whose weekday is determined
Returns:
String of the day of the week on which the DatetimeLike object falls
"""
weekdays = {
0: "Monday",
1: "Tuesday",
2: "Wednesday",
3: "Thursday",
4: "Friday",
5: "Saturday",
6: "Sunday",
}
return weekdays[to_datetime(dt).weekday()]
def is_func(f: Any) -> bool:
"""
Returns a boolean value indicating whether or not f is a kind of function
Given an object f, returns a boolean value indicating whether or not the
object is a function. Identifies all python objects whose sole or primary
purpose is to be called directly, rather than objects that simply support
an implementation of __call__.
Behavior is slightly different than the inspect module's isfunction(), as it
includes methods (bound and unbound), as well as abstract, static, and class
methods.
A 'function' is an instance of any of the following:
* function
* method (bound or unbound)
* staticmethod
* classmethod
* abstractmethod
* lambda
* built-in-function
Parameters:
f: The object whose status as a function is being determined
Returns:
True if f is a method, function, builtin-method-or-function, or lambda,
else False
"""
# Fake class to access type 'method' and 'classmethod'
class C:
def method(self):
pass
# Getting abstract base methods
class ABCC(ABC):
@abstractmethod
def amethod(self):
pass
# Fake function to access type 'function'
def func():
pass
# Getting class and static methods
cmethod = classmethod(func)
smethod = staticmethod(func)
# Fake lambda to access type 'lambda'
lamb = lambda: None
# Fake instance to access type 'bound method'
c = C()
# Gathering all callable types
functype = type(func)
methodtype = type(C.method)
classmethodtype = type(cmethod)
staticmethodtype = type(smethod)
abstractmethodtype = type(ABCC.amethod)
boundmethodtype = type(c.method)
lambdatype = type(lamb)
builtintype = type(print)
return isinstance(
f,
(
functype,
methodtype,
boundmethodtype,
lambdatype,
builtintype,
abstractmethodtype,
classmethodtype,
staticmethodtype,
),
)
def nearest_expiry(
expiry: DatetimeLike, method: Literal["after", "before", "both"] = "after"
) -> datetime:
"""
Returns the nearest valid expiry to the input datetime object
Determining expiries for options contracts can be difficult, because they
must fall on a business day, and their expiry time must be the market close.
Given an expiry whose validity is unknown, this function returns the
nearest expiry that is guaranteed to be valid. If the given expiry is
valid, it will be unchanged when it is returned.
The method argument is used to determine how the 'nearest' is defined. It
has three options: "after", "before", and "both"
Method must be one of the following string literals:
* "after": returns the nearest expiry that is AFTER the input expiry
* "before": returns the nearest expiry that is BEFORE the input expiry.
* | "both": compares the distances of the nearest before and after, and
| return the smaller of the two. In the case that they are equal, the
| date determined by "after" will be used.
The default argument is "after" because using "before" or "both" can
potentially lead to dangerous behavior for algorithms, as it can return an
expiry which is before the current date of the algorithm. This can cause
options contracts to initialize as expired. Only change the method
argument if you are positive that the returned expiry will be greater
than the algorithm's current date.
Parameters:
expiry (DatetimeLike):
The expiry whose closest valid expiry will be determined
method:
One of "after", "before", or "both"
Returns:
The nearest valid expiry
"""
# Ensuring expiry is a pydatetime
expiry = to_datetime(expiry)
# All expiries must expire at market close (4PM)
expiry = set_time(expiry, "4:00 PM")
# Change the expiry day if it is not a weekday
if expiry.weekday() > 4:
# Closest AFTER
if method == "after":
dist = 7 - expiry.weekday()
expiry += timedelta(days=dist)
# Closest BEFORE
elif method == "before":
dist = expiry.weekday() - 4
expiry -= timedelta(days=dist)
# Comparing both
elif method == "both":
bdist = expiry.weekday() - 4
adist = 7 - expiry.weekday()
if bdist < adist:
expiry -= timedelta(days=bdist)
else:
expiry += timedelta(days=adist)
return expiry
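# Illustrative sketch: 2021-06-05 falls on a Saturday, so it is not a valid expiry.
# >>> nearest_expiry("2021-06-05")                    # default method="after"
# datetime.datetime(2021, 6, 7, 16, 0)                # next Monday at market close
# >>> nearest_expiry("2021-06-05", method="before")
# datetime.datetime(2021, 6, 4, 16, 0)                # previous Friday at market close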
def optimal_start(
start: datetime,
max_start: datetime,
min_end: datetime,
end: Optional[DatetimeLike] = None,
t: Optional[TimeLike] = None,
) -> datetime:
"""
Based on an Environment's instantiated/tracked assets, returns an optimal datetime
for starting a backtest
Returns a backtest starting datetime that:
* Is guaranteed to be within the date range of all instantiated assets
* | Is guaranteed to have ample time for calculations of historical
| volatility, beta, percent change etc. BEFORE the start date
* Automatically adjusts to accommodate shorter ending periods
Parameters:
start:
A datetime object indicating the actual starting datetime
max_start:
A datetime object indicating the maximum possible starting datetime
min_end:
A datetime object indicating the minimum possible ending datetime
end (Optional[DatetimeLike]):
The desired endpoint on which to base the optimal start point
t (Optional[TimeLike]):
The returned optimal start's time
Returns:
The optimal starting datetime
"""
end = min_end if end is None else to_datetime(end)
# If the maximum start date is before the minimum end date, there is
# no valid 'optimal start', because there is no date range that allows
# backtesting of all available data.
if max_start >= end:
return start
# Determining the optimal start period. To avoid errors, we will not sync to the beginning
optimal_delta = (end - max_start) / 2
optimal_date = max_start + optimal_delta
# Setting the optimal date's time to midnight unless specified otherwise
t = "00:00:00" if t is None else to_time(t)
optimal_date = set_time(optimal_date, t)
# Bounding the date to acceptable minimums and maximums
lower_bound = set_time(max_start + timedelta(days=1), t)
upper_bound = set_time(max_start + timedelta(days=365), t)
optimal_start = bounded(optimal_date, lower=lower_bound, upper=upper_bound)
return optimal_start
def progress_print(to_print: Any, last: list[int] = [0]) -> None:
"""Prints, but returns the carriage to the front of the last print"""
print("\r" + (" " * last[0]), end="\r", flush=True) # type: ignore[operator]
print(to_print, end="", flush=True)
last[0] = len(str(to_print))
def read_timestring(timestring: str) -> time:
"""
Given a timestring, returns a datetime.time object representative of the time
This function reads in 'timestrings', which are one of two things:
#. | Isoformat times as strings, using 24 hours
| (eg 04:00:00, 18:30, 02:59:59.99, etc)
#. | Strings based on 12 hour clocks
| (see ag.utils.read_twelve_hour_timestring docs)
Using this timestring, returns a python datetime.time object corresponding
to the time in the timestring.
Parameters:
timestring:
string representing the time
Returns:
The time object corresponding to the time in the timestring
"""
try:
return read_twelve_hour_timestring(timestring)
except (TypeError, ValueError):
return time.fromisoformat(timestring)
def read_twelve_hour_timestring(timestring: str) -> time:
"""Reads a timestring based on a 12 hour clock and returns a time
Given a timestring representing a time on a 12 hour clock, returns the
appropriate time object
Must be formatted as follows:
* hour | This is the only required value, integer
* minute | separated from hour by a colon, optional, integer
* second | separated from minute by a colon, optional, float
* AM/PM | string 'AM' or 'PM', separated from second by a space
When AM or PM is not provided in the timestring, AM will be assumed.
Valid Examples:
* '4:30 PM'
* '4:30 AM'
* '1 PM'
* '1'
* '11:59:59.999 PM'
* '12:00:00 AM'
Invalid Examples:
* '0:00'
* '13:30'
* '103 PM'
* '0'
* '22'
* '4:30:99 PM'
* '3:99 PM'
Parameters:
timestring: The string containing the time to convert to a time object
Returns:
The corresponding time object
Raises:
TypeError:
When timestring is not a string. Only str objects can be parsed
ValueError:
When the timetring is invalid / improperly formatted.
"""
# Timestrings must be strs
if not isinstance(timestring, str):
raise TypeError(f"timestring must be a string, got {type(timestring)}")
# Variable Initialization
ampm = "AM"
info = []
timestring = timestring.split(" ") # type: ignore[assignment]
# Getting AM/PM component
if len(timestring) > 1:
ampm = timestring[1]
# Getting individual time components
info = timestring[0].split(":")
# isoformat is 00:00:00.00, max 3 colons
if len(info) > 4:
raise ValueError(f"Failed to parse timestring {timestring}")
# collecting the attributes necessary to create a time object
tdict = {}
attrs = ["hour", "minute", "second", "microsecond"]
for attr, value in zip(attrs, info):
tdict[attr] = int(value)
# Setting missing components to 0
for attr in attrs:
if not tdict.get(attr):
tdict[attr] = 0
# hours less than 1 or more than 12 are off limits in 12 hour clocks
if not 1 <= tdict["hour"] <= 12:
raise ValueError(f"Failed to parse timestring {timestring}")
# 12:30 AM is 00:30 isoformat
if ampm == "AM" and tdict["hour"] == 12:
tdict["hour"] == 0
# 12:30 PM is 12:30 isoformat, 1:30 PM is 13:30 isoformat
elif ampm == "PM" and tdict["hour"] < 12:
tdict["hour"] += 12
# Building and returning a time object
return time(**tdict) # type: ignore[arg-type]
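# Illustrative sketch of the 12-hour parser on inputs from the docstring above:
# >>> read_twelve_hour_timestring("4:30 PM")
# datetime.time(16, 30)
# >>> read_twelve_hour_timestring("11:59:59 PM")
# datetime.time(23, 59, 59)
# >>> read_twelve_hour_timestring("13:30")   # hour outside 1-12 -> ValueError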
def set_time(dt: DatetimeLike, t: DateOrTime) -> datetime:
"""Sets the given datetime-like object to the given time
Given a DatetimeLike object 'dt' and a time-like object 't', returns a
datetime like object that shares the date of dt and the time of t.
Very similar to datetime.combine, but accepts datetime objects for both
inputs.
Parameters:
dt (DatetimeLike): Datetime to convert
t (DateOrTime): Time to convert to
Returns:
python datetime.datetime object with converted time
"""
# Initializing the new time that will be set
newtime: dict[str, float] = {}
# Reading the necessary time attributes
if isinstance(t, str):
t = read_timestring(t)
newtime = deconstruct_dt(t)
elif isinstance(t, time):
newtime = deconstruct_dt(t)
else:
newtime = deconstruct_dt(to_datetime(t).time())
# Creating the new datetime with t=t
return to_datetime(dt).replace(**newtime) # type: ignore [arg-type]
def timestring(t: DateOrTime) -> str:
"""Converts a time-like object to a 12-hour-clock timestring
Given a time-like object t, returns a timestring represented by the
12-hour-clock (eg. 4:30 PM).
Parameters:
t (DateOrTime):
date or time object to read into a 12-hour-clock-based timestring
Returns:
A string representing the time on a 12-hour-clock
"""
# Ensuring that t is a time object
if not isinstance(t, time):
t = to_datetime(t).time()
# Deconstructing components to create a time string
ampm = "AM"
hour = t.hour
minute = t.minute if t.minute > 9 else f"0{t.minute}"
if hour > 12:
ampm = "PM"
hour -= 12
return f"{hour}:{minute} {ampm}"
def to_datetime(dtlike: DatetimeLike) -> datetime:
"""
Given a datetime-like object, converts it to a python standard datetime
Parameters:
dtlike (DatetimeLike):
The Datetime-convertable object
Returns:
The converted python datetime
Raises:
TypeError: Only accepts python-datetime-convertable objects
"""
if isinstance(dtlike, datetime):
return dtlike
elif isinstance(dtlike, pd.Timestamp):
return dtlike.to_pydatetime()
elif isinstance(dtlike, np.datetime64):
return pd.Timestamp(dtlike).to_pydatetime()
elif isinstance(dtlike, date):
return datetime.combine(dtlike, datetime.min.time())
elif isinstance(dtlike, str):
return datetime.fromisoformat(dtlike)
raise TypeError(f"Can not convert passed object {dtlike} to python datetime")
def to_step(current: datetime, delta: Union[DateOrTime, timedelta, float]) -> timedelta:
"""
Converts an ambiguous delta object to a python timedelta
Given an amiguous object which can in some way be interpreted as a timedelta
relative to some 'current' time, converts that object to an appropriate
timedelta object, or 'step' in time.
Parameters:
current:
The 'current' time, which determines how to interpret the delta
delta (Union[DateOrTime, timedelta, float]);
The object being passed that may represent a 'step' in time
Returns:
the appropriate timedelta 'step'
Raises:
TypeError:
When passed a type that can not be coerced/interpreted
ValueError:
When a type-appropriate object can not be coerced, or is in some way
invalid (eg. the step in time is BEFORE the current time)
"""
# Multiple parses must be made on strings to successfully coerce all of them
if isinstance(delta, str):
try:
delta = set_time(current, read_timestring(delta))
except ValueError:
delta = datetime.fromisoformat(delta) # type: ignore[arg-type]
elif isinstance(delta, time):
delta = set_time(current, delta)
elif isinstance(delta, (float, int)):
delta = current + timedelta(days=delta)
elif isinstance(delta, timedelta):
delta = current + delta
# if isinstance(delta, DatetimeLike):
else:
delta = to_datetime(delta)
if delta > current:
return delta - current
raise ValueError(
f"Passed delta {delta} is prior to current time {current}. Please "
"choose a time AFTER the current date."
)
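# Illustrative sketch: the same one-day step expressed in different delta forms.
# >>> now = datetime(2021, 6, 5, 9, 30)
# >>> to_step(now, 1)                        # ints/floats are read as days
# datetime.timedelta(days=1)
# >>> to_step(now, timedelta(hours=36))
# datetime.timedelta(days=1, seconds=43200)
# >>> to_step(now, datetime(2021, 6, 6, 9, 30))
# datetime.timedelta(days=1)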
def to_time(tlike: TimeLike) -> time:
"""
Given a TimeLike object, converts it to a python standard time object
Parameters:
tlike:
The time-convertable object
Returns:
The converted python time object
Raises:
TypeError: Only accepts python-time-convertable objects
"""
if isinstance(tlike, str):
return read_timestring(tlike)
elif isinstance(tlike, time):
return tlike
raise TypeError(f"Can not convert passed object {tlike} to python time")
class NullClass:
"""
A class designed to take the place of other functions, modules, or classes
This class stands in place of a function, class, or module attached to
another class as an attribute. When an attribute is initialized as a
NullClass, one can safely access it as an attribute, call it, and access
attributes on it. These actions can also be performed recursively; any of
these operations performed on the nullclass will simply return itself,
allowing them to be chained infinitely.
Use this class in place of another function or class in order to safely
use an attribute without making constant checks.
This is most useful in place of functions/classes that perform
logging/printing, but also makes sense in place of functions that modify
things in place or always return None.
Examples:
.. highlight:: python
.. code-block:: python
class MyClass:
def __init__(self, data, verbose=False):
# This is cleaner and more pythonic than...
self.print = print if verbose else NullClass()
self.print("Initialized as Verbose!")
# Alternative 1
self.print = print if verbose else lambda *args, **kwargs: None
self.print("Initialized as Verbose!")
# Alternative 2
self.print = print if verbose else None
if self.print is not None:
self.print("Initialized as Verbose!")
# Alternative 3
self.verbose = verbose
if self.verbose:
print("Initialized as Verbose!")
# etc etc etc...
# This is cleaner and more pythonic than...
self.tqdm = tqdm.progress_bar if verbose else NullClass()
with self.tqdm(total=1000) as pbar:
while condition:
self.do_something()
pbar.update(1) # Safe!
# Alternative
self.verbose = verbose
if verbose:
with tqdm.progress_bar(total=1000) as pbar:
while condition:
self.do_something()
pbar.update(1)
else:
while condition:
self.do_something() # gross.
"""
def __call__(self, *args: Any, **kwargs: Any) -> NullClass:
return self
def __getattr__(self, attr: str) -> NullClass:
return self
def __enter__(self, *args, **kwargs) -> NullClass:
return self
def __exit__(self, *args, **kwargs) -> None:
pass
def __bool__(self) -> bool:
return False | TYPE_CHECKING,
Any, |
objects_get_members_test.go | package pubnub
import (
"fmt"
"strconv"
"testing"
h "github.com/pubnub/go/tests/helpers"
"github.com/pubnub/go/utils"
"github.com/stretchr/testify/assert"
)
func AssertGetMembers(t *testing.T, checkQueryParam, testContext, withFilter bool) |
func TestGetMembers(t *testing.T) {
AssertGetMembers(t, true, false, false)
}
func TestGetMembersContext(t *testing.T) {
AssertGetMembers(t, true, true, false)
}
func TestGetMembersWithFilter(t *testing.T) {
AssertGetMembers(t, true, false, true)
}
func TestGetMembersWithFilterContext(t *testing.T) {
AssertGetMembers(t, true, true, true)
}
func TestGetMembersResponseValueError(t *testing.T) {
assert := assert.New(t)
pn := NewPubNub(NewDemoConfig())
opts := &getMembersOpts{
pubnub: pn,
}
jsonBytes := []byte(`s`)
_, _, err := newPNGetMembersResponse(jsonBytes, opts, StatusResponse{})
assert.Equal("pubnub/parsing: Error unmarshalling response: {s}", err.Error())
}
func TestGetMembersResponseValuePass(t *testing.T) {
assert := assert.New(t)
pn := NewPubNub(NewDemoConfig())
opts := &getMembersOpts{
pubnub: pn,
}
jsonBytes := []byte(`{"status":200,"data":[{"id":"id0","custom":{"a3":"b3","c3":"d3"},"user":{"id":"id0","name":"name","externalId":"extid","profileUrl":"purl","email":"email","custom":{"a":"b","c":"d"},"created":"2019-08-20T13:26:19.140324Z","updated":"2019-08-20T13:26:19.140324Z","eTag":"AbyT4v2p6K7fpQE"},"created":"2019-08-20T13:26:24.07832Z","updated":"2019-08-20T13:26:24.07832Z","eTag":"AamrnoXdpdmzjwE"}],"totalCount":1,"next":"MQ","prev":"NQ"}`)
r, _, err := newPNGetMembersResponse(jsonBytes, opts, StatusResponse{})
assert.Equal(1, r.TotalCount)
assert.Equal("MQ", r.Next)
assert.Equal("NQ", r.Prev)
assert.Equal("id0", r.Data[0].ID)
assert.Equal("name", r.Data[0].User.Name)
assert.Equal("extid", r.Data[0].User.ExternalID)
assert.Equal("purl", r.Data[0].User.ProfileURL)
assert.Equal("email", r.Data[0].User.Email)
assert.Equal("2019-08-20T13:26:19.140324Z", r.Data[0].User.Created)
assert.Equal("2019-08-20T13:26:19.140324Z", r.Data[0].User.Updated)
assert.Equal("AbyT4v2p6K7fpQE", r.Data[0].User.ETag)
assert.Equal("b", r.Data[0].User.Custom["a"])
assert.Equal("d", r.Data[0].User.Custom["c"])
assert.Equal("2019-08-20T13:26:24.07832Z", r.Data[0].Created)
assert.Equal("2019-08-20T13:26:24.07832Z", r.Data[0].Updated)
assert.Equal("AamrnoXdpdmzjwE", r.Data[0].ETag)
assert.Equal("b3", r.Data[0].Custom["a3"])
assert.Equal("d3", r.Data[0].Custom["c3"])
assert.Nil(err)
}
| {
assert := assert.New(t)
pn := NewPubNub(NewDemoConfig())
incl := []PNMembersInclude{
PNMembersCustom,
}
queryParam := map[string]string{
"q1": "v1",
"q2": "v2",
}
if !checkQueryParam {
queryParam = nil
}
inclStr := EnumArrayToStringArray(incl)
o := newGetMembersBuilder(pn)
if testContext {
o = newGetMembersBuilderWithContext(pn, backgroundContext)
}
spaceID := "id0"
limit := 90
start := "Mxmy"
end := "Nxny"
o.SpaceID(spaceID)
o.Include(incl)
o.Limit(limit)
o.Start(start)
o.End(end)
o.Count(false)
o.QueryParam(queryParam)
if withFilter {
o.Filter("custom.a5 == 'b5' || custom.c5 == 'd5'")
}
path, err := o.opts.buildPath()
assert.Nil(err)
h.AssertPathsEqual(t,
fmt.Sprintf("/v1/objects/%s/spaces/%s/users", pn.Config.SubscribeKey, "id0"),
path, []int{})
body, err := o.opts.buildBody()
assert.Nil(err)
assert.Empty(body)
if checkQueryParam {
u, _ := o.opts.buildQuery()
assert.Equal("v1", u.Get("q1"))
assert.Equal("v2", u.Get("q2"))
assert.Equal(string(utils.JoinChannels(inclStr)), u.Get("include"))
assert.Equal(strconv.Itoa(limit), u.Get("limit"))
assert.Equal(start, u.Get("start"))
assert.Equal(end, u.Get("end"))
assert.Equal("0", u.Get("count"))
if withFilter {
assert.Equal("custom.a5 == 'b5' || custom.c5 == 'd5'", u.Get("filter"))
}
}
} |
main.rs | use std::{
collections::HashSet,
io::{BufRead, BufReader},
error::Error,
fs::File,
};
fn main() -> Result<(), Box<dyn Error>> {
let deltas = read_deltas()?;
let final_frequency: i32 = deltas.iter().sum();
println!("The final frequency is {}.", final_frequency);
let mut frequency = 0;
let mut past_frequencies = HashSet::new();
past_frequencies.insert(frequency);
for delta in deltas.iter().cycle() {
frequency += delta; | if !past_frequencies.insert(frequency) {
break;
}
}
println!("The first repeated frequency value is {}.", frequency);
Ok(())
}
fn read_deltas() -> Result<Vec<i32>, Box<dyn Error>> {
let file = File::open("puzzle_input.txt")?;
let reader = BufReader::new(file);
let mut deltas = Vec::new();
for line in reader.lines() {
let delta = line?.trim().parse()?;
deltas.push(delta);
}
Ok(deltas)
} | |
task.d.ts | export declare class Task {
callback: CallableFunction; | args: Array<Object>;
constructor(callback?: CallableFunction, args?: Array<Object>);
} |
|
command_remote_mount_buckets.go | package shell
import (
"flag"
"fmt"
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/pb/remote_pb"
"github.com/chrislusf/seaweedfs/weed/remote_storage"
"github.com/chrislusf/seaweedfs/weed/util"
"io"
"path/filepath"
"regexp"
)
func init() {
Commands = append(Commands, &commandRemoteMountBuckets{})
}
type commandRemoteMountBuckets struct {
}
func (c *commandRemoteMountBuckets) Name() string {
return "remote.mount.buckets"
}
func (c *commandRemoteMountBuckets) Help() string {
return `mount all buckets in remote storage and pull its metadata
# assume a remote storage is configured to name "cloud1"
remote.configure -name=cloud1 -type=s3 -access_key=xxx -secret_key=yyy
# mount all buckets
remote.mount.buckets -remote=cloud1
# after mount, start a separate process to write updates to remote storage
weed filer.remote.sync -filer=<filerHost>:<filerPort> -createBucketAt=cloud1
`
}
func (c *commandRemoteMountBuckets) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
remoteMountBucketsCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
remote := remoteMountBucketsCommand.String("remote", "", "an already configured storage name")
bucketPattern := remoteMountBucketsCommand.String("bucketPattern", "", "match existing bucket name with wildcard characters '*' and '?'")
trimBucketSuffix := remoteMountBucketsCommand.Bool("trimBucketSuffix", true, "remote suffix auto generated by 'weed filer.remote.sync'")
apply := remoteMountBucketsCommand.Bool("apply", false, "apply the mount for listed buckets")
if err = remoteMountBucketsCommand.Parse(args); err != nil {
return nil
}
if *remote == "" {
_, err = listExistingRemoteStorageMounts(commandEnv, writer)
return err
}
// find configuration for remote storage
remoteConf, err := filer.ReadRemoteStorageConf(commandEnv.option.GrpcDialOption, commandEnv.option.FilerAddress, *remote)
if err != nil {
return fmt.Errorf("find configuration for %s: %v", *remote, err)
}
// get storage client
remoteStorageClient, err := remote_storage.GetRemoteStorage(remoteConf)
if err != nil {
return fmt.Errorf("get storage client for %s: %v", *remote, err)
}
buckets, err := remoteStorageClient.ListBuckets()
if err != nil {
return fmt.Errorf("list buckets on %s: %v", *remote, err)
}
fillerBucketsPath, err := readFilerBucketsPath(commandEnv)
if err != nil {
return fmt.Errorf("read filer buckets path: %v", err)
}
hasSuffixPattern, _ := regexp.Compile(".+-[0-9][0-9][0-9][0-9]")
for _, bucket := range buckets {
if *bucketPattern != "" {
if matched, _ := filepath.Match(*bucketPattern, bucket.Name); !matched {
continue
}
}
fmt.Fprintf(writer, "bucket %s\n", bucket.Name)
localBucketName := bucket.Name
if *trimBucketSuffix {
if hasSuffixPattern.MatchString(localBucketName) {
localBucketName = localBucketName[:len(localBucketName)-5]
fmt.Fprintf(writer, " mount bucket %s as %s\n", bucket.Name, localBucketName)
}
}
if *apply {
dir := util.FullPath(fillerBucketsPath).Child(localBucketName)
remoteStorageLocation := &remote_pb.RemoteStorageLocation{
Name: *remote,
Bucket: bucket.Name,
Path: "/",
}
// sync metadata from remote
if err = syncMetadata(commandEnv, writer, string(dir), true, remoteConf, remoteStorageLocation); err != nil |
// store a mount configuration in filer
if err = filer.InsertMountMapping(commandEnv, string(dir), remoteStorageLocation); err != nil {
return fmt.Errorf("save mount mapping %s to %+v: %v", dir, remoteStorageLocation, err)
}
}
}
return nil
}
| {
return fmt.Errorf("pull metadata on %+v: %v", remoteStorageLocation, err)
} |
section.rs | //! Support for generating a standard wasm interface types section
//!
//! This module has all the necessary support for generating a full-fledged
//! standard wasm interface types section as defined by the `wit_walrus`
//! crate. This module also critically assumes that the WebAssembly module
//! being generated **must be standalone**. In this mode all sorts of features
//! supported by `#[wasm_bindgen]` aren't actually supported, such as closures,
//! imports of global js names, js getters/setters, exporting structs, etc.
//! These features may all eventually come to the standard bindings proposal,
//! but it will likely take some time. In the meantime this module simply focuses
//! on taking what's already a valid wasm module and letting it through with a
//! standard WebIDL custom section. All other modules generate an error during
//! this binding process.
//!
//! Note that when this function is called and used we're also not actually
//! generating any JS glue. Any JS glue currently generated is also invalid if
//! the module contains the wasm bindings section and it's actually respected.
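//!
//! A minimal sketch of how this pass might be driven (hypothetical caller;
//! the module path and the file names below are assumptions, not part of
//! this crate's public API):
//!
//! ```ignore
//! let mut module = walrus::Module::from_file("app.wasm")?;
//! // The NonstandardWitSection and WasmBindgenAux custom sections are
//! // expected to have been filled in by earlier wasm-bindgen passes.
//! section::add(&mut module)?;
//! module.emit_wasm_file("app.standalone.wasm")?;
//! ```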
use crate::wit::{AdapterId, AdapterJsImportKind, AdapterType, Instruction};
use crate::wit::{AdapterKind, NonstandardWitSection, WasmBindgenAux};
use crate::wit::{AuxExport, InstructionData};
use crate::wit::{AuxExportKind, AuxImport, AuxValue, JsImport, JsImportName};
use anyhow::{anyhow, bail, Context, Error};
use std::collections::HashMap;
use walrus::Module;
pub fn add(module: &mut Module) -> Result<(), Error> {
let nonstandard = module
.customs
.delete_typed::<NonstandardWitSection>()
.unwrap();
let aux = module.customs.delete_typed::<WasmBindgenAux>().unwrap();
let mut section = wit_walrus::WasmInterfaceTypes::default();
let WasmBindgenAux {
extra_typescript: _, // ignore this even if it's specified
local_modules,
snippets,
package_jsons,
export_map,
import_map,
imports_with_catch,
imports_with_variadic,
imports_with_assert_no_shim: _, // not relevant for this purpose
enums,
structs,
// irrelevant ids used to track various internal intrinsics and such
externref_table: _,
externref_alloc: _,
externref_drop: _,
externref_drop_slice: _,
exn_store: _,
shadow_stack_pointer: _,
function_table: _,
thread_destroy: _,
} = *aux;
let adapter_context = |id: AdapterId| {
if let Some((name, _)) = nonstandard.exports.iter().find(|p| p.1 == id) {
return format!("in function export `{}`", name);
}
if let Some((core, _, _)) = nonstandard.implements.iter().find(|p| p.2 == id) {
let import = module.imports.get(*core);
return format!(
"in function import from `{}::{}`",
import.module, import.name
);
}
format!("in adapter function")
};
let mut us2walrus = HashMap::new();
for (us, func) in crate::sorted_iter(&nonstandard.adapters) {
if let Some(export) = export_map.get(us) {
check_standard_export(export).context(adapter_context(*us))?;
}
if let Some(import) = import_map.get(us) {
check_standard_import(import).context(adapter_context(*us))?;
}
let params = translate_tys(&func.params).context(adapter_context(*us))?;
let results = translate_tys(&func.results).context(adapter_context(*us))?;
let ty = section.types.add(params, results);
let walrus = match &func.kind {
AdapterKind::Local { .. } => section.funcs.add_local(ty, Vec::new()),
AdapterKind::Import {
module,
name,
kind: AdapterJsImportKind::Normal,
} => section.add_import_func(module, name, ty).0,
AdapterKind::Import {
module,
name,
kind: AdapterJsImportKind::Constructor,
} => {
bail!(
"interfaces types doesn't support import of `{}::{}` \
as a constructor",
module,
name
);
}
AdapterKind::Import {
module,
name,
kind: AdapterJsImportKind::Method,
} => {
bail!(
"interfaces types doesn't support import of `{}::{}` \
as a method",
module,
name
);
}
};
us2walrus.insert(*us, walrus);
}
for (_, core, adapter) in nonstandard.implements.iter() {
section.implements.add(us2walrus[adapter], *core);
}
for (name, adapter) in nonstandard.exports.iter() {
section.exports.add(name, us2walrus[adapter]);
}
for (id, func) in nonstandard.adapters.iter() {
let instructions = match &func.kind {
AdapterKind::Local { instructions } => instructions,
AdapterKind::Import { .. } => continue,
};
let result = match &mut section.funcs.get_mut(us2walrus[id]).kind {
wit_walrus::FuncKind::Local(i) => i,
_ => unreachable!(),
};
for instruction in instructions {
result.push(
translate_instruction(instruction, &us2walrus, module)
.with_context(|| adapter_context(*id))?,
);
}
}
if let Some((name, _)) = local_modules.iter().next() {
bail!(
"generating a bindings section is currently incompatible with \
local JS modules being specified as well, `{}` cannot be used \
since a standalone wasm file is being generated",
name,
);
}
if let Some((name, _)) = snippets.iter().filter(|(_, v)| !v.is_empty()).next() {
bail!(
"generating a bindings section is currently incompatible with \
local JS snippets being specified as well, `{}` cannot be used \
since a standalone wasm file is being generated",
name,
);
}
if let Some(path) = package_jsons.iter().next() {
bail!(
"generating a bindings section is currently incompatible with \
package.json being consumed as well, `{}` cannot be used \
since a standalone wasm file is being generated",
path.display(),
);
}
if let Some(id) = imports_with_catch.iter().next() {
bail!(
"{}\ngenerating a bindings section is currently incompatible with \
`#[wasm_bindgen(catch)]`",
adapter_context(*id),
);
}
if let Some(id) = imports_with_variadic.iter().next() {
bail!(
"{}\ngenerating a bindings section is currently incompatible with \
`#[wasm_bindgen(variadic)]`",
adapter_context(*id),
);
}
if let Some(enum_) = enums.iter().next() {
bail!(
"generating a bindings section is currently incompatible with \
exporting an `enum` from the wasm file, cannot export `{}`",
enum_.name,
);
}
if let Some(struct_) = structs.iter().next() {
bail!(
"generating a bindings section is currently incompatible with \
exporting a `struct` from the wasm file, cannot export `{}`",
struct_.name,
);
}
module.customs.add(section);
Ok(())
}
fn translate_instruction(
instr: &InstructionData,
us2walrus: &HashMap<AdapterId, wit_walrus::FuncId>,
module: &Module,
) -> Result<wit_walrus::Instruction, Error> {
use Instruction::*;
match &instr.instr {
Standard(s) => Ok(s.clone()),
CallAdapter(id) => {
let id = us2walrus[id];
Ok(wit_walrus::Instruction::CallAdapter(id))
}
CallExport(e) => match module.exports.get(*e).item {
walrus::ExportItem::Function(f) => Ok(wit_walrus::Instruction::CallCore(f)),
_ => bail!("can only call exported functions"),
},
CallTableElement(e) => {
let entry = wasm_bindgen_wasm_conventions::get_function_table_entry(module, *e)?;
let id = entry
.func
.ok_or_else(|| anyhow!("function table wasn't filled in a {}", e))?;
Ok(wit_walrus::Instruction::CallCore(id))
}
StringToMemory {
mem,
malloc,
realloc: _,
} => Ok(wit_walrus::Instruction::StringToMemory {
mem: *mem,
malloc: *malloc,
}),
StoreRetptr { .. } | LoadRetptr { .. } | Retptr { .. } => {
bail!("return pointers aren't supported in wasm interface types");
}
I32FromBool | BoolFromI32 => {
bail!("booleans aren't supported in wasm interface types");
}
I32FromStringFirstChar | StringFromChar => {
bail!("chars aren't supported in wasm interface types");
}
// Note: if `ExternrefLoadOwned` contained `Some`, this error message wouldn't make sense,
// but that can only occur when returning `Result`,
// in which case there'll be an earlier `UnwrapResult` instruction and we'll bail before reaching this point.
I32FromExternrefOwned | I32FromExternrefBorrow | ExternrefLoadOwned { .. } | TableGet => {
bail!("externref pass failed to sink into wasm module");
}
I32FromExternrefRustOwned { .. }
| I32FromExternrefRustBorrow { .. }
| RustFromI32 { .. } => {
bail!("rust types aren't supported in wasm interface types");
}
I32Split64 { .. } | I64FromLoHi { .. } => {
bail!("64-bit integers aren't supported in wasm-bindgen");
}
I32SplitOption64 { .. }
| I32FromOptionExternref { .. }
| I32FromOptionU32Sentinel
| I32FromOptionRust { .. }
| I32FromOptionBool
| I32FromOptionChar
| I32FromOptionEnum { .. }
| FromOptionNative { .. }
| OptionVector { .. }
| OptionString { .. }
| OptionRustFromI32 { .. }
| OptionVectorLoad { .. }
| OptionView { .. }
| OptionU32Sentinel
| ToOptionNative { .. }
| OptionBoolFromI32
| OptionCharFromI32
| OptionEnumFromI32 { .. }
| Option64FromI32 { .. } => {
bail!("optional types aren't supported in wasm bindgen");
}
UnwrapResult { .. } | UnwrapResultString { .. } => {
bail!("self-unwrapping result types aren't supported in wasm bindgen");
}
MutableSliceToMemory { .. } | VectorToMemory { .. } | VectorLoad { .. } | View { .. } => {
bail!("vector slices aren't supported in wasm interface types yet");
}
CachedStringLoad { .. } => {
bail!("cached strings aren't supported in wasm interface types");
}
StackClosure { .. } => {
bail!("closures aren't supported in wasm interface types");
}
}
}
fn check_standard_import(import: &AuxImport) -> Result<(), Error> |
fn check_standard_export(export: &AuxExport) -> Result<(), Error> {
// First up make sure this is something that's actually valid to export
// from a vanilla WebAssembly module with WebIDL bindings.
match &export.kind {
AuxExportKind::Function(_) => Ok(()),
AuxExportKind::Constructor(name) => {
bail!(
"cannot export `{}` constructor function when generating \
a standalone WebAssembly module with no JS glue",
name,
);
}
AuxExportKind::Getter { class, field, .. } => {
bail!(
"cannot export `{}::{}` getter function when generating \
a standalone WebAssembly module with no JS glue",
class,
field,
);
}
AuxExportKind::Setter { class, field, .. } => {
bail!(
"cannot export `{}::{}` setter function when generating \
a standalone WebAssembly module with no JS glue",
class,
field,
);
}
AuxExportKind::StaticFunction { class, name } => {
bail!(
"cannot export `{}::{}` static function when \
generating a standalone WebAssembly module with no \
JS glue",
class,
name
);
}
AuxExportKind::Method { class, name, .. } => {
bail!(
"cannot export `{}::{}` method when \
generating a standalone WebAssembly module with no \
JS glue",
class,
name
);
}
}
}
fn translate_tys(tys: &[AdapterType]) -> Result<Vec<wit_walrus::ValType>, Error> {
tys.iter()
.map(|ty| {
ty.to_wit()
.ok_or_else(|| anyhow!("type {:?} isn't supported in standard interface types", ty))
})
.collect()
}
| {
let desc_js = |js: &JsImport| {
let mut extra = String::new();
for field in js.fields.iter() {
extra.push_str(".");
extra.push_str(field);
}
match &js.name {
JsImportName::Global { name } | JsImportName::VendorPrefixed { name, .. } => {
format!("global `{}{}`", name, extra)
}
JsImportName::Module { module, name } => {
format!("`{}{}` from '{}'", name, extra, module)
}
JsImportName::LocalModule { module, name } => {
format!("`{}{}` from local module '{}'", name, extra, module)
}
JsImportName::InlineJs {
unique_crate_identifier,
name,
..
} => format!(
"`{}{}` from inline js in '{}'",
name, extra, unique_crate_identifier
),
}
};
let item = match import {
AuxImport::Value(AuxValue::Bare(js)) => {
if js.fields.len() == 0 {
if let JsImportName::Module { .. } = js.name {
return Ok(());
}
}
desc_js(js)
}
AuxImport::Value(AuxValue::Getter(js, name))
| AuxImport::Value(AuxValue::Setter(js, name))
| AuxImport::Value(AuxValue::ClassGetter(js, name))
| AuxImport::Value(AuxValue::ClassSetter(js, name)) => {
format!("field access of `{}` for {}", name, desc_js(js))
}
AuxImport::ValueWithThis(js, method) => format!("method `{}.{}`", desc_js(js), method),
AuxImport::Instanceof(js) => format!("instance of check of {}", desc_js(js)),
AuxImport::Static(js) => format!("static js value {}", desc_js(js)),
AuxImport::StructuralMethod(name) => format!("structural method `{}`", name),
AuxImport::StructuralGetter(name)
| AuxImport::StructuralSetter(name)
| AuxImport::StructuralClassGetter(_, name)
| AuxImport::StructuralClassSetter(_, name) => {
format!("structural field access of `{}`", name)
}
AuxImport::IndexingDeleterOfClass(_)
| AuxImport::IndexingDeleterOfObject
| AuxImport::IndexingGetterOfClass(_)
| AuxImport::IndexingGetterOfObject
| AuxImport::IndexingSetterOfClass(_)
| AuxImport::IndexingSetterOfObject => format!("indexing getters/setters/deleters"),
AuxImport::WrapInExportedClass(name) => {
format!("wrapping a pointer in a `{}` js class wrapper", name)
}
AuxImport::Intrinsic(intrinsic) => {
format!("wasm-bindgen specific intrinsic `{}`", intrinsic.name())
}
AuxImport::Closure { .. } => format!("creating a `Closure` wrapper"),
};
bail!("import of {} requires JS glue", item);
} |
main.py | # Import kivy tools
from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.gridlayout import GridLayout
from kivy.uix.recycleboxlayout import RecycleBoxLayout
from kivy.uix.label import Label
from kivy.uix.button import Button
from kivy.uix.checkbox import CheckBox
from kivy.uix.spinner import Spinner
from kivy.uix.recycleview import RecycleView
from kivy.uix.recycleview.views import RecycleDataViewBehavior
from kivy.uix.behaviors import FocusBehavior
from kivy.uix.recycleview.layout import LayoutSelectionBehavior
from kivy.properties import BooleanProperty, ObjectProperty
from kivy.uix.screenmanager import ScreenManager, Screen
from kivy.lang import Builder
# Import the kv files
Builder.load_file('./src/rv.kv')
Builder.load_file('./src/screenhome.kv')
Builder.load_file('./src/screenprofile.kv')
Builder.load_file('./src/screensettings.kv')
Builder.load_file('./src/screenproduct.kv')
Builder.load_file('./src/screenquantities.kv')
Builder.load_file('./src/screenfinal.kv')
Builder.load_file('./src/manager.kv')
# Other imports
import pandas as pd
import re
from Algo_main import algo # Import the algorithm for NutriScore computation
class SelectableRecycleBoxLayout(FocusBehavior, LayoutSelectionBehavior,
RecycleBoxLayout):
''' Add selection and focus behaviour to the view '''
pass
class SelectableGrid(RecycleDataViewBehavior, GridLayout):
''' Add selection support to the Label '''
index = None
selected = BooleanProperty(False)
selectable = BooleanProperty(True)
def refresh_view_attrs(self, rv, index, data):
''' Catch and handle the view changes '''
self.index = index
self.ids['id_label1'].text = data['label1']['text']
self.ids['id_label2'].text = data['label2']['text']
self.ids['id_label3'].text = data['label3']['text']
return super(SelectableGrid, self).refresh_view_attrs(
rv, index, data)
def on_touch_down(self, touch):
''' Add selection on touch down '''
if super(SelectableGrid, self).on_touch_down(touch):
return True
if self.collide_point(*touch.pos) and self.selectable:
return self.parent.select_with_touch(self.index, touch)
def apply_selection(self, rv, index, is_selected):
''' Respond to the selection of items '''
self.selected = is_selected
class SelectableQuantity(RecycleDataViewBehavior, GridLayout):
''' Add selection support to the Label '''
index = None
selected = BooleanProperty(False)
selectable = BooleanProperty(True)
def refresh_view_attrs(self, rv, index, data):
''' Catch and handle the view changes '''
self.index = index
self.ids['id_label1'].text = data['label1']['text']
self.ids['id_label2'].text = data['label2']['text']
self.ids['id_label3'].text = data['label3']['text']
return super(SelectableQuantity, self).refresh_view_attrs(
rv, index, data)
class RV(RecycleView):
''' Class for the RecycleView Controller '''
def __init__(self, **kwargs):
super(RV, self).__init__(**kwargs)
def upload(self, query, active):
''' Search data according to the user input '''
# Reset data
self.data = []
# Check if the Raw Food CheckBox is active or not
if active:
self.parent.parent.getSelection('API', query, True)
self.data = [{'label1': {'text': 'API'}, 'label2': {'text': query}, 'label3': {'text': 'Add/Remove'}}]
else:
isinside = allTrue
for item in query.split(): # Split the query in keywords
isinside = isinside & \
(DF['product_name'].str.contains(item, case=False) | \
DF['Brands'].str.contains(item, case=False))
if any(isinside):
selection = DF[isinside] # Select products to display
for row in selection.itertuples(): # Iterate through the columns of DF
d = {'label1': {'text': str(row[0])}, \
'label2': {'text': str(row[1])},
'label3': {'text': str(row[-1])}} # barcode, product_name, brand
self.data.append(d)
else:
isinside = DF.index.str.contains(query, case=False) # Search for Barcode
if any(isinside):
selection = DF[isinside]
for row in selection.itertuples():
d = {'label1': {'text': str(row[0])}, \
'label2': {'text': str(row[1])},
'label3': {'text': str(row[-1])}} # barcode, product_name, brand
self.data.append(d)
else:
# In case no product is found
self.data = [{'label1': {'text': ''}, \
'label2': {'text': 'No product found'}, 'label3': {'text': ''}}]
def getQuantities(self, dict):
''' Gather data for display on Quantities Screen '''
self.data = []
code = dict['code']
product_name = dict['product_name']
quantity = dict['quantity']
for index in range(len(code)):
d = {'label1': {'text': code[index]}, 'label2': {'text': product_name[index]}, \
'label3': {'text': quantity[index]}}
self.data.append(d)
class ScreenHome(Screen):
''' Class for the Home Screen. No variables or functions needed for this screen '''
pass
class ScreenProfile(Screen):
''' Class for the Profile Screen '''
def updateDF(self):
global DF
DF = pd.read_csv('https://drive.google.com/uc?export=download&id=1aLUh1UoQcS9lBa6oVRln-DuskxK5uK3y', \
index_col=[0], low_memory = False)
DF.to_csv('./data/OpenFoodFacts_final.csv.gz', compression='gzip')
self.ids['update'].text = 'Updated'
self.ids['update'].background_color = (0,1,0,1)
def update(self):
self.ids['update'].text = 'Updating'
self.ids['update'].background_color = (50/255,164/255,206/255,1)
class ScreenSettings(Screen):
''' Class for the Settings Screen '''
settings = {'rec': True,'name': '', 'surname': '', 'age': 0, 'sex': True, 'weight': 0, \
'email': '', 'activity': 0, 'days': 0}
id_profile = -999
def resetForm(self):
''' Reset the indicators of invalid input '''
self.ids.sex.color = (1,1,1,1)
self.ids.activity.color = (1,1,1,1)
self.ids.age.hint_text_color = (0.5, 0.5, 0.5, 1.0)
self.ids.weight.hint_text_color = (0.5, 0.5, 0.5, 1.0)
self.ids.days.hint_text_color = (0.5, 0.5, 0.5, 1.0)
self.ids.email.hint_text_color = (0.5, 0.5, 0.5, 1.0)
self.ids.name.hint_text_color = (0.5, 0.5, 0.5, 1.0)
self.ids.surname.hint_text_color = (0.5, 0.5, 0.5, 1.0)
def setForm(self, id_profile):
self.id_profile = id_profile
self.settings = {'rec': True,'name': '', 'surname': '', 'age': 0, 'sex': True, 'weight': 0, \
'email': '', 'activity': 0, 'days': 0}
if int(self.id_profile) >= 0:
self.ids.name.text = str(profile_list.iloc[self.id_profile]['name'])
self.ids.surname.text= str(profile_list.iloc[self.id_profile]['surname'])
self.ids.age.text = str(profile_list.iloc[self.id_profile]['age'])
if bool(profile_list.iloc[self.id_profile]['sex']):
self.ids.male.active = True
self.ids.female.active = False
else:
self.ids.male.active = False
self.ids.female.active = True
self.ids.weight.text = str(profile_list.iloc[self.id_profile]['weight'])
self.ids.email.text = str(profile_list.iloc[self.id_profile]['email'])
self.ids.days.text = str(profile_list.iloc[self.id_profile]['days'])
# activity is stored as a float (1.4 / 1.6 / 1.8); int() would truncate it
if float(profile_list.iloc[self.id_profile]['activity']) == 1.8: | self.ids.standing.active = True
elif float(profile_list.iloc[self.id_profile]['activity']) == 1.6:
self.ids.seated.active = False
self.ids.both.active = True
self.ids.standing.active = False
else:
self.ids.seated.active = True
self.ids.both.active = False
self.ids.standing.active = False
elif int(self.id_profile) == -999:
self.ids.name.text = ''
self.ids.surname.text = ''
self.ids.age.text = ''
self.ids.male.active = False
self.ids.female.active = False
self.ids.email.text = ''
self.ids.weight.text = ''
self.ids.seated.active = False
self.ids.both.active = False
self.ids.standing.active = False
self.ids.days.text = ''
else:
self.changeScreen(False)
def changeScreen(self, valid):
''' Handle the validity of the inputs and the change of current screen '''
if valid:
self.resetForm()
# Check name validity
if self.ids.name.text.strip() == '':
self.ids.name.hint_text_color = (1,0,0,1)
return False
# Check surname validity
elif self.ids.surname.text.strip() == '':
self.ids.surname.hint_text_color = (1,0,0,1)
return False
# Check age validity
elif self.ids.age.text.strip() == '' or int(self.ids.age.text) <= 0 or \
int(self.ids.age.text) >= 120:
self.ids.age.text = ''
self.ids.age.hint_text_color = (1,0,0,1)
return False
# Check sex validity
elif not(self.ids.male.active or self.ids.female.active):
self.ids.sex.color = (1,0,0,1)
return False
# Check email validity
elif not re.match(r"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)", self.ids.email.text):
self.ids.email.text = ''
self.ids.email.hint_text_color = (1,0,0,1)
return False
# Check weight validity
elif self.ids.weight.text.strip() == '' or int(self.ids.weight.text) <= 0:
self.ids.weight.text = ''
self.ids.weight.hint_text_color = (1,0,0,1)
return False
# Check activity validity
elif not(self.ids.seated.active or self.ids.both.active or self.ids.standing.active):
self.ids.activity.color = (1,0,0,1)
return False
# Check days validity
elif self.ids.days.text.strip() == '' or int(self.ids.days.text) <= 0:
self.ids.days.text = ''
self.ids.days.hint_text_color = (1,0,0,1)
return False
else: # Validation of the form and reset
self.settings['rec'] = True
self.settings['name'] = self.ids.name.text
self.settings['surname'] = self.ids.surname.text
self.settings['age'] = int(self.ids.age.text)
self.settings['weight'] = int(self.ids.weight.text)
self.settings['email'] = self.ids.email.text
self.settings['days'] = int(self.ids.days.text)
self.settings['sex'] = self.ids.male.active
if self.ids.seated.active:
self.settings['activity'] = 1.4
if self.ids.both.active:
self.settings['activity'] = 1.6
if self.ids.standing.active:
self.settings['activity'] = 1.8
self.resetForm()
else: # If the user pass the settings screen
self.settings['rec'] = False
self.manager.setSettings(self.settings, self.id_profile)
# Change the current screen
self.manager.current = 'Product Screen'
class ScreenProduct(Screen):
''' Class for the Product Screen '''
temp_dict = {'code':'', 'product_name': ''}
def getSelection(self, text1, text2, state):
# Temporarily select or deselect a product
if state:
self.temp_dict['code'] = text1
self.temp_dict['product_name'] = text2
else:
self.temp_dict['code'] = ''
self.temp_dict['product_name'] = ''
class ScreenQuantities(Screen):
''' Class for the Quantities Screen '''
temp_dict = {'code': [], 'product_name': [], 'quantity': [], 'color': []}
def initQuantity(self, data):
''' Initialize the dictionary of the products '''
if self.temp_dict['quantity'] == []:
self.temp_dict = data
self.ids.rv.getQuantities(data)
def updateQuantity(self, index, text1, text2, text3):
''' Store the quantities input by the user '''
l = len(self.temp_dict['quantity'])
if text3 == '' or text3 == '-' or int(text3) < 0:
text3 = '0'
if index < l:
self.temp_dict['code'][index] = text1
self.temp_dict['product_name'][index] = text2
self.temp_dict['quantity'][index] = text3
# Append the list of quantities if needed
else:
temp = ['0' for i in range(index-l)]
self.temp_dict['code'] = self.temp_dict['code'] + temp + [text1]
self.temp_dict['product_name'] = self.temp_dict['product_name'] + temp + [text2]
self.temp_dict['quantity'] = self.temp_dict['quantity'] + temp + [text3]
# Update the data displayed
self.initQuantity(self.temp_dict)
class ScreenFinal(Screen):
''' Class for the Final Screen. No variables or functions needed for this screen '''
pass
class Manager(ScreenManager):
''' Class for the Manager Controller. Store main data '''
selected_products = {'code': [], 'product_name': [], 'quantity': []}
settings = {'Rec': True, 'Name': '', 'Surname': '', 'Email': '', 'Age': 0, 'Sex': True, 'Pal': 0, \
'Weight': 0, 'Day': 0}
def getProfiles(self):
self.ids.screen_profile.ids.profile_spinner.values = \
[str(index + 1) + ' : ' + str(profile_list['name'][index]) + ' ' + str(profile_list['surname'][index]) \
for index in profile_list.index]
def toSettings(self, text):
if text == 'new':
id_profile = -999
elif text == 'pass':
id_profile = -1000
else:
items = text.split()
id_profile = items[0].strip()
id_profile = int(id_profile) - 1
self.ids.screen_settings.setForm(id_profile)
if id_profile != -1000:
self.current = 'Settings Screen'
def addProduct(self):
''' Add product to main storage '''
item1 = self.ids.screen_product.temp_dict['code']
item2 = self.ids.screen_product.temp_dict['product_name']
if item1 != '' and item2 != '':
self.selected_products['code'].append(item1)
self.selected_products['product_name'].append(item2)
self.selected_products['quantity'].append('0')
def deleteProduct(self):
''' Remove product of main storage '''
item1 = self.ids.screen_product.temp_dict['code']
item2 = self.ids.screen_product.temp_dict['product_name']
if item1 in self.selected_products['code'] and item2 in self.selected_products['product_name']:
self.selected_products['code'].remove(item1)
self.selected_products['product_name'].remove(item2)
self.selected_products['quantity'].pop()
def getQuantities(self, data):
''' Add quantities to main storage '''
self.selected_products['quantity'] = data['quantity']
l = len(self.selected_products['quantity'])
for item in range(l):
if self.selected_products['quantity'][item] == '':
self.selected_products['quantity'][item] = '0'
self.current = 'Final Screen'
def setSettings(self, data, new):
''' Add settings to main storage '''
self.settings['Rec'] = data['rec']
self.settings['Name'] = data['name']
self.settings['Surname'] = data['surname']
self.settings['Email'] = data['email']
self.settings['Pal'] = data['activity']
self.settings['Weight'] = data['weight']
self.settings['Day'] = data['days']
self.settings['Sex'] = data['sex']
self.settings['Age'] = data['age']
update = True
if new == -999:
temp_df = pd.DataFrame.from_dict({'index': [len(profile_list)], \
'name': [data['name']], 'surname': [data['surname']], \
'age': [data['age']], 'sex': [data['sex']], 'email': [data['email']], \
'weight': [data['weight']], \
'activity': [data['activity']], 'days': [data['days']]}).set_index('index')
new_profile_list = pd.concat([profile_list, temp_df])
elif new == -1000:
update = False
else:
temp_df = pd.DataFrame.from_dict({'name': [data['name']], 'surname': [data['surname']], \
'age': [data['age']], 'sex': [data['sex']], 'email': [data['email']], 'weight': [data['weight']], \
'activity': [data['activity']], 'days': [data['days']]})
new_profile_list= profile_list
new_profile_list.iloc[new] = temp_df.iloc[0]
if update:
new_profile_list.to_csv('./data/profile.csv', sep=';')
def computation(self):
''' Call algo for computation of NutriScore and recommendation. Display results '''
dict_product = {'Product': [], 'API': []}
for index in range(len(self.selected_products['code'])):
# Separation of API and OpenFoodFacts data
if str(self.selected_products['code'][index]) == 'API':
dict_product['API'].append((str(self.selected_products[
'product_name'][index]), int(self.selected_products['quantity'][index])))
else:
dict_product['Product'].append((str(self.selected_products[
'code'][index]), int(self.selected_products['quantity'][index])))
# Run the algorithm to get the recommendation to print on-screen
text_app_beverages, text_app_nonbeverages = algo(dict_product, self.settings, DF)
self.ids.screen_final.ids.beverages.text = text_app_beverages
self.ids.screen_final.ids.non_beverages.text = text_app_nonbeverages
class NutriScoreApp(App):
''' Main class of the App '''
def build(self):
''' Import the database for the whole application '''
global DF, allTrue, profile_list
try:
DF = pd.read_csv('./data/OpenFoodFacts_final.csv.gz', low_memory=False, index_col = [0])
allTrue = DF['product_name'].str.contains('', case=False) # True Vector of length len(DF)
profile_list = pd.read_csv('./data/profile.csv', sep=';', index_col=[0])
except Exception:
print('Fatal error: files missing')
return Manager()
if __name__ == '__main__':
NutriScoreApp().run() | self.ids.seated.active = False
self.ids.both.active = False |
cluster_status_list_builder.go | /*
Copyright (c) 2020 Red Hat, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// IMPORTANT: This file has been generated automatically, refrain from modifying it manually as all
// your changes will be lost when the file is generated again.
package v1 // github.com/openshift-online/ocm-sdk-go/clustersmgmt/v1
// ClusterStatusListBuilder contains the data and logic needed to build
// 'cluster_status' objects.
type ClusterStatusListBuilder struct {
items []*ClusterStatusBuilder
}
// NewClusterStatusList creates a new builder of 'cluster_status' objects.
func NewClusterStatusList() *ClusterStatusListBuilder {
return new(ClusterStatusListBuilder)
}
// Items sets the items of the list.
func (b *ClusterStatusListBuilder) Items(values ...*ClusterStatusBuilder) *ClusterStatusListBuilder {
b.items = make([]*ClusterStatusBuilder, len(values))
copy(b.items, values)
return b
}
// Empty returns true if the list is empty.
func (b *ClusterStatusListBuilder) Empty() bool {
return b == nil || len(b.items) == 0
}
// Copy copies the items of the given list into this builder, discarding any previous items.
func (b *ClusterStatusListBuilder) Copy(list *ClusterStatusList) *ClusterStatusListBuilder {
if list == nil || list.items == nil {
b.items = nil
} else {
b.items = make([]*ClusterStatusBuilder, len(list.items))
for i, v := range list.items {
b.items[i] = NewClusterStatus().Copy(v)
}
}
return b
}
// Build creates a list of 'cluster_status' objects using the | for i, item := range b.items {
items[i], err = item.Build()
if err != nil {
return
}
}
list = new(ClusterStatusList)
list.items = items
return
} | // configuration stored in the builder.
func (b *ClusterStatusListBuilder) Build() (list *ClusterStatusList, err error) {
items := make([]*ClusterStatus, len(b.items)) |
file.rs | use crate::model::date_time::DateTime;
use crate::model::file_sharing::{self, FileSharing, FileSharingScope, FileSharingWitness};
use crate::model::object::ObjectId;
use crate::model::user::{User, UserId};
use thiserror::Error;
use uuid::Uuid;
pub mod name;
pub use name::FileName;
pub mod type_;
pub use type_::FileType;
pub mod digest;
pub use digest::FileBlake3Digest;
pub mod size;
pub use size::FileSize;
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct FileId(Uuid);
impl FileId {
pub fn from_uuid(uuid: Uuid) -> FileId {
FileId(uuid)
}
pub fn to_uuid(&self) -> Uuid {
self.0
}
}
#[derive(Debug, Clone)]
pub struct File {
pub id: FileId,
pub created_at: DateTime,
pub author_id: UserId,
pub object_id: ObjectId,
pub blake3_digest: FileBlake3Digest,
pub name: Option<FileName>,
pub type_: FileType,
pub size: FileSize,
}
#[derive(Debug, Error, Clone)]
#[error("the file cannot be shared by the user")]
pub struct NonSharableFileError {
_priv: (),
}
#[derive(Debug, Clone, Copy)]
pub enum ShareWithExpirationErrorKind {
NonSharableFile,
InvalidExpirationDate,
}
#[derive(Debug, Error, Clone)]
#[error("the file cannot be shared")]
pub struct ShareWithExpirationError {
kind: ShareWithExpirationErrorKind,
}
impl ShareWithExpirationError {
pub fn kind(&self) -> ShareWithExpirationErrorKind {
self.kind
}
fn from_sharing_expiration_error(_err: file_sharing::InvalidExpirationDateError) -> Self {
ShareWithExpirationError {
kind: ShareWithExpirationErrorKind::InvalidExpirationDate,
}
}
}
impl File {
pub fn is_visible_to(&self, user: &User) -> bool {
&self.author_id == user.id()
}
pub fn is_visible_to_with_sharing(&self, witness: &FileSharingWitness) -> bool {
self.id == witness.file_id()
}
pub fn can_be_shared_by(&self, user: &User) -> bool {
self.is_visible_to(user) && &self.author_id == user.id()
}
pub fn share_by(
&self,
user: &User,
scope: FileSharingScope,
) -> Result<FileSharing, NonSharableFileError> {
if !self.can_be_shared_by(user) {
return Err(NonSharableFileError { _priv: () });
}
Ok(FileSharing::new(self.id, scope))
}
pub fn share_with_expiration_by(
&self,
user: &User,
scope: FileSharingScope,
expires_at: DateTime,
) -> Result<FileSharing, ShareWithExpirationError> {
if !self.can_be_shared_by(user) {
return Err(ShareWithExpirationError {
kind: ShareWithExpirationErrorKind::NonSharableFile,
});
}
FileSharing::with_expiration(self.id, scope, expires_at)
.map_err(ShareWithExpirationError::from_sharing_expiration_error)
}
}
#[cfg(test)]
mod tests {
use super::FileSharingScope;
use crate::model::date_time::DateTime;
use crate::test::model as test_model;
#[test]
fn | () {
let user = test_model::new_general_user();
let (file, _) = test_model::new_file(user.id().clone());
assert!(file.is_visible_to(&user));
}
#[test]
fn test_visibility_general_other() {
let user = test_model::new_general_user();
let other = test_model::new_general_user();
let (file, _) = test_model::new_file(other.id().clone());
assert!(!file.is_visible_to(&user));
}
#[test]
fn test_visibility_committee_other() {
let user = test_model::new_committee_user();
let other = test_model::new_general_user();
let (file, _) = test_model::new_file(other.id().clone());
assert!(!file.is_visible_to(&user));
}
#[test]
fn test_visibility_operator_other() {
let user = test_model::new_operator_user();
let other = test_model::new_general_user();
let (file, _) = test_model::new_file(other.id().clone());
assert!(!file.is_visible_to(&user));
}
#[test]
fn test_visibility_admin_other() {
let user = test_model::new_admin_user();
let other = test_model::new_general_user();
let (file, _) = test_model::new_file(other.id().clone());
assert!(!file.is_visible_to(&user));
}
#[test]
fn test_can_be_shared_by_general_owner() {
let user = test_model::new_general_user();
let (file, _) = test_model::new_file(user.id().clone());
assert!(file.can_be_shared_by(&user));
}
#[test]
fn test_can_be_shared_by_general_other() {
let user = test_model::new_general_user();
let other = test_model::new_general_user();
let (file, _) = test_model::new_file(other.id().clone());
assert!(!file.can_be_shared_by(&user));
}
#[test]
fn test_can_be_shared_by_admin_other() {
let user = test_model::new_admin_user();
let other = test_model::new_general_user();
let (file, _) = test_model::new_file(other.id().clone());
assert!(!file.can_be_shared_by(&user));
}
#[test]
fn test_share_by_general_owner() {
let user = test_model::new_general_user();
let (file, _) = test_model::new_file(user.id().clone());
assert!(matches!(
file.share_by(&user, FileSharingScope::Public),
Ok(sharing)
if sharing.file_id() == file.id
));
}
#[test]
fn test_share_by_general_other() {
use super::NonSharableFileError;
let user = test_model::new_general_user();
let other = test_model::new_general_user();
let (file, _) = test_model::new_file(other.id().clone());
assert!(matches!(
file.share_by(&user, FileSharingScope::Public),
Err(NonSharableFileError { .. })
));
}
#[test]
fn test_share_by_admin_other() {
use super::NonSharableFileError;
let user = test_model::new_admin_user();
let other = test_model::new_general_user();
let (file, _) = test_model::new_file(other.id().clone());
assert!(matches!(
file.share_by(&user, FileSharingScope::Public),
Err(NonSharableFileError { .. })
));
}
#[test]
fn test_share_with_expiration_by_general_owner() {
let user = test_model::new_general_user();
let (file, _) = test_model::new_file(user.id().clone());
let scope = FileSharingScope::Public;
let expires_at = DateTime::from_utc(chrono::Utc::now() + chrono::Duration::days(1));
assert!(matches!(
file.share_with_expiration_by(&user, scope, expires_at),
Ok(sharing)
if sharing.file_id() == file.id
));
}
#[test]
fn test_share_with_expiration_by_admin_other() {
use super::ShareWithExpirationErrorKind;
let user = test_model::new_admin_user();
let other = test_model::new_general_user();
let (file, _) = test_model::new_file(other.id().clone());
let scope = FileSharingScope::Public;
let expires_at = DateTime::from_utc(chrono::Utc::now() + chrono::Duration::days(1));
assert!(matches!(
file.share_with_expiration_by(&user, scope, expires_at),
Err(err)
if matches!(err.kind(), ShareWithExpirationErrorKind::NonSharableFile)
));
}
#[test]
fn test_share_with_past_expiration_by_general_owner() {
use super::ShareWithExpirationErrorKind;
let user = test_model::new_general_user();
let (file, _) = test_model::new_file(user.id().clone());
let scope = FileSharingScope::Public;
let expires_at = DateTime::from_utc(chrono::Utc::now() - chrono::Duration::days(1));
assert!(matches!(
file.share_with_expiration_by(&user, scope, expires_at),
Err(err)
if matches!(err.kind(), ShareWithExpirationErrorKind::InvalidExpirationDate)
));
}
}
| test_visibility_general_owner |
4.py | enru=open('en-ru.txt','r')
input=open('input.txt','r')
output=open('output.txt','w')
s=enru.read()
x=''  # accumulates the current word being read from the input
# prov: the set of Latin letters, used to decide whether a character is part of a word
prov={'q','w','e','r','t','y','u','i','o','p','a','s','d','f','g','h','j','k','l','z','x','c','v','b','n','m'}
slovar={}  # maps an English word to its Russian translation
s=s.replace('\t-\t',' ')
# Build the dictionary: the key is the English word before the first space, the value is the rest of the line (its translation)
while len(s)>0:
slovar[s[:s.index(' ')]]=s[s.index(' '):s.index('\n')]
s=s[s.index('\n')+1:]
print(slovar)
s=input.read()
s=s.lower()
while len(s)>0: | else:
print(x,a, file=output, sep='',end='')
x=''
else:
x+=a
s=s[1:] | a=s[0]
if a not in prov:
if x in slovar:
print(slovar[x],a, file=output, sep='',end='') |
checkbox.min.js | module.exports=function(e){function t(o){if(n[o])return n[o].exports;var r=n[o]={i:o,l:!1,exports:{}};return e[o].call(r.exports,r,r.exports,t),r.l=!0,r.exports}var n={};return t.m=e,t.c=n,t.d=function(e,n,o){t.o(e,n)||Object.defineProperty(e,n,{configurable:!1,enumerable:!0,get:o})},t.n=function(e){var n=e&&e.__esModule?function(){return e.default}:function(){return e};return t.d(n,"a",n),n},t.o=function(e,t){return Object.prototype.hasOwnProperty.call(e,t)},t.p="./",t(t.s=167)}({1:function(e,t){e.exports=function(e,t,n,o,r){var c,i=e=e||{},a=typeof e.default;"object"!==a&&"function"!==a||(c=e,i=e.default);var s="function"==typeof i?i.options:i;t&&(s.render=t.render,s.staticRenderFns=t.staticRenderFns),o&&(s._scopeId=o);var u;if(r?(u=function(e){e=e||this.$vnode&&this.$vnode.ssrContext||this.parent&&this.parent.$vnode&&this.parent.$vnode.ssrContext,e||"undefined"==typeof __VUE_SSR_CONTEXT__||(e=__VUE_SSR_CONTEXT__),n&&n.call(this,e),e&&e._registeredComponents&&e._registeredComponents.add(r)},s._ssrRegister=u):n&&(u=n),u){var l=s.functional,p=l?s.render:s.beforeCreate;l?s.render=function(e,t){return u.call(t),p(e,t)}:s.beforeCreate=p?[].concat(p,u):[u]}return{esModule:c,exports:i,options:s}}},100:function(e,t,n){function | (e){n(101)}var r=n(1)(n(102),n(103),o,null,null);e.exports=r.exports},101:function(e,t){},102:function(e,t,n){var o,r,c;!function(n,i){r=[e,t],o=i,void 0!==(c="function"==typeof o?o.apply(t,r):o)&&(e.exports=c)}(0,function(e,t){"use strict";Object.defineProperty(t,"__esModule",{value:!0});t.default={name:"cube-checkbox",props:{value:{type:[Boolean,String]},label:{type:[Boolean,String]},disabled:{type:Boolean,default:!1},option:{type:[Boolean,String,Object],default:function(){return{_def_option:!0}}},position:{type:String,default:"left"},shape:{type:String,default:"circle"},hollowStyle:{type:Boolean,default:!1}},data:function(){var e=this.$parent,t=e.$data._checkboxGroup;return{isInGroup:t,isInHorizontalGroup:t&&e.$props.horizontal}},computed:{computedOption:function(){var e=this.option,t=this.label,n=this.disabled;return!0===e._def_option?e={label:t,value:t,disabled:n}:"string"==typeof e&&(e={label:e,value:e,disabled:!1}),e},checkValue:{get:function(){return this.isInGroup?this.$parent.value.indexOf(this.computedOption.value)>-1:Boolean(this.value)},set:function(e){var t=this.computedOption.value,n=t&&e?t:e,o=e?"checked":"cancel-checked";this.$emit("input",n),this.isInGroup&&this.$parent.$emit(o,t||e,this)}},_containerClass:function(){return{"cube-checkbox-hollow":this.hollowStyle,"cube-checkbox_checked":this.checkValue,"cube-checkbox_disabled":this.computedOption.disabled,"border-right-1px":this.isInHorizontalGroup}},_wrapClass:function(){if(this.isInGroup&&!this.isInHorizontalGroup)return"border-bottom-1px"},isSquare:function(){return"square"===this.shape||this.hollowStyle},_borderIconClass:function(){return this.isSquare?"cubeic-square-border":"cubeic-round-border"},_rightIconClass:function(){return this.isSquare?"cubeic-square-right":"cubeic-right"}}},e.exports=t.default})},103:function(e,t){e.exports={render:function(){var e=this,t=e.$createElement,n=e._self._c||t;return 
n("div",{staticClass:"cube-checkbox",class:e._containerClass,attrs:{"data-pos":e.position}},[n("label",{staticClass:"cube-checkbox-wrap",class:e._wrapClass},[n("input",{directives:[{name:"model",rawName:"v-model",value:e.checkValue,expression:"checkValue"}],staticClass:"cube-checkbox-input",attrs:{type:"checkbox",disabled:e.computedOption.disabled},domProps:{checked:Array.isArray(e.checkValue)?e._i(e.checkValue,null)>-1:e.checkValue},on:{change:function(t){var n=e.checkValue,o=t.target,r=!!o.checked;if(Array.isArray(n)){var c=e._i(n,null);o.checked?c<0&&(e.checkValue=n.concat([null])):c>-1&&(e.checkValue=n.slice(0,c).concat(n.slice(c+1)))}else e.checkValue=r}}}),e._v(" "),n("span",{staticClass:"cube-checkbox-ui",class:e._borderIconClass},[n("i",{class:e._rightIconClass})]),e._v(" "),n("span",{staticClass:"cube-checkbox-label"},[e._t("default",[e._v(e._s(e.computedOption.label))])],2)])])},staticRenderFns:[]}},167:function(e,t,n){var o,r,c;!function(i,a){r=[e,t,n(100)],o=a,void 0!==(c="function"==typeof o?o.apply(t,r):o)&&(e.exports=c)}(0,function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var o=function(e){return e&&e.__esModule?e:{default:e}}(n);o.default.install=function(e){e.component(o.default.name,o.default)},t.default=o.default,e.exports=t.default})}}); | o |
ssd.py | """Keras implementation of SSD."""
import keras.backend as K
from keras.layers import Activation
from keras.layers import AtrousConvolution2D
from keras.layers import Convolution2D
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers import GlobalAveragePooling2D
from keras.layers import Input
from keras.layers import MaxPooling2D
from keras.layers import merge
from keras.layers import Reshape
from keras.layers import ZeroPadding2D
from keras.models import Model
from ssd_layers import Normalize
from ssd_layers import PriorBox
def SSD300(input_shape, num_classes=21):
"""SSD300 architecture.
# Arguments
input_shape: Shape of the input image,
expected to be either (300, 300, 3) or (3, 300, 300)(not tested).
num_classes: Number of classes including background.
# References
https://arxiv.org/abs/1512.02325
"""
net = {}
# Block 1: convolution block
input_tensor = Input(shape=input_shape)
img_size = (input_shape[1], input_shape[0])
net['input'] = input_tensor | # bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None,
# kernel_constraint=None, bias_constraint=None)
net['conv1_1'] = Convolution2D(64, 3, 3, # 64 filters; kernel_size 3x3 convolution window; strides: step size
activation='relu', # activation function: ReLU
border_mode='same', # padding mode: 'same' or 'valid'
name='conv1_1')(net['input'])
net['conv1_2'] = Convolution2D(64, 3, 3,
activation='relu',
border_mode='same',
name='conv1_2')(net['conv1_1'])
# Max pooling over spatial data
# keras.layers.MaxPooling2D(pool_size=(2, 2), strides=None, padding='valid', data_format=None)
# strides defaults to None, in which case it falls back to pool_size
net['pool1'] = MaxPooling2D((2, 2), strides=(2, 2), border_mode='same',
name='pool1')(net['conv1_2'])
# Block 2: convolution block
net['conv2_1'] = Convolution2D(128, 3, 3,
activation='relu',
border_mode='same',
name='conv2_1')(net['pool1'])
net['conv2_2'] = Convolution2D(128, 3, 3,
activation='relu',
border_mode='same',
name='conv2_2')(net['conv2_1'])
net['pool2'] = MaxPooling2D((2, 2), strides=(2, 2), border_mode='same',
name='pool2')(net['conv2_2'])
# Block 3: convolution block
net['conv3_1'] = Convolution2D(256, 3, 3,
activation='relu',
border_mode='same',
name='conv3_1')(net['pool2'])
net['conv3_2'] = Convolution2D(256, 3, 3,
activation='relu',
border_mode='same',
name='conv3_2')(net['conv3_1'])
net['conv3_3'] = Convolution2D(256, 3, 3,
activation='relu',
border_mode='same',
name='conv3_3')(net['conv3_2'])
net['pool3'] = MaxPooling2D((2, 2), strides=(2, 2), border_mode='same',
name='pool3')(net['conv3_3'])
# Block 4: convolution block
net['conv4_1'] = Convolution2D(512, 3, 3,
activation='relu',
border_mode='same',
name='conv4_1')(net['pool3'])
net['conv4_2'] = Convolution2D(512, 3, 3,
activation='relu',
border_mode='same',
name='conv4_2')(net['conv4_1'])
net['conv4_3'] = Convolution2D(512, 3, 3,
activation='relu',
border_mode='same',
name='conv4_3')(net['conv4_2'])
net['pool4'] = MaxPooling2D((2, 2), strides=(2, 2), border_mode='same',
name='pool4')(net['conv4_3'])
# Block 5: convolution block
net['conv5_1'] = Convolution2D(512, 3, 3,
activation='relu',
border_mode='same',
name='conv5_1')(net['pool4'])
net['conv5_2'] = Convolution2D(512, 3, 3,
activation='relu',
border_mode='same',
name='conv5_2')(net['conv5_1'])
net['conv5_3'] = Convolution2D(512, 3, 3,
activation='relu',
border_mode='same',
name='conv5_3')(net['conv5_2'])
net['pool5'] = MaxPooling2D((3, 3), strides=(1, 1), border_mode='same',
name='pool5')(net['conv5_3'])
# FC6: atrous convolution (dilated convolution, i.e. convolution with holes) over the 2D input
net['fc6'] = AtrousConvolution2D(1024, 3, 3, atrous_rate=(6, 6),
activation='relu', border_mode='same',
name='fc6')(net['pool5'])
# x = Dropout(0.5, name='drop6')(x)
# FC7
net['fc7'] = Convolution2D(1024, 1, 1, activation='relu',
border_mode='same', name='fc7')(net['fc6'])
# x = Dropout(0.5, name='drop7')(x)
# Block 6
net['conv6_1'] = Convolution2D(256, 1, 1, activation='relu',
border_mode='same',
name='conv6_1')(net['fc7'])
net['conv6_2'] = Convolution2D(512, 3, 3, subsample=(2, 2),
activation='relu', border_mode='same',
name='conv6_2')(net['conv6_1'])
# Block 7
net['conv7_1'] = Convolution2D(128, 1, 1, activation='relu',
border_mode='same',
name='conv7_1')(net['conv6_2'])
net['conv7_2'] = ZeroPadding2D()(net['conv7_1'])
net['conv7_2'] = Convolution2D(256, 3, 3, subsample=(2, 2),
activation='relu', border_mode='valid',
name='conv7_2')(net['conv7_2'])
# Block 8
net['conv8_1'] = Convolution2D(128, 1, 1, activation='relu',
border_mode='same',
name='conv8_1')(net['conv7_2'])
net['conv8_2'] = Convolution2D(256, 3, 3, subsample=(2, 2),
activation='relu', border_mode='same',
name='conv8_2')(net['conv8_1'])
# Last Pool
net['pool6'] = GlobalAveragePooling2D(name='pool6')(net['conv8_2'])
# Prediction from conv4_3
# keras.layers.BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,
# beta_initializer='zeros', gamma_initializer='ones', moving_mean_initializer='zeros', moving_variance_initializer='ones',
# beta_regularizer=None, gamma_regularizer=None, beta_constraint=None, gamma_constraint=None)
# axis: integer, the axis that should be normalized (usually the feature axis)
# Batch normalization layer (Ioffe and Szegedy, 2014): normalizes the activations of the previous layer for each batch, i.e. applies a transformation that keeps the mean activation close to 0 and the standard deviation close to 1.
net['conv4_3_norm'] = Normalize(20, name='conv4_3_norm')(net['conv4_3'])
num_priors = 3
x = Convolution2D(num_priors * 4, 3, 3, border_mode='same',
name='conv4_3_norm_mbox_loc')(net['conv4_3_norm'])
net['conv4_3_norm_mbox_loc'] = x
flatten = Flatten(name='conv4_3_norm_mbox_loc_flat')
net['conv4_3_norm_mbox_loc_flat'] = flatten(net['conv4_3_norm_mbox_loc'])
name = 'conv4_3_norm_mbox_conf'
if num_classes != 21:
name += '_{}'.format(num_classes)
x = Convolution2D(num_priors * num_classes, 3, 3, border_mode='same',
name=name)(net['conv4_3_norm'])
net['conv4_3_norm_mbox_conf'] = x
flatten = Flatten(name='conv4_3_norm_mbox_conf_flat')
net['conv4_3_norm_mbox_conf_flat'] = flatten(net['conv4_3_norm_mbox_conf'])
priorbox = PriorBox(img_size, 30.0, aspect_ratios=[2],
variances=[0.1, 0.1, 0.2, 0.2],
name='conv4_3_norm_mbox_priorbox')
net['conv4_3_norm_mbox_priorbox'] = priorbox(net['conv4_3_norm'])
# Prediction from fc7
num_priors = 6
net['fc7_mbox_loc'] = Convolution2D(num_priors * 4, 3, 3,
border_mode='same',
name='fc7_mbox_loc')(net['fc7'])
flatten = Flatten(name='fc7_mbox_loc_flat')
net['fc7_mbox_loc_flat'] = flatten(net['fc7_mbox_loc'])
name = 'fc7_mbox_conf'
if num_classes != 21:
name += '_{}'.format(num_classes)
net['fc7_mbox_conf'] = Convolution2D(num_priors * num_classes, 3, 3,
border_mode='same',
name=name)(net['fc7'])
flatten = Flatten(name='fc7_mbox_conf_flat')
net['fc7_mbox_conf_flat'] = flatten(net['fc7_mbox_conf'])
priorbox = PriorBox(img_size, 60.0, max_size=114.0, aspect_ratios=[2, 3],
variances=[0.1, 0.1, 0.2, 0.2],
name='fc7_mbox_priorbox')
net['fc7_mbox_priorbox'] = priorbox(net['fc7'])
# Prediction from conv6_2
num_priors = 6
x = Convolution2D(num_priors * 4, 3, 3, border_mode='same',
name='conv6_2_mbox_loc')(net['conv6_2'])
net['conv6_2_mbox_loc'] = x
flatten = Flatten(name='conv6_2_mbox_loc_flat')
net['conv6_2_mbox_loc_flat'] = flatten(net['conv6_2_mbox_loc'])
name = 'conv6_2_mbox_conf'
if num_classes != 21:
name += '_{}'.format(num_classes)
x = Convolution2D(num_priors * num_classes, 3, 3, border_mode='same',
name=name)(net['conv6_2'])
net['conv6_2_mbox_conf'] = x
flatten = Flatten(name='conv6_2_mbox_conf_flat')
net['conv6_2_mbox_conf_flat'] = flatten(net['conv6_2_mbox_conf'])
priorbox = PriorBox(img_size, 114.0, max_size=168.0, aspect_ratios=[2, 3],
variances=[0.1, 0.1, 0.2, 0.2],
name='conv6_2_mbox_priorbox')
net['conv6_2_mbox_priorbox'] = priorbox(net['conv6_2'])
# Prediction from conv7_2
num_priors = 6
x = Convolution2D(num_priors * 4, 3, 3, border_mode='same',
name='conv7_2_mbox_loc')(net['conv7_2'])
net['conv7_2_mbox_loc'] = x
flatten = Flatten(name='conv7_2_mbox_loc_flat')
net['conv7_2_mbox_loc_flat'] = flatten(net['conv7_2_mbox_loc'])
name = 'conv7_2_mbox_conf'
if num_classes != 21:
name += '_{}'.format(num_classes)
x = Convolution2D(num_priors * num_classes, 3, 3, border_mode='same',
name=name)(net['conv7_2'])
net['conv7_2_mbox_conf'] = x
flatten = Flatten(name='conv7_2_mbox_conf_flat')
net['conv7_2_mbox_conf_flat'] = flatten(net['conv7_2_mbox_conf'])
priorbox = PriorBox(img_size, 168.0, max_size=222.0, aspect_ratios=[2, 3],
variances=[0.1, 0.1, 0.2, 0.2],
name='conv7_2_mbox_priorbox')
net['conv7_2_mbox_priorbox'] = priorbox(net['conv7_2'])
# Prediction from conv8_2
num_priors = 6
x = Convolution2D(num_priors * 4, 3, 3, border_mode='same',
name='conv8_2_mbox_loc')(net['conv8_2'])
net['conv8_2_mbox_loc'] = x
flatten = Flatten(name='conv8_2_mbox_loc_flat')
net['conv8_2_mbox_loc_flat'] = flatten(net['conv8_2_mbox_loc'])
name = 'conv8_2_mbox_conf'
if num_classes != 21:
name += '_{}'.format(num_classes)
x = Convolution2D(num_priors * num_classes, 3, 3, border_mode='same',
name=name)(net['conv8_2'])
net['conv8_2_mbox_conf'] = x
flatten = Flatten(name='conv8_2_mbox_conf_flat')
net['conv8_2_mbox_conf_flat'] = flatten(net['conv8_2_mbox_conf'])
priorbox = PriorBox(img_size, 222.0, max_size=276.0, aspect_ratios=[2, 3],
variances=[0.1, 0.1, 0.2, 0.2],
name='conv8_2_mbox_priorbox')
net['conv8_2_mbox_priorbox'] = priorbox(net['conv8_2'])
# Prediction from pool6
num_priors = 6
x = Dense(num_priors * 4, name='pool6_mbox_loc_flat')(net['pool6'])
net['pool6_mbox_loc_flat'] = x
name = 'pool6_mbox_conf_flat'
if num_classes != 21:
name += '_{}'.format(num_classes)
x = Dense(num_priors * num_classes, name=name)(net['pool6'])
net['pool6_mbox_conf_flat'] = x
priorbox = PriorBox(img_size, 276.0, max_size=330.0, aspect_ratios=[2, 3],
variances=[0.1, 0.1, 0.2, 0.2],
name='pool6_mbox_priorbox')
if K.image_dim_ordering() == 'tf':
target_shape = (1, 1, 256)
else:
target_shape = (256, 1, 1)
net['pool6_reshaped'] = Reshape(target_shape,
name='pool6_reshaped')(net['pool6'])
net['pool6_mbox_priorbox'] = priorbox(net['pool6_reshaped'])
# Gather all predictions
net['mbox_loc'] = merge([net['conv4_3_norm_mbox_loc_flat'],
net['fc7_mbox_loc_flat'],
net['conv6_2_mbox_loc_flat'],
net['conv7_2_mbox_loc_flat'],
net['conv8_2_mbox_loc_flat'],
net['pool6_mbox_loc_flat']],
mode='concat', concat_axis=1, name='mbox_loc')
net['mbox_conf'] = merge([net['conv4_3_norm_mbox_conf_flat'],
net['fc7_mbox_conf_flat'],
net['conv6_2_mbox_conf_flat'],
net['conv7_2_mbox_conf_flat'],
net['conv8_2_mbox_conf_flat'],
net['pool6_mbox_conf_flat']],
mode='concat', concat_axis=1, name='mbox_conf')
net['mbox_priorbox'] = merge([net['conv4_3_norm_mbox_priorbox'],
net['fc7_mbox_priorbox'],
net['conv6_2_mbox_priorbox'],
net['conv7_2_mbox_priorbox'],
net['conv8_2_mbox_priorbox'],
net['pool6_mbox_priorbox']],
mode='concat', concat_axis=1,
name='mbox_priorbox')
if hasattr(net['mbox_loc'], '_keras_shape'):
num_boxes = net['mbox_loc']._keras_shape[-1] // 4
elif hasattr(net['mbox_loc'], 'int_shape'):
num_boxes = K.int_shape(net['mbox_loc'])[-1] // 4
net['mbox_loc'] = Reshape((num_boxes, 4),
name='mbox_loc_final')(net['mbox_loc'])
net['mbox_conf'] = Reshape((num_boxes, num_classes),
name='mbox_conf_logits')(net['mbox_conf'])
net['mbox_conf'] = Activation('softmax',
name='mbox_conf_final')(net['mbox_conf'])
net['predictions'] = merge([net['mbox_loc'],
net['mbox_conf'],
net['mbox_priorbox']],
mode='concat', concat_axis=2,
name='predictions')
model = Model(net['input'], net['predictions'])
return model | # 2D convolution layer: sliding-window convolution over a 2D input
# keras.layers.Conv2D(filters, kernel_size, strides=(1, 1), padding='valid', data_format=None,
# dilation_rate=(1, 1), activation=None, use_bias=True, kernel_initializer='glorot_uniform', |
socket.rs | use super::super::{
monitor::Monitor
};
use std::{
time,
thread,
os::unix::net::{
UnixListener
},
io::{
self,
Read,
BufReader,
Write
},
sync::{
Arc,
Mutex
},
path::Path,
fs
};
use serde::{Serialize, Deserialize};
use bincode;
pub const SOCKET_ADDR: &str = "/tmp/autoplank.sock";
#[derive(Debug, Serialize, Deserialize)]
pub enum SocketMessage {
Ok(Vec<Monitor>),
Err(String),
RefreshMonitors
}
pub fn socket(m: Arc<Mutex<Vec<Monitor>>>) -> Result<(), io::Error> | {
if Path::new(SOCKET_ADDR).exists() {
fs::remove_file(SOCKET_ADDR)?;
}
let listener = UnixListener::bind(SOCKET_ADDR)?;
for stream in listener.incoming() {
let mut s = stream?;
thread::sleep(time::Duration::from_millis(10));
let mut buf = BufReader::new(&s);
let mut data = [0u8; 32];
let len = buf.read(&mut data)?;
let msg: SocketMessage = match bincode::deserialize(&data[0..len]) {
Ok(m) => m,
Err(e) => {
eprintln!("{}", e);
continue;
}
};
match msg {
SocketMessage::RefreshMonitors => {
println!("=> Scanning for monitors...");
let monitors = padlock::mutex_lock(&m, |lock| {
*lock = Monitor::get_all();
lock.clone()
});
let data = bincode::serialize(&SocketMessage::Ok(monitors)).unwrap();
s.write(&data[..])?;
},
_ => eprintln!("Received invalid data")
};
}
Ok(())
} |